From d6beac5235118b251deb18a6579aab85271eaa7b Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Mon, 29 Jul 2019 16:09:28 +0200
Subject: [PATCH 01/83] compcov levels to enable the instrumentation of only
 immediates

---
 qemu_mode/libcompcov/README.compcov       |  8 ++-
 qemu_mode/libcompcov/libcompcov.so.c      | 54 +++++++++++++++++--
 qemu_mode/patches/afl-qemu-cpu-inl.h      |  9 +++-
 .../patches/afl-qemu-cpu-translate-inl.h  |  9 ++--
 qemu_mode/patches/i386-translate.diff     |  6 +--
 5 files changed, 71 insertions(+), 15 deletions(-)

diff --git a/qemu_mode/libcompcov/README.compcov b/qemu_mode/libcompcov/README.compcov
index 2a4a0ee5..9be13d88 100644
--- a/qemu_mode/libcompcov/README.compcov
+++ b/qemu_mode/libcompcov/README.compcov
@@ -18,15 +18,19 @@ For optimized binaries this is an issue, those functions are often inlined
 and this module is not capable to log the coverage in this case.
 
 If you have the source code of the fuzzing target you should nto use this
-library and QEMU but build ot with afl-clang-fast and the laf-intel options.
+library and QEMU but build it with afl-clang-fast and the laf-intel options.
 
 To use this library make sure to preload it with AFL_PRELOAD.
 
   export AFL_PRELOAD=/path/to/libcompcov.so
-  export AFL_QEMU_COMPCOV=1
+  export AFL_COMPCOV_LEVEL=1
 
   afl-fuzz -Q -i input -o output -- <target args>
 
+AFL_COMPCOV_LEVEL tells QEMU and libcompcov how to log comparisons.
+Level 1 logs just comparisons with immediates / read-only memory and level 2
+logs all comparisons.
+
 The library make use of https://github.com/ouadev/proc_maps_parser and so it
 is Linux specific. However this is not a strict dependency, other UNIX
 operating systems can be supported simply replacing the code related to the
diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c
index 582230db..92e4dbaa 100644
--- a/qemu_mode/libcompcov/libcompcov.so.c
+++ b/qemu_mode/libcompcov/libcompcov.so.c
@@ -45,6 +45,8 @@ static void *__compcov_code_start,
 
 static u8 *__compcov_afl_map;
 
+static u32 __compcov_level;
+
 static int (*__libc_strcmp)(const char*, const char*);
 static int (*__libc_strncmp)(const char*, const char*, size_t);
 static int (*__libc_strcasecmp)(const char*, const char*);
@@ -54,6 +56,28 @@ static int (*__libc_memcmp)(const void*, const void*, size_t);
 
 static int debug_fd = -1;
 
+#define MAX_MAPPINGS 1024
+
+static struct mapping {
+  void *st, *en;
+} __compcov_ro[MAX_MAPPINGS];
+
+static u32 __compcov_ro_cnt;
+
+
+/* Check an address against the list of read-only mappings. 
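+   With AFL_COMPCOV_LEVEL < 2, the *cmp hooks below only log a
+   comparison when at least one operand points into such a mapping,
+   i.e. when it is likely a constant token compiled into the target.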
*/ + +static u8 __compcov_is_ro(const void* ptr) { + + u32 i; + + for (i = 0; i < __compcov_ro_cnt; i++) + if (ptr >= __compcov_ro[i].st && ptr <= __compcov_ro[i].en) return 1; + + return 0; +} + + static size_t __strlen2(const char *s1, const char *s2, size_t max_length) { // from https://github.com/googleprojectzero/CompareCoverage @@ -71,6 +95,15 @@ static void __compcov_load(void) { __libc_strcasecmp = dlsym(RTLD_NEXT, "strcasecmp"); __libc_strncasecmp = dlsym(RTLD_NEXT, "strncasecmp"); __libc_memcmp = dlsym(RTLD_NEXT, "memcmp"); + + if (getenv("AFL_QEMU_COMPCOV")) { + + __compcov_level = 1; + } + if (getenv("AFL_COMPCOV_LEVEL")) { + + __compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); + } char *id_str = getenv(SHM_ENV_VAR); int shm_id; @@ -110,6 +143,12 @@ static void __compcov_load(void) { __compcov_code_end = maps_tmp->addr_end; } } + + if ((maps_tmp->is_w && !maps_tmp->is_r) || __compcov_ro_cnt == MAX_MAPPINGS) + continue; + + __compcov_ro[__compcov_ro_cnt].st = maps_tmp->addr_start; + __compcov_ro[__compcov_ro_cnt].en = maps_tmp->addr_end; } pmparser_free(maps); @@ -149,7 +188,8 @@ int strcmp(const char* str1, const char* str2) { void* retaddr = __builtin_return_address(0); - if (__compcov_is_in_bound(retaddr)) { + if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && + !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); @@ -173,7 +213,8 @@ int strncmp(const char* str1, const char* str2, size_t len) { void* retaddr = __builtin_return_address(0); - if (__compcov_is_in_bound(retaddr)) { + if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && + !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); n = MIN(n, len); @@ -198,7 +239,8 @@ int strcasecmp(const char* str1, const char* str2) { void* retaddr = __builtin_return_address(0); - if (__compcov_is_in_bound(retaddr)) { + if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && + !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { /* Fallback to strcmp, maybe improve in future */ size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); @@ -223,7 +265,8 @@ int strncasecmp(const char* str1, const char* str2, size_t len) { void* retaddr = __builtin_return_address(0); - if (__compcov_is_in_bound(retaddr)) { + if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && + !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { /* Fallback to strncmp, maybe improve in future */ size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); @@ -249,7 +292,8 @@ int memcmp(const void* mem1, const void* mem2, size_t len) { void* retaddr = __builtin_return_address(0); - if (__compcov_is_in_bound(retaddr)) { + if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && + !__compcov_is_ro(mem1) && !__compcov_is_ro(mem2))) { size_t n = len; diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h index 03951fea..b769f62e 100644 --- a/qemu_mode/patches/afl-qemu-cpu-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-inl.h @@ -66,7 +66,7 @@ abi_ulong afl_entry_point, /* ELF entry point (_start) */ afl_start_code, /* .text start pointer */ afl_end_code; /* .text end pointer */ -u8 afl_enable_compcov; +u8 afl_compcov_level; /* Set in the child process in forkserver mode: */ @@ -159,9 +159,14 @@ static void afl_setup(void) { } + /* Maintain for compatibility */ if (getenv("AFL_QEMU_COMPCOV")) { - afl_enable_compcov = 1; + afl_compcov_level = 1; + } + if (getenv("AFL_COMPCOV_LEVEL")) { + + afl_compcov_level = 
atoi(getenv("AFL_COMPCOV_LEVEL")); } /* pthread_atfork() seems somewhat broken in util/rcu.c, and I'm diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h index 0ca89c98..4716c2ac 100644 --- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h @@ -40,7 +40,7 @@ extern unsigned char *afl_area_ptr; extern unsigned int afl_inst_rms; extern abi_ulong afl_start_code, afl_end_code; -extern u8 afl_enable_compcov; +extern u8 afl_compcov_level; void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2); @@ -95,11 +95,14 @@ static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1, static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2, - TCGMemOp ot) { + TCGMemOp ot, int is_imm) { void *func; - if (!afl_enable_compcov || cur_loc > afl_end_code || cur_loc < afl_start_code) + if (!afl_compcov_level || cur_loc > afl_end_code || cur_loc < afl_start_code) + return; + + if (!is_imm && afl_compcov_level < 2) return; switch (ot) { diff --git a/qemu_mode/patches/i386-translate.diff b/qemu_mode/patches/i386-translate.diff index 0bc48828..239b2404 100644 --- a/qemu_mode/patches/i386-translate.diff +++ b/qemu_mode/patches/i386-translate.diff @@ -15,11 +15,11 @@ index 0dd5fbe4..b95d341e 100644 tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1); -+ afl_gen_compcov(s1->pc, s1->cc_srcT, s1->T1, ot); ++ afl_gen_compcov(s1->pc, s1->cc_srcT, s1->T1, ot, d == OR_EAX); } else { tcg_gen_mov_tl(s1->cc_srcT, s1->T0); tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1); -+ afl_gen_compcov(s1->pc, s1->T0, s1->T1, ot); ++ afl_gen_compcov(s1->pc, s1->T0, s1->T1, ot, d == OR_EAX); gen_op_st_rm_T0_A0(s1, ot, d); } gen_op_update2_cc(s1); @@ -27,7 +27,7 @@ index 0dd5fbe4..b95d341e 100644 tcg_gen_mov_tl(cpu_cc_src, s1->T1); tcg_gen_mov_tl(s1->cc_srcT, s1->T0); tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1); -+ afl_gen_compcov(s1->pc, s1->T0, s1->T1, ot); ++ afl_gen_compcov(s1->pc, s1->T0, s1->T1, ot, d == OR_EAX); set_cc_op(s1, CC_OP_SUBB + ot); break; } From ccb231e4f47692bf7f8594c65f120cf741984651 Mon Sep 17 00:00:00 2001 From: floyd Date: Tue, 6 Aug 2019 16:39:42 +0200 Subject: [PATCH 02/83] set AFL_CC correctly, if set to afl-clang but TEST_CC is afl-gcc, this will fail --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4103dbfa..e3e0a38c 100644 --- a/Makefile +++ b/Makefile @@ -157,7 +157,7 @@ ifndef AFL_NO_X86 test_build: afl-gcc afl-as afl-showmap @echo "[*] Testing the CC wrapper and instrumentation output..." - unset AFL_USE_ASAN AFL_USE_MSAN; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) + unset AFL_USE_ASAN AFL_USE_MSAN; AFL_CC=$(TEST_CC) AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) echo 0 | ./afl-showmap -m none -q -o .test-instr0 ./test-instr echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr @rm -f test-instr From 1315021388c456f3d4f342255612da32fe0e2b87 Mon Sep 17 00:00:00 2001 From: floyd Date: Tue, 6 Aug 2019 16:39:42 +0200 Subject: [PATCH 03/83] unset AFL_CC correctly, if set to afl-clang but TEST_CC is afl-gcc, this will fail (eg. 
when later installing QEMU but AFL_CC was already set)
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index e3e0a38c..0431c1d0 100644
--- a/Makefile
+++ b/Makefile
@@ -157,7 +157,7 @@ ifndef AFL_NO_X86
 test_build: afl-gcc afl-as afl-showmap
 	@echo "[*] Testing the CC wrapper and instrumentation output..."
-	unset AFL_USE_ASAN AFL_USE_MSAN; AFL_CC=$(TEST_CC) AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS)
+	unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS)
 	echo 0 | ./afl-showmap -m none -q -o .test-instr0 ./test-instr
 	echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr

From 0f476a289ff104c311ae3a1dadcc5815665e00ee Mon Sep 17 00:00:00 2001
From: floyd
Date: Tue, 6 Aug 2019 17:00:14 +0200
Subject: [PATCH 04/83] Ugly patch for this issue
 https://lists.sr.ht/~philmd/qemu/patches/6224#%3C20190617114005.24603-1-berrange@redhat.com%3E+linux-user/syscall.c
 in QEMU with Ubuntu 19.10

---
 qemu_mode/patches/syscall.diff | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/qemu_mode/patches/syscall.diff b/qemu_mode/patches/syscall.diff
index cb2acfcd..60b5905e 100644
--- a/qemu_mode/patches/syscall.diff
+++ b/qemu_mode/patches/syscall.diff
@@ -2,9 +2,10 @@ diff --git a/linux-user/syscall.c b/linux-user/syscall.c
 index 280137da..8c0e749f 100644
 --- a/linux-user/syscall.c
 +++ b/linux-user/syscall.c
-@@ -112,6 +112,8 @@
+@@ -112,6 +112,9 @@
 #include "qemu.h"
 #include "fd-trans.h"
 
++#include <linux/sockios.h>
 +extern unsigned int afl_forksrv_pid;
 +
@@ -32,4 +33,4 @@ index 280137da..8c0e749f 100644
 +    }
  #ifdef TARGET_NR_set_robust_list
-     case TARGET_NR_set_robust_list:
+     case TARGET_NR_set_robust_list:
\ No newline at end of file

From dc2c46e23c067eaeadc042fe84c603a496e90cf6 Mon Sep 17 00:00:00 2001
From: hexcoder-
Date: Wed, 7 Aug 2019 20:22:47 +0200
Subject: [PATCH 05/83] change instrumentation test to trigger different
 bitmap entries with clang 9.0

---
 Makefile           | 2 +-
 llvm_mode/Makefile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 0431c1d0..47b06785 100644
--- a/Makefile
+++ b/Makefile
@@ -158,7 +158,7 @@ ifndef AFL_NO_X86
 test_build: afl-gcc afl-as afl-showmap
 	@echo "[*] Testing the CC wrapper and instrumentation output..."
 	unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS)
-	echo 0 | ./afl-showmap -m none -q -o .test-instr0 ./test-instr
+	echo -n| ./afl-showmap -m none -q -o .test-instr0 ./test-instr
 	echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr
 	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
index 2b685ddc..80ffb38a 100644
--- a/llvm_mode/Makefile
+++ b/llvm_mode/Makefile
@@ -174,7 +174,7 @@ endif
 test_build: $(PROGS)
 	@echo "[*] Testing the CC wrapper and instrumentation output..."
 	unset AFL_USE_ASAN AFL_USE_MSAN AFL_INST_RATIO; AFL_QUIET=1 AFL_PATH=. 
AFL_CC=$(CC) AFL_LLVM_LAF_SPLIT_SWITCHES=1 AFL_LLVM_LAF_TRANSFORM_COMPARES=1 AFL_LLVM_LAF_SPLIT_COMPARES=1 ../afl-clang-fast $(CFLAGS) ../test-instr.c -o test-instr $(LDFLAGS)
-	echo 0 | ../afl-showmap -m none -q -o .test-instr0 ./test-instr
+	echo -n| ../afl-showmap -m none -q -o .test-instr0 ./test-instr
 	echo 1 | ../afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr
 	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi

From a6fe8ae0af884c76ea92128a6fd878a13dd3abf3 Mon Sep 17 00:00:00 2001
From: hexcoder-
Date: Wed, 7 Aug 2019 20:25:22 +0200
Subject: [PATCH 06/83] fix compilation error with llvm 9.0

---
 llvm_mode/compare-transform-pass.so.cc | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/llvm_mode/compare-transform-pass.so.cc b/llvm_mode/compare-transform-pass.so.cc
index d0dbe8ec..e7886db1 100644
--- a/llvm_mode/compare-transform-pass.so.cc
+++ b/llvm_mode/compare-transform-pass.so.cc
@@ -82,7 +82,11 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
       , nullptr
 #endif
       );
+#if LLVM_VERSION_MAJOR < 9
   Function* tolowerFn = cast<Function>(c);
+#else
+  FunctionCallee tolowerFn = c;
+#endif
 
   /* iterate over all functions, bbs and instruction and add suitable calls to
      strcmp/memcmp/strncmp/strcasecmp/strncasecmp */
   for (auto &F : M) {

From 09c95b7ea7b10796bf0002392530041ab68816f7 Mon Sep 17 00:00:00 2001
From: hexcoder-
Date: Wed, 7 Aug 2019 20:26:41 +0200
Subject: [PATCH 07/83] reviewed neverZero for llvm 9.0

---
 llvm_mode/LLVMInsTrim.so.cc | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/llvm_mode/LLVMInsTrim.so.cc b/llvm_mode/LLVMInsTrim.so.cc
index fad6dd4f..0a15680d 100644
--- a/llvm_mode/LLVMInsTrim.so.cc
+++ b/llvm_mode/LLVMInsTrim.so.cc
@@ -292,10 +292,11 @@ namespace {
       Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1));
 
 #if LLVM_VERSION_MAJOR < 9
-      if (neverZero_counters_str != NULL) { // with llvm 9 we make this the default as the bug in llvm is then fixed
+      if (neverZero_counters_str != NULL) // with llvm 9 we make this the default as the bug in llvm is then fixed
 #else
-      #warning "neverZero implementation needs to be reviewed!"
+      if (1) // with llvm 9 we make this the default as the bug in llvm is then fixed
 #endif
+      {
         /* hexcoder: Realize a counter that skips zero during overflow. 
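          * A plain 8-bit hit counter would wrap from 255 back to 0,
          * making a hot edge indistinguishable from one never executed.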
          * Once this counter reaches its maximum value, it next increments to 1
          *
          * Instead of
          * Counter + 1 -> Counter
          * we inject now this
          * Counter + 1 -> {Counter, OverflowFlag}
          * Counter + OverflowFlag -> Counter
          */
         auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0));
         auto carry = IRB.CreateZExt(cf, Int8Ty);
         Incr = IRB.CreateAdd(Incr, carry);
-#if LLVM_VERSION_MAJOR < 9
       }
-#endif
 
       IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
 
       /* Set prev_loc to cur_loc >> 1 */
       /*
-      StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc);
+      StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1), OldPrev);
       Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
       */

From 07df1e303419a642b96750411af99b586d0ea5a0 Mon Sep 17 00:00:00 2001
From: hexcoder-
Date: Wed, 7 Aug 2019 20:34:57 +0200
Subject: [PATCH 08/83] bugfix 'echo -n' is not POSIX, use input redirection
 from /dev/null

---
 Makefile           | 2 +-
 llvm_mode/Makefile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 47b06785..9e92de81 100644
--- a/Makefile
+++ b/Makefile
@@ -158,7 +158,7 @@ ifndef AFL_NO_X86
 test_build: afl-gcc afl-as afl-showmap
 	@echo "[*] Testing the CC wrapper and instrumentation output..."
 	unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS)
-	echo -n| ./afl-showmap -m none -q -o .test-instr0 ./test-instr
+	./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
 	echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr
 	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
index 80ffb38a..d01fbbdf 100644
--- a/llvm_mode/Makefile
+++ b/llvm_mode/Makefile
@@ -174,7 +174,7 @@ endif
 test_build: $(PROGS)
 	@echo "[*] Testing the CC wrapper and instrumentation output..."
 	unset AFL_USE_ASAN AFL_USE_MSAN AFL_INST_RATIO; AFL_QUIET=1 AFL_PATH=. AFL_CC=$(CC) AFL_LLVM_LAF_SPLIT_SWITCHES=1 AFL_LLVM_LAF_TRANSFORM_COMPARES=1 AFL_LLVM_LAF_SPLIT_COMPARES=1 ../afl-clang-fast $(CFLAGS) ../test-instr.c -o test-instr $(LDFLAGS)
-	echo -n| ../afl-showmap -m none -q -o .test-instr0 ./test-instr
+	../afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
 	echo 1 | ../afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr
 	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi

From 8b6a4e575978fac83155efd90d39030c84b2a755 Mon Sep 17 00:00:00 2001
From: David Carlier
Date: Tue, 30 Jul 2019 07:25:56 +0000
Subject: [PATCH 09/83] For BSD/APPLE platforms use native random calls

Solution not involving file descriptors, seeded upon fork and on a
regular basis.

Signed-off-by: Tobias Kortkamp

---
 afl-fuzz.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/afl-fuzz.c b/afl-fuzz.c
index a1388a55..9397d5be 100644
--- a/afl-fuzz.c
+++ b/afl-fuzz.c
@@ -62,6 +62,7 @@
 
 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__)
 #  include <sys/sysctl.h>
+#  define HAVE_ARC4RANDOM 1
 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
 
 /* For systems that have sched_setaffinity; right now just Linux, but one
@@ -219,7 +220,9 @@ EXP_ST u8  skip_deterministic,        /* Skip deterministic stages?      */
            fast_cal;                  /* Try to calibrate faster? 
*/ static s32 out_fd, /* Persistent fd for out_file */ +#ifndef HAVE_ARC4RANDOM dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ +#endif dev_null_fd = -1, /* Persistent fd for /dev/null */ fsrv_ctl_fd, /* Fork server control pipe (write) */ fsrv_st_fd; /* Fork server status pipe (read) */ @@ -297,7 +300,9 @@ static u8 stage_val_type; /* Value type (STAGE_VAL_*) */ static u64 stage_finds[32], /* Patterns found per fuzz stage */ stage_cycles[32]; /* Execs per fuzz stage */ +#ifndef HAVE_ARC4RANDOM static u32 rand_cnt; /* Random number counter */ +#endif static u64 total_cal_us, /* Total calibration time (us) */ total_cal_cycles; /* Total calibration cycles */ @@ -641,14 +646,8 @@ static void trim_py(char** ret, size_t* retlen) { int select_algorithm(void) { int i_puppet, j_puppet; - u32 seed[2]; - if (!fixed_seed) { - ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom"); - srandom(seed[0]); - } - - double sele = ((double)(random()%10000)*0.0001); + double sele = ((double)(UR(10000))*0.0001); j_puppet = 0; for (i_puppet = 0; i_puppet < operator_num; i_puppet++) { if (unlikely(i_puppet == 0)) { @@ -699,7 +698,15 @@ static u64 get_cur_time_us(void) { have slight bias. */ static inline u32 UR(u32 limit) { +#ifdef HAVE_ARC4RANDOM + if (fixed_seed) { + return random() % limit; + } + /* The boundary not being necessarily a power of 2, + we need to ensure the result uniformity. */ + return arc4random_uniform(limit); +#else if (!fixed_seed && unlikely(!rand_cnt--)) { u32 seed[2]; @@ -709,6 +716,7 @@ static inline u32 UR(u32 limit) { } return random() % limit; +#endif } @@ -2407,7 +2415,9 @@ EXP_ST void init_forkserver(char** argv) { close(out_dir_fd); close(dev_null_fd); +#ifndef HAVE_ARC4RANDOM close(dev_urandom_fd); +#endif close(fileno(plot_file)); /* This should improve performance a bit, since it stops the linker from @@ -2681,7 +2691,9 @@ static u8 run_target(char** argv, u32 timeout) { close(dev_null_fd); close(out_dir_fd); +#ifndef HAVE_ARC4RANDOM close(dev_urandom_fd); +#endif close(fileno(plot_file)); /* Set sane defaults for ASAN if nothing else specified. */ @@ -11466,8 +11478,10 @@ EXP_ST void setup_dirs_fds(void) { dev_null_fd = open("/dev/null", O_RDWR); if (dev_null_fd < 0) PFATAL("Unable to open /dev/null"); +#ifndef HAVE_ARC4RANDOM dev_urandom_fd = open("/dev/urandom", O_RDONLY); if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom"); +#endif /* Gnuplot output file. */ From 2971b5b31527be94037dfc4f60231ee2a0a1ea25 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 8 Aug 2019 10:36:43 +0200 Subject: [PATCH 10/83] documentation update --- TODO | 35 ++++++++++++++++++++++++++++++----- docs/ChangeLog | 3 ++- docs/env_variables.txt | 8 ++++++-- llvm_mode/README.llvm | 3 +-- qemu_mode/README.qemu | 31 +++++++++++++++++++++---------- 5 files changed, 60 insertions(+), 20 deletions(-) diff --git a/TODO b/TODO index 3d1e444d..42987cb9 100644 --- a/TODO +++ b/TODO @@ -1,6 +1,6 @@ Roadmap 2.53d: ============== - - indent all the code: clang-format -style=Google + - indent all the code: .clang-format - update docs/sister_projects.txt @@ -18,16 +18,41 @@ gcc_plugin: qemu_mode: - deferred mode with AFL_DEFERRED_QEMU=0xaddress + @andrea - dont we have that already with AFL_ENTRYPOINT? unit testing / or large testcase campaign Roadmap 2.54d: ============== - - expand MAP size to 256k (current L2 cache size on processors) - -> 18 bit map - - llvm_mode: dynamic map size and collission free basic block IDs - +Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges. 
+         At afl's default map that means ~16 collisions and ~3 wrappings.
+  Solution #1: increase map size.
+    every +1 decreases fuzzing speed by ~10% and halves the collisions
+    the birthday paradox predicts collisions at these numbers of edges:
+     2^16 = 302
+     2^17 = 427
+     2^18 = 603
+     2^19 = 853
+     2^20 = 1207
+     2^21 = 1706
+     2^22 = 2412
+     2^23 = 3411
+     2^24 = 4823
+    It's an easy solution but also not a good one.
+  Solution #2: use dynamic map size and collision free basic block IDs
+    This only works in llvm_mode and llvm >= 9 though
+    A potentially good future solution
+  Solution #3: write instruction pointers to a big shared map
+    512kb/1MB shared map and the instrumented code writes the instruction
+    pointer into the map. Map must be big enough but could be command line
+    controlled.
+    Good: complete coverage information, nothing is lost. Choice of analysis
+          impacts speed, but this can be decided by user options
+    Neutral: a little bit slower but no loss of coverage
+    Bad: completely changes how afl uses the map and the scheduling.
+    Overall another very good solution
+
 qemu_mode:
  - persistent mode patching the return address (WinAFL style)
  - instrument only comparison with immediate values by default when using compcov

diff --git a/docs/ChangeLog b/docs/ChangeLog
index 5e78610e..dfebb68a 100644
--- a/docs/ChangeLog
+++ b/docs/ChangeLog
@@ -17,6 +17,8 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
 Version ++2.53d (dev):
 ----------------------
 
+  - llvm 9 is now supported (still needs testing)
+  - fix building qemu on some Ubuntus (thanks to floyd!)
   - custom mutator by a loaded library is now supported (thanks to kyakdan!)
   - fix for a few features to support different map sized than 2^16
   - afl-showmap: new option -r now shows the real values in the buckets (stock
@@ -26,7 +28,6 @@ Version ++2.53d (dev):
   - ... your patch? :)
 
-
 --------------------------
 Version ++2.53c (release):
 --------------------------
diff --git a/docs/env_variables.txt b/docs/env_variables.txt
index 36fdc369..93066dbc 100644
--- a/docs/env_variables.txt
+++ b/docs/env_variables.txt
@@ -257,10 +257,14 @@ The QEMU wrapper used to instrument binary-only code supports several settings:
     Use this if you are unsure if the entrypoint might be wrong - but
     use it directly, e.g. afl-qemu-trace ./program
 
-  - If you want to specify a specific entrypoint into the binary (this can
-    be very good for the performance!), use AFL_ENTRYPOINT for this.
+  - AFL_ENTRYPOINT allows you to specify a specific entrypoint into the
+    binary (this can be very good for the performance!).
     The entrypoint is specified as hex address, e.g. 0x4004110
 
+  - AFL_QEMU_COMPCOV is for a sub-project in qemu_mode called ./libcompcov
+    which implements laf-intel for qemu. It also needs AFL_PRELOAD and
+    you can find more information in qemu_mode/libcompcov/README.compcov
+
 5) Settings for afl-cmin
 ------------------------
diff --git a/llvm_mode/README.llvm b/llvm_mode/README.llvm
index a0c40211..f324b8c0 100644
--- a/llvm_mode/README.llvm
+++ b/llvm_mode/README.llvm
@@ -8,8 +8,7 @@ Fast LLVM-based instrumentation for afl-fuzz
 1) Introduction
 ---------------
 
-! llvm_mode works with llvm version 3.8.1 up to 8.x !
-! llvm version 9 does not work yet !
+! llvm_mode works with llvm versions 3.8.1 up to 9 ! 
 The code in this directory allows you to instrument programs for AFL using
 true compiler-level instrumentation, instead of the more crude
diff --git a/qemu_mode/README.qemu b/qemu_mode/README.qemu
index 124fce12..754c0259 100644
--- a/qemu_mode/README.qemu
+++ b/qemu_mode/README.qemu
@@ -46,7 +46,19 @@ Note: if you want the QEMU helper to be installed on your system for all
 users, you need to build it before issuing 'make install' in the parent
 directory.
 
-3) Notes on linking
+3) Options
+----------
+
+There is ./libcompcov/ which implements laf-intel (splitting memcmp,
+strncmp, etc. to make these conditions easier for afl-fuzz to solve).
+Highly recommended.
+
+Another option is the environment variable AFL_ENTRYPOINT, which allows
+moving the forkserver to a different point, e.g. just before the input
+file is opened (i.e. well after command line parsing and config file
+loading), which can be a huge speed improvement.
+
+4) Notes on linking
 -------------------
 
 The feature is supported only on Linux. Supporting BSD may amount to porting
@@ -68,7 +80,7 @@ practice, this means two things:
 Setting AFL_INST_LIBS=1 can be used to circumvent the .text detection logic
 and instrument every basic block encountered.
 
-4) Benchmarking
+5) Benchmarking
 ---------------
 
 If you want to compare the performance of the QEMU instrumentation with that of
@@ -84,7 +96,7 @@ Comparative measurements of execution speed or instrumentation coverage will be
 fairly meaningless if the optimization levels or instrumentation scopes don't
 match.
 
-5) Gotchas, feedback, bugs
+6) Gotchas, feedback, bugs
 --------------------------
 
 If you need to fix up checksums or do other cleanup on mutated test cases, see
@@ -106,7 +118,7 @@ with -march=core2, can help.
 
 Beyond that, this is an early-stage mechanism, so fields reports are welcome.
 You can send them to .
 
-6) Alternatives: static rewriting
+7) Alternatives: static rewriting
 ---------------------------------
 
 Statically rewriting binaries just once, instead of attempting to translate
@@ -114,12 +126,11 @@ them at run time, can be a faster alternative. That said, static rewriting is
 fraught with peril, because it depends on being able to properly and fully
 model program control flow without actually executing each and every code
 path.
 
-If you want to experiment with this mode of operation, there is a module
-contributed by Aleksandar Nikolich:
+The best implementation is this one:
+
+  https://github.com/vanhauser-thc/afl-dyninst
 
-  https://groups.google.com/forum/#!topic/afl-users/HlSQdbOTlpg
-
-At this point, the author reports the possibility of hiccups with stripped
-binaries. That said, if we can get it to be comparably reliable to QEMU, we may
-decide to switch to this mode, but I had no time to play with it yet.
+The issue, however, is Dyninst, which does not rewrite the binaries so that
+they run stably: a lot of crashes happen, especially in C++ programs that
+use throw/catch. Try it first, and if it works for you, be happy, as it is
+2-3x as fast as qemu_mode.
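
As a rough sketch, the qemu_mode options documented in the patch above
combine like this (the target path and the entry address are illustrative
placeholders, not taken from the patches):

  export AFL_PRELOAD=/path/to/libcompcov.so
  export AFL_COMPCOV_LEVEL=2
  export AFL_ENTRYPOINT=0x4004110
  afl-fuzz -Q -i input -o output -- ./target @@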
From e1183be22ec38e180ec86737e204c5effbc1648b Mon Sep 17 00:00:00 2001 From: van Hauser Date: Thu, 8 Aug 2019 10:43:27 +0200 Subject: [PATCH 11/83] documentation update --- Makefile | 2 +- README.md | 7 ++++--- llvm_mode/Makefile | 6 +++--- llvm_mode/README.llvm | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 9e92de81..5c5d7fb2 100644 --- a/Makefile +++ b/Makefile @@ -158,7 +158,7 @@ ifndef AFL_NO_X86 test_build: afl-gcc afl-as afl-showmap @echo "[*] Testing the CC wrapper and instrumentation output..." unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_QUIET=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) - ./afl-showmap -m none -q -o .test-instr0 ./test-instr to troubleshoot the issue."; echo; exit 1; fi diff --git a/README.md b/README.md index 2124b862..dff6463b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # american fuzzy lop plus plus (afl++) Release Version: 2.53c + Github Version: 2.53d @@ -17,7 +18,7 @@ get any improvements since November 2017. Among others afl++ has, e.g. more performant llvm_mode, supporting - llvm up to version 8, Qemu 3.1, more speed and crashfixes for Qemu, + llvm up to version 9, Qemu 3.1, more speed and crashfixes for Qemu, laf-intel feature for Qemu (with libcompcov) and more. Additionally the following patches have been integrated: @@ -120,7 +121,7 @@ superior to blind fuzzing or coverage-only tools. PLEASE NOTE: llvm_mode compilation with afl-clang-fast/afl-clang-fast++ instead of afl-gcc/afl-g++ is much faster and has a few cool features. See llvm_mode/ - however few code does not compile with llvm. -We support llvm versions 4.0 to 8. +We support llvm versions 3.8.0 to 9. When source code is available, instrumentation can be injected by a companion tool that works as a drop-in replacement for gcc or clang in any standard build @@ -143,7 +144,7 @@ For C++ programs, you'd would also want to set `CXX=/path/to/afl/afl-g++`. The clang wrappers (afl-clang and afl-clang++) can be used in the same way; clang users may also opt to leverage a higher-performance instrumentation mode, as described in [llvm_mode/README.llvm](llvm_mode/README.llvm). -Clang/LLVM has a much better performance and works with LLVM version 4.0 to 8. +Clang/LLVM has a much better performance and works with LLVM version 3.8.0 to 9. Using the LAF Intel performance enhancements are also recommended, see [llvm_mode/README.laf-intel](llvm_mode/README.laf-intel) diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile index d01fbbdf..1c661525 100644 --- a/llvm_mode/Makefile +++ b/llvm_mode/Makefile @@ -27,11 +27,11 @@ VERSION = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2) LLVM_CONFIG ?= llvm-config LLVMVER = $(shell $(LLVM_CONFIG) --version) -LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version | egrep -q '^9|3.0' && echo 1 || echo 0 ) +LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version | egrep -q '^[12]|^3\.0|^1[0-9]' && echo 1 || echo 0 ) LLVM_MAJOR = ($shell $(LLVM_CONFIG) --version | sed 's/\..*//') ifeq "$(LLVM_UNSUPPORTED)" "1" - $(warn llvm_mode only supports versions 3.8.0 up to 8.x ) + $(warn llvm_mode only supports versions 3.8.0 up to 9 ) endif # this is not visible yet: @@ -174,7 +174,7 @@ endif test_build: $(PROGS) @echo "[*] Testing the CC wrapper and instrumentation output..." unset AFL_USE_ASAN AFL_USE_MSAN AFL_INST_RATIO; AFL_QUIET=1 AFL_PATH=. 
AFL_CC=$(CC) AFL_LLVM_LAF_SPLIT_SWITCHES=1 AFL_LLVM_LAF_TRANSFORM_COMPARES=1 AFL_LLVM_LAF_SPLIT_COMPARES=1 ../afl-clang-fast $(CFLAGS) ../test-instr.c -o test-instr $(LDFLAGS) - ../afl-showmap -m none -q -o .test-instr0 ./test-instr to troubleshoot the issue."; echo; exit 1; fi diff --git a/llvm_mode/README.llvm b/llvm_mode/README.llvm index f324b8c0..9bb091ac 100644 --- a/llvm_mode/README.llvm +++ b/llvm_mode/README.llvm @@ -8,7 +8,7 @@ Fast LLVM-based instrumentation for afl-fuzz 1) Introduction --------------- -! llvm_mode works with llvm versions 3.8.1 up to 9 ! +! llvm_mode works with llvm versions 3.8.0 up to 9 ! The code in this directory allows you to instrument programs for AFL using true compiler-level instrumentation, instead of the more crude From 73d02f3a808903f02dd0b547e7db9ff00efdf6c5 Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Thu, 8 Aug 2019 23:09:58 +0200 Subject: [PATCH 12/83] fix some compiler warnings --- llvm_mode/Makefile | 2 +- llvm_mode/MarkNodes.cc | 2 +- llvm_mode/split-compares-pass.so.cc | 6 ++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile index 1c661525..8331b662 100644 --- a/llvm_mode/Makefile +++ b/llvm_mode/Makefile @@ -48,7 +48,7 @@ ifdef AFL_TRACE_PC endif CXXFLAGS ?= -O3 -funroll-loops -CXXFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \ +CXXFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g \ -DVERSION=\"$(VERSION)\" -Wno-variadic-macros CLANG_CFL = `$(LLVM_CONFIG) --cxxflags` -Wl,-znodelete -fno-rtti -fpic $(CXXFLAGS) diff --git a/llvm_mode/MarkNodes.cc b/llvm_mode/MarkNodes.cc index a156fccb..348dc264 100644 --- a/llvm_mode/MarkNodes.cc +++ b/llvm_mode/MarkNodes.cc @@ -193,7 +193,7 @@ namespace DominatorTree{ idom[now] = idom[idom[now]]; } } -}; // End of DominatorTree +} // End of DominatorTree std::vector Visited, InStack; std::vector TopoOrder, InDeg; diff --git a/llvm_mode/split-compares-pass.so.cc b/llvm_mode/split-compares-pass.so.cc index c025628f..a74b60fa 100644 --- a/llvm_mode/split-compares-pass.so.cc +++ b/llvm_mode/split-compares-pass.so.cc @@ -495,14 +495,12 @@ bool SplitComparesTransform::runOnModule(Module &M) { errs() << "Running split-compare-pass " << 64 << "\n"; splitCompares(M, 64); - [[clang::fallthrough]]; - /* fallthrough */ + [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ case 32: errs() << "Running split-compare-pass " << 32 << "\n"; splitCompares(M, 32); - [[clang::fallthrough]]; - /* fallthrough */ + [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ case 16: errs() << "Running split-compare-pass " << 16 << "\n"; splitCompares(M, 16); From 41d2e7d6b6ac1bab20f73e1e023c8450bbfc657e Mon Sep 17 00:00:00 2001 From: van Hauser Date: Fri, 9 Aug 2019 00:34:26 +0200 Subject: [PATCH 13/83] minor corrections --- Makefile | 2 +- afl-common.h | 1 + llvm_mode/Makefile | 2 +- test-instr.c | 7 +++++-- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 5c5d7fb2..e6e3af85 100644 --- a/Makefile +++ b/Makefile @@ -161,7 +161,7 @@ test_build: afl-gcc afl-as afl-showmap ./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null echo 1 | ./afl-showmap -m none -q -o .test-instr1 ./test-instr @rm -f test-instr - @cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping to troubleshoot the issue."; echo; exit 1; fi + @cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 
.test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
+	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please post to https://github.com/vanhauser-thc/AFLplusplus/issues to troubleshoot the issue."; echo; exit 1; fi
 	@echo "[+] All right, the instrumentation seems to be working!"
 else
diff --git a/afl-common.h b/afl-common.h
index 07afb75d..161caa39 100644
--- a/afl-common.h
+++ b/afl-common.h
@@ -1,5 +1,6 @@
 #ifndef __AFLCOMMON_H
 #define __AFLCOMMON_H
+#include "types.h"
 
 void detect_file_args(char **argv, u8 *prog_in);
 #endif
diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
index 8331b662..3923f8e3 100644
--- a/llvm_mode/Makefile
+++ b/llvm_mode/Makefile
@@ -177,7 +177,7 @@ test_build: $(PROGS)
 	../afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
 	echo 1 | ../afl-showmap -m none -q -o .test-instr1 ./test-instr
 	@rm -f test-instr
-	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please ping <lcamtuf@google.com> to troubleshoot the issue."; echo; exit 1; fi
+	@cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation does not seem to be behaving correctly!"; echo; echo "Please post to https://github.com/vanhauser-thc/AFLplusplus/issues to troubleshoot the issue."; echo; exit 1; fi
 	@echo "[+] All right, the instrumentation seems to be working!"
 
 all_done: test_build
diff --git a/test-instr.c b/test-instr.c
index 1b978c55..9107f15e 100644
--- a/test-instr.c
+++ b/test-instr.c
@@ -20,9 +20,12 @@
 
 int main(int argc, char** argv) {
 
-  char buf[8];
+  char buff[8];
+  char *buf = buff;
 
-  if (read(0, buf, sizeof(buf)) < 1) {
+  if (argc > 1)
+    buf = argv[1];
+  else if (read(0, buf, sizeof(buf)) < 1) {
     printf("Hum?\n");
     exit(1);
   }

From ed603dcba2984b717bcdf9b06f0e8ec7554e0523 Mon Sep 17 00:00:00 2001
From: Zhu Xing
Date: Sat, 10 Aug 2019 08:19:21 +0000
Subject: [PATCH 14/83] llvm-mode: Get the clang version correctly

When using clang-8.0, the previous command in the Makefile would match
8.0.0 twice, and thus a warning message was printed.

---
 llvm_mode/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
index 3923f8e3..66490184 100644
--- a/llvm_mode/Makefile
+++ b/llvm_mode/Makefile
@@ -75,7 +75,7 @@ endif
 # sanity check.
 # Are versions of clang --version and llvm-config --version equal?
-CLANGVER = $(shell $(CC) --version | sed -E -ne '/^.*([0-9]\.[0-9]\.[0-9]).*/s//\1/p')
+CLANGVER = $(shell $(CC) --version | sed -ne "1,2p" | sed -E -ne '/^.*([0-9]\.[0-9]\.[0-9]).*/s//\1/p')
 
 ifeq "$(shell echo '\#include <sys/ipc.h>@\#include <sys/shm.h>@int main() { int _id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | IPC_EXCL | 0600); shmctl(_id, IPC_RMID, 0); return 0;}' | tr @ '\n' | $(CC) -x c - -o .test2 2>/dev/null && echo 1 || echo 0 )" "1"

From 0612aa2b65a6995c8cd372385a0d096efef7ba93 Mon Sep 17 00:00:00 2001
From: hexcoder-
Date: Sat, 10 Aug 2019 17:10:18 +0200
Subject: [PATCH 15/83] optimized version extraction for clang (restricted to
 first line)

---
 llvm_mode/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
index 66490184..fc7a6fd9 100644
--- a/llvm_mode/Makefile
+++ b/llvm_mode/Makefile
@@ -75,7 +75,7 @@ endif
 # sanity check.
 # Are versions of clang --version and llvm-config --version equal? 
-CLANGVER = $(shell $(CC) --version | sed -ne "1,2p" | sed -E -ne '/^.*([0-9]\.[0-9]\.[0-9]).*/s//\1/p') +CLANGVER = $(shell $(CC) --version | sed -E -ne '1{/^.*([0-9]\.[0-9]\.[0-9]).*/s//\1/p}') ifeq "$(shell echo '\#include @\#include @int main() { int _id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | IPC_EXCL | 0600); shmctl(_id, IPC_RMID, 0); return 0;}' | tr @ '\n' | $(CC) -x c - -o .test2 2>/dev/null && echo 1 || echo 0 )" "1" From 642cf8b5ff662e125bd96d6dd0377c0fcaf9e29d Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Sat, 10 Aug 2019 19:58:18 +0200 Subject: [PATCH 16/83] performance tuning prefer preincrement over postincrement --- afl-fuzz.c | 562 ++++++++++++++++++++++++++--------------------------- 1 file changed, 281 insertions(+), 281 deletions(-) diff --git a/afl-fuzz.c b/afl-fuzz.c index a1388a55..be44c69e 100644 --- a/afl-fuzz.c +++ b/afl-fuzz.c @@ -650,7 +650,7 @@ int select_algorithm(void) { double sele = ((double)(random()%10000)*0.0001); j_puppet = 0; - for (i_puppet = 0; i_puppet < operator_num; i_puppet++) { + for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) { if (unlikely(i_puppet == 0)) { if (sele < probability_now[swarm_now][i_puppet]) break; @@ -718,7 +718,7 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) { u32 i; - for (i = 0; i < cnt - 2; i++) { + for (i = 0; i < cnt - 2; ++i) { u32 j = i + UR(cnt - i); void *s = ptrs[i]; @@ -817,7 +817,7 @@ static void bind_to_free_cpu(void) { closedir(d); - for (i = 0; i < cpu_core_count; i++) if (!cpu_used[i]) break; + for (i = 0; i < cpu_core_count; ++i) if (!cpu_used[i]) break; if (i == cpu_core_count) { @@ -857,7 +857,7 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { s32 l_loc = -1; u32 pos; - for (pos = 0; pos < len; pos++) { + for (pos = 0; pos < len; ++pos) { if (*(ptr1++) != *(ptr2++)) { @@ -1133,8 +1133,8 @@ static void add_to_queue(u8* fname, u32 len, u8 passed_det) { } else q_prev100 = queue = queue_top = q; - queued_paths++; - pending_not_fuzzed++; + ++queued_paths; + ++pending_not_fuzzed; cycles_wo_finds = 0; @@ -1275,8 +1275,8 @@ static inline u8 has_new_bits(u8* virgin_map) { } - current++; - virgin++; + ++current; + ++virgin; } @@ -1336,10 +1336,10 @@ static u32 count_bytes(u8* mem) { u32 v = *(ptr++); if (!v) continue; - if (v & FF(0)) ret++; - if (v & FF(1)) ret++; - if (v & FF(2)) ret++; - if (v & FF(3)) ret++; + if (v & FF(0)) ++ret; + if (v & FF(1)) ++ret; + if (v & FF(2)) ++ret; + if (v & FF(3)) ++ret; } @@ -1365,10 +1365,10 @@ static u32 count_non_255_bytes(u8* mem) { case. 
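       (virgin bytes start out as 0xff, so a u32 word that no execution
       has touched still reads 0xffffffff and is skipped right away)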
*/ if (v == 0xffffffff) continue; - if ((v & FF(0)) != FF(0)) ret++; - if ((v & FF(1)) != FF(1)) ret++; - if ((v & FF(2)) != FF(2)) ret++; - if ((v & FF(3)) != FF(3)) ret++; + if ((v & FF(0)) != FF(0)) ++ret; + if ((v & FF(1)) != FF(1)) ++ret; + if ((v & FF(2)) != FF(2)) ++ret; + if ((v & FF(3)) != FF(3)) ++ret; } @@ -1414,7 +1414,7 @@ static void simplify_trace(u64* mem) { } else *mem = 0x0101010101010101ULL; - mem++; + ++mem; } @@ -1441,7 +1441,7 @@ static void simplify_trace(u32* mem) { } else *mem = 0x01010101; - mem++; + ++mem; } } @@ -1504,7 +1504,7 @@ static inline void classify_counts(u64* mem) { } - mem++; + ++mem; } @@ -1529,7 +1529,7 @@ static inline void classify_counts(u32* mem) { } - mem++; + ++mem; } @@ -1549,7 +1549,7 @@ static void minimize_bits(u8* dst, u8* src) { while (i < MAP_SIZE) { if (*(src++)) dst[i >> 3] |= 1 << (i & 7); - i++; + ++i; } @@ -1589,7 +1589,7 @@ static void update_bitmap_score(struct queue_entry* q) { /* For every byte set in trace_bits[], see if there is a previous winner, and how it compares to us. */ - for (i = 0; i < MAP_SIZE; i++) + for (i = 0; i < MAP_SIZE; ++i) if (trace_bits[i]) { @@ -1621,7 +1621,7 @@ static void update_bitmap_score(struct queue_entry* q) { /* Insert ourselves as the new winner. */ top_rated[i] = q; - q->tc_ref++; + ++q->tc_ref; if (!q->trace_mini) { q->trace_mini = ck_alloc(MAP_SIZE >> 3); @@ -1666,7 +1666,7 @@ static void cull_queue(void) { /* Let's see if anything in the bitmap isn't captured in temp_v. If yes, and if it has a top_rated[] contender, let's use it. */ - for (i = 0; i < MAP_SIZE; i++) + for (i = 0; i < MAP_SIZE; ++i) if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) { u32 j = MAP_SIZE >> 3; @@ -1678,9 +1678,9 @@ static void cull_queue(void) { temp_v[j] &= ~top_rated[i]->trace_mini[j]; top_rated[i]->favored = 1; - queued_favored++; + ++queued_favored; - if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) pending_favored++; + if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored; } @@ -1785,7 +1785,7 @@ static void read_testcases(void) { } - for (i = 0; i < nl_cnt; i++) { + for (i = 0; i < nl_cnt; ++i) { struct stat st; @@ -1881,15 +1881,15 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len, u8 *rptr, *wptr; u32 klen = 0; - cur_line++; + ++cur_line; /* Trim on left and right. */ - while (isspace(*lptr)) lptr++; + while (isspace(*lptr)) ++lptr; rptr = lptr + strlen(lptr) - 1; - while (rptr >= lptr && isspace(*rptr)) rptr--; - rptr++; + while (rptr >= lptr && isspace(*rptr)) --rptr; + ++rptr; *rptr = 0; /* Skip empty lines and comments. */ @@ -1907,28 +1907,28 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len, /* Skip alphanumerics and dashes (label). */ - while (isalnum(*lptr) || *lptr == '_') lptr++; + while (isalnum(*lptr) || *lptr == '_') ++lptr; /* If @number follows, parse that. */ if (*lptr == '@') { - lptr++; + ++lptr; if (atoi(lptr) > dict_level) continue; - while (isdigit(*lptr)) lptr++; + while (isdigit(*lptr)) ++lptr; } /* Skip whitespace and = signs. */ - while (isspace(*lptr) || *lptr == '=') lptr++; + while (isspace(*lptr) || *lptr == '=') ++lptr; /* Consume opening '"'. 
*/ if (*lptr != '"') FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line); - lptr++; + ++lptr; if (!*lptr) FATAL("Empty keyword in line %u.", cur_line); @@ -1952,7 +1952,7 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len, case '\\': - lptr++; + ++lptr; if (*lptr == '\\' || *lptr == '"') { *(wptr++) = *(lptr++); @@ -1968,14 +1968,14 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len, (strchr(hexdigits, tolower(lptr[2])) - hexdigits); lptr += 3; - klen++; + ++klen; break; default: *(wptr++) = *(lptr++); - klen++; + ++klen; } @@ -1990,7 +1990,7 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len, if (*min_len > klen) *min_len = klen; if (*max_len < klen) *max_len = klen; - extras_cnt++; + ++extras_cnt; } @@ -2073,7 +2073,7 @@ static void load_extras(u8* dir) { close(fd); ck_free(fn); - extras_cnt++; + ++extras_cnt; } @@ -2123,7 +2123,7 @@ static void maybe_add_auto(u8* mem, u32 len) { /* Skip runs of identical bytes. */ - for (i = 1; i < len; i++) + for (i = 1; i < len; ++i) if (mem[0] ^ mem[i]) break; if (i == len) return; @@ -2154,10 +2154,10 @@ static void maybe_add_auto(u8* mem, u32 len) { match. We optimize by exploiting the fact that extras[] are sorted by size. */ - for (i = 0; i < extras_cnt; i++) + for (i = 0; i < extras_cnt; ++i) if (extras[i].len >= len) break; - for (; i < extras_cnt && extras[i].len == len; i++) + for (; i < extras_cnt && extras[i].len == len; ++i) if (!memcmp_nocase(extras[i].data, mem, len)) return; /* Last but not least, check a_extras[] for matches. There are no @@ -2165,7 +2165,7 @@ static void maybe_add_auto(u8* mem, u32 len) { auto_changed = 1; - for (i = 0; i < a_extras_cnt; i++) { + for (i = 0; i < a_extras_cnt; ++i) { if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) { @@ -2187,7 +2187,7 @@ static void maybe_add_auto(u8* mem, u32 len) { a_extras[a_extras_cnt].data = ck_memdup(mem, len); a_extras[a_extras_cnt].len = len; - a_extras_cnt++; + ++a_extras_cnt; } else { @@ -2226,7 +2226,7 @@ static void save_auto(void) { if (!auto_changed) return; auto_changed = 0; - for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); i++) { + for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); ++i) { u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i); s32 fd; @@ -2251,7 +2251,7 @@ static void load_auto(void) { u32 i; - for (i = 0; i < USE_AUTO_EXTRAS; i++) { + for (i = 0; i < USE_AUTO_EXTRAS; ++i) { u8 tmp[MAX_AUTO_EXTRA + 1]; u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i); @@ -2294,12 +2294,12 @@ static void destroy_extras(void) { u32 i; - for (i = 0; i < extras_cnt; i++) + for (i = 0; i < extras_cnt; ++i) ck_free(extras[i].data); ck_free(extras); - for (i = 0; i < a_extras_cnt; i++) + for (i = 0; i < a_extras_cnt; ++i) ck_free(a_extras[i].data); ck_free(a_extras); @@ -2763,7 +2763,7 @@ static u8 run_target(char** argv, u32 timeout) { setitimer(ITIMER_REAL, &it, NULL); - total_execs++; + ++total_execs; /* Any subsequent operations on trace_bits must not be moved by the compiler below this point. Past this location, trace_bits[] behave @@ -2904,7 +2904,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD, exec_tmout * CAL_TMOUT_PERC / 100); - q->cal_failed++; + ++q->cal_failed; stage_name = "calibration"; stage_max = fast_cal ? 
3 : CAL_CYCLES; @@ -2919,7 +2919,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, start_us = get_cur_time_us(); - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { u32 cksum; @@ -2950,7 +2950,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 i; - for (i = 0; i < MAP_SIZE; i++) { + for (i = 0; i < MAP_SIZE; ++i) { if (!var_bytes[i] && first_trace[i] != trace_bits[i]) { @@ -2988,7 +2988,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, q->cal_failed = 0; total_bitmap_size += q->bitmap_size; - total_bitmap_entries++; + ++total_bitmap_entries; update_bitmap_score(q); @@ -3002,7 +3002,7 @@ abort_calibration: if (new_bits == 2 && !q->has_new_cov) { q->has_new_cov = 1; - queued_with_cov++; + ++queued_with_cov; } /* Mark variable paths. */ @@ -3013,7 +3013,7 @@ abort_calibration: if (!q->var_behavior) { mark_as_variable(q); - queued_variable++; + ++queued_variable; } } @@ -3037,7 +3037,7 @@ static void check_map_coverage(void) { if (count_bytes(trace_bits) < 100) return; - for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; i++) + for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i) if (trace_bits[i]) return; WARNF("Recompile binary with newer version of afl to improve coverage!"); @@ -3104,7 +3104,7 @@ static void perform_dry_run(char** argv) { if (timeout_given > 1) { WARNF("Test case results in a timeout (skipping)"); q->cal_failed = CAL_CHANCES; - cal_failures++; + ++cal_failures; break; } @@ -3139,7 +3139,7 @@ static void perform_dry_run(char** argv) { if (skip_crashes) { WARNF("Test case results in a crash (skipping)"); q->cal_failed = CAL_CHANCES; - cal_failures++; + ++cal_failures; break; } @@ -3215,7 +3215,7 @@ static void perform_dry_run(char** argv) { case FAULT_NOBITS: - useless_at_start++; + ++useless_at_start; if (!in_bitmap && !shuffle_queue) WARNF("No new instrumentation output, test case may be useless."); @@ -3297,7 +3297,7 @@ static void pivot_inputs(void) { u8 *nfn, *rsl = strrchr(q->fname, '/'); u32 orig_id; - if (!rsl) rsl = q->fname; else rsl++; + if (!rsl) rsl = q->fname; else ++rsl; /* If the original file name conforms to the syntax and the recorded ID matches the one we'd assign, just use the original file name. @@ -3364,7 +3364,7 @@ static void pivot_inputs(void) { if (q->passed_det) mark_as_det_done(q); q = q->next; - id++; + ++id; } @@ -3494,7 +3494,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { future fuzzing, etc. */ if (!(hnb = has_new_bits(virgin_bits))) { - if (crash_mode) total_crashes++; + if (crash_mode) ++total_crashes; return 0; } @@ -3513,7 +3513,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { if (hnb == 2) { queue_top->has_new_cov = 1; - queued_with_cov++; + ++queued_with_cov; } queue_top->exec_cksum = cksum; @@ -3544,7 +3544,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we just keep everything. 
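       (without instrumentation there is no trace bitmap to compare against)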
*/ - total_tmouts++; + ++total_tmouts; if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping; @@ -3560,7 +3560,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { } - unique_tmouts++; + ++unique_tmouts; /* Before saving, we make sure that it's a genuine hang by re-running the target with a more generous timeout (unless the default timeout @@ -3594,7 +3594,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { #endif /* ^!SIMPLE_FILES */ - unique_hangs++; + ++unique_hangs; last_hang_time = get_cur_time(); @@ -3608,7 +3608,7 @@ keep_as_crash: except for slightly different limits and no need to re-run test cases. */ - total_crashes++; + ++total_crashes; if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping; @@ -3638,7 +3638,7 @@ keep_as_crash: #endif /* ^!SIMPLE_FILES */ - unique_crashes++; + ++unique_crashes; last_crash_time = get_cur_time(); last_crash_execs = total_execs; @@ -4868,7 +4868,7 @@ static u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { write_to_testcase(retbuf, retlen); fault = run_target(argv, exec_tmout); - trim_execs++; + ++trim_execs; if (stop_soon || fault == FAULT_ERROR) goto abort_trimming; @@ -4994,7 +4994,7 @@ static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { write_with_gap(in_buf, q->len, remove_pos, trim_avail); fault = run_target(argv, exec_tmout); - trim_execs++; + ++trim_execs; if (stop_soon || fault == FAULT_ERROR) goto abort_trimming; @@ -5032,7 +5032,7 @@ static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { /* Since this can be slow, update the screen every now and then. */ if (!(trim_exec++ % stats_update_freq)) show_stats(); - stage_cur++; + ++stage_cur; } @@ -5093,7 +5093,7 @@ EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) { if (fault == FAULT_TMOUT) { if (subseq_tmouts++ > TMOUT_LIMIT) { - cur_skipped_paths++; + ++cur_skipped_paths; return 1; } @@ -5105,7 +5105,7 @@ EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) { if (skip_requested) { skip_requested = 0; - cur_skipped_paths++; + ++cur_skipped_paths; return 1; } @@ -5315,7 +5315,7 @@ static u8 could_be_bitflip(u32 xor_val) { /* Shift left until first bit set. */ - while (!(xor_val & 1)) { sh++; xor_val >>= 1; } + while (!(xor_val & 1)) { ++sh; xor_val >>= 1; } /* 1-, 2-, and 4-bit patterns are OK anywhere. */ @@ -5345,12 +5345,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { /* See if one-byte adjustments to any byte could produce this result. */ - for (i = 0; i < blen; i++) { + for (i = 0; i < blen; ++i) { u8 a = old_val >> (8 * i), b = new_val >> (8 * i); - if (a != b) { diffs++; ov = a; nv = b; } + if (a != b) { ++diffs; ov = a; nv = b; } } @@ -5369,12 +5369,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { diffs = 0; - for (i = 0; i < blen / 2; i++) { + for (i = 0; i < blen / 2; ++i) { u16 a = old_val >> (16 * i), b = new_val >> (16 * i); - if (a != b) { diffs++; ov = a; nv = b; } + if (a != b) { ++diffs; ov = a; nv = b; } } @@ -5427,9 +5427,9 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { /* See if one-byte insertions from interesting_8 over old_val could produce new_val. 
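     (such values are already exercised by the interesting-value stage, so
     the caller skips them to avoid duplicate executions)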
*/ - for (i = 0; i < blen; i++) { + for (i = 0; i < blen; ++i) { - for (j = 0; j < sizeof(interesting_8); j++) { + for (j = 0; j < sizeof(interesting_8); ++j) { u32 tval = (old_val & ~(0xff << (i * 8))) | (((u8)interesting_8[j]) << (i * 8)); @@ -5447,9 +5447,9 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { /* See if two-byte insertions over old_val could give us new_val. */ - for (i = 0; i < blen - 1; i++) { + for (i = 0; i < blen - 1; ++i) { - for (j = 0; j < sizeof(interesting_16) / 2; j++) { + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { u32 tval = (old_val & ~(0xffff << (i * 8))) | (((u16)interesting_16[j]) << (i * 8)); @@ -5476,7 +5476,7 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { /* See if four-byte insertions could produce the same result (LE only). */ - for (j = 0; j < sizeof(interesting_32) / 4; j++) + for (j = 0; j < sizeof(interesting_32) / 4; ++j) if (new_val == (u32)interesting_32[j]) return 1; } @@ -5588,7 +5588,7 @@ static u8 fuzz_one_original(char** argv) { } if (stop_soon || res != crash_mode) { - cur_skipped_paths++; + ++cur_skipped_paths; goto abandon_entry; } @@ -5606,7 +5606,7 @@ static u8 fuzz_one_original(char** argv) { FATAL("Unable to execute target application"); if (stop_soon) { - cur_skipped_paths++; + ++cur_skipped_paths; goto abandon_entry; } @@ -5639,7 +5639,7 @@ static u8 fuzz_one_original(char** argv) { orig_hit_cnt = queued_paths + unique_crashes; - for (stage_cur = 0 ; stage_cur < stage_max ; stage_cur++) { + for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) { size_t orig_size = (size_t) len; size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX)); if (mutated_size > 0) { @@ -5712,7 +5712,7 @@ static u8 fuzz_one_original(char** argv) { prev_cksum = queue_cur->exec_cksum; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -5759,7 +5759,7 @@ static u8 fuzz_one_original(char** argv) { final character and force output. */ if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - a_len++; + ++a_len; if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) maybe_add_auto(a_collect, a_len); @@ -5783,7 +5783,7 @@ static u8 fuzz_one_original(char** argv) { if (cksum != queue_cur->exec_cksum) { if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - a_len++; + ++a_len; } @@ -5804,7 +5804,7 @@ static u8 fuzz_one_original(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -5831,7 +5831,7 @@ static u8 fuzz_one_original(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -5875,7 +5875,7 @@ static u8 fuzz_one_original(char** argv) { if (EFF_APOS(len - 1) != 0) { eff_map[EFF_APOS(len - 1)] = 1; - eff_cnt++; + ++eff_cnt; } /* Walking byte. 
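     Flips each byte in turn; a byte whose flip changes the path checksum
     is marked in the effector map, and bytes with no effect are skipped
     by the more expensive deterministic stages that follow.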
*/ @@ -5886,7 +5886,7 @@ static u8 fuzz_one_original(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur; @@ -5913,7 +5913,7 @@ static u8 fuzz_one_original(char** argv) { if (cksum != queue_cur->exec_cksum) { eff_map[EFF_APOS(stage_cur)] = 1; - eff_cnt++; + ++eff_cnt; } } @@ -5957,7 +5957,7 @@ static u8 fuzz_one_original(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { /* Let's consult the effector map... */ @@ -5971,7 +5971,7 @@ static u8 fuzz_one_original(char** argv) { *(u16*)(out_buf + i) ^= 0xFFFF; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; *(u16*)(out_buf + i) ^= 0xFFFF; @@ -5994,7 +5994,7 @@ static u8 fuzz_one_original(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; i < len - 3; ++i) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && @@ -6008,7 +6008,7 @@ static u8 fuzz_one_original(char** argv) { *(u32*)(out_buf + i) ^= 0xFFFFFFFF; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; *(u32*)(out_buf + i) ^= 0xFFFFFFFF; @@ -6038,7 +6038,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u8 orig = out_buf[i]; @@ -6051,7 +6051,7 @@ skip_bitflip: stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u8 r = orig ^ (orig + j); @@ -6064,7 +6064,7 @@ skip_bitflip: out_buf[i] = orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6076,7 +6076,7 @@ skip_bitflip: out_buf[i] = orig - j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6102,7 +6102,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16*)(out_buf + i); @@ -6115,7 +6115,7 @@ skip_bitflip: stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), @@ -6135,9 +6135,9 @@ skip_bitflip: *(u16*)(out_buf + i) = orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig & 0xff) < j && !could_be_bitflip(r2)) { @@ -6145,7 +6145,7 @@ skip_bitflip: *(u16*)(out_buf + i) = orig - j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6160,7 +6160,7 @@ skip_bitflip: *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6170,7 +6170,7 @@ skip_bitflip: *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6196,7 +6196,7 @@ skip_bitflip: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32*)(out_buf + i); @@ -6210,7 +6210,7 @@ skip_bitflip: stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), @@ -6228,7 +6228,7 @@ skip_bitflip: *(u32*)(out_buf + i) 
= orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6238,7 +6238,7 @@ skip_bitflip: *(u32*)(out_buf + i) = orig - j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6252,7 +6252,7 @@ skip_bitflip: *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6262,7 +6262,7 @@ skip_bitflip: *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6294,7 +6294,7 @@ skip_arith: /* Setting 8-bit integers. */ - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u8 orig = out_buf[i]; @@ -6307,7 +6307,7 @@ skip_arith: stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_8); j++) { + for (j = 0; j < sizeof(interesting_8); ++j) { /* Skip if the value could be a product of bitflips or arithmetics. */ @@ -6323,7 +6323,7 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; out_buf[i] = orig; - stage_cur++; + ++stage_cur; } @@ -6345,7 +6345,7 @@ skip_arith: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16*)(out_buf + i); @@ -6358,7 +6358,7 @@ skip_arith: stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_16) / 2; j++) { + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { stage_cur_val = interesting_16[j]; @@ -6374,7 +6374,7 @@ skip_arith: *(u16*)(out_buf + i) = interesting_16[j]; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6387,7 +6387,7 @@ skip_arith: *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6427,7 +6427,7 @@ skip_arith: stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_32) / 4; j++) { + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { stage_cur_val = interesting_32[j]; @@ -6443,7 +6443,7 @@ skip_arith: *(u32*)(out_buf + i) = interesting_32[j]; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6456,7 +6456,7 @@ skip_arith: *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -6490,7 +6490,7 @@ skip_interest: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u32 last_len = 0; @@ -6501,7 +6501,7 @@ skip_interest: between writes at a particular offset determined by the outer loop. */ - for (j = 0; j < extras_cnt; j++) { + for (j = 0; j < extras_cnt; ++j) { /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. 
Also skip them if there's no room to insert the payload, if the token @@ -6523,7 +6523,7 @@ skip_interest: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } @@ -6548,11 +6548,11 @@ skip_interest: ex_tmp = ck_alloc(len + MAX_DICT_FILE); - for (i = 0; i <= len; i++) { + for (i = 0; i <= len; ++i) { stage_cur_byte = i; - for (j = 0; j < extras_cnt; j++) { + for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { stage_max--; @@ -6570,7 +6570,7 @@ skip_interest: goto abandon_entry; } - stage_cur++; + ++stage_cur; } @@ -6599,13 +6599,13 @@ skip_user_extras: orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u32 last_len = 0; stage_cur_byte = i; - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) { + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { /* See the comment in the earlier code; extras are sorted by size. */ @@ -6623,7 +6623,7 @@ skip_user_extras: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } @@ -6664,7 +6664,7 @@ python_stage: char* retbuf = NULL; size_t retlen = 0; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { struct queue_entry* target; u32 tid; u8* new_buf; @@ -6682,7 +6682,7 @@ retry_external_pick: while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) { target = target->next; - splicing_with++; + ++splicing_with; } if (!target) goto retry_external_pick; @@ -6780,13 +6780,13 @@ havoc_stage: /* We essentially just do several thousand runs (depending on perf_score) where we take the input file and make random stacked tweaks. */ - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); stage_cur_val = use_stacking; - for (i = 0; i < use_stacking; i++) { + for (i = 0; i < use_stacking; ++i) { switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) { @@ -7240,7 +7240,7 @@ retry_splicing: while (target && (target->len < 2 || target == queue_cur)) { target = target->next; - splicing_with++; + ++splicing_with; } if (!target) goto retry_splicing; @@ -7307,7 +7307,7 @@ abandon_entry: if (queue_cur->favored) pending_favored--; } - queue_cur->fuzz_level++; + ++queue_cur->fuzz_level; munmap(orig_in, queue_cur->len); @@ -7422,7 +7422,7 @@ static u8 pilot_fuzzing(char** argv) { } if (stop_soon || res != crash_mode) { - cur_skipped_paths++; + ++cur_skipped_paths; goto abandon_entry; } @@ -7440,7 +7440,7 @@ static u8 pilot_fuzzing(char** argv) { FATAL("Unable to execute target application"); if (stop_soon) { - cur_skipped_paths++; + ++cur_skipped_paths; goto abandon_entry; } @@ -7509,7 +7509,7 @@ static u8 pilot_fuzzing(char** argv) { prev_cksum = queue_cur->exec_cksum; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -7556,7 +7556,7 @@ static u8 pilot_fuzzing(char** argv) { final character and force output. 
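
The a_collect/a_len logic in this hunk and the next builds the "auto extras" dictionary: during walking bitflips, a run of consecutive bytes that all divert execution to one identical checksum is treated as an atomic token and handed to maybe_add_auto() once the run ends. A worked illustration (input and hashes hypothetical; MIN_AUTO_EXTRA assumed to be the default 3):

    /* Input "GIF89a\x01\x02...": flip the last bit of each byte in turn
       and hash the resulting trace.

         byte        0    1    2    3    4    5    6
         content     G    I    F    8    9    a    0x01
         trace hash  A    A    A    A    A    A    B

       Bytes 0..5 all produce the same new checksum A, so they behave as
       one indivisible token. When byte 6 breaks the run, a_len == 6 and
       a_collect holds "GIF89a", which maybe_add_auto() promotes into the
       extras list used by the dictionary stages further down. */
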
*/ if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - a_len++; + ++a_len; if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) maybe_add_auto(a_collect, a_len); @@ -7581,7 +7581,7 @@ static u8 pilot_fuzzing(char** argv) { if (cksum != queue_cur->exec_cksum) { if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - a_len++; + ++a_len; } @@ -7602,7 +7602,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -7635,7 +7635,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -7682,7 +7682,7 @@ static u8 pilot_fuzzing(char** argv) { if (EFF_APOS(len - 1) != 0) { eff_map[EFF_APOS(len - 1)] = 1; - eff_cnt++; + ++eff_cnt; } /* Walking byte. */ @@ -7695,7 +7695,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur; @@ -7722,7 +7722,7 @@ static u8 pilot_fuzzing(char** argv) { if (cksum != queue_cur->exec_cksum) { eff_map[EFF_APOS(stage_cur)] = 1; - eff_cnt++; + ++eff_cnt; } } @@ -7773,7 +7773,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { /* Let's consult the effector map... */ @@ -7787,7 +7787,7 @@ static u8 pilot_fuzzing(char** argv) { *(u16*)(out_buf + i) ^= 0xFFFF; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; *(u16*)(out_buf + i) ^= 0xFFFF; @@ -7815,7 +7815,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; i < len - 3; ++i) { /* Let's consult the effector map... 
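
The guard that follows is the consumer side of the effector map: a 16-bit flip at offset i is attempted only if one of the (at most two) blocks covered by bytes i and i+1 was marked effective during the walking-byte stage. With the 8-byte blocks sketched earlier:

    /* Bytes 7 and 8 straddle two blocks: EFF_APOS(7) == 0, EFF_APOS(8) == 1.
       The word flip at i == 7 therefore runs if either block ever changed
       the trace checksum; otherwise stage_max is decremented instead of
       spending an execution. */
    if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) {
      --stage_max;
      continue;
    }
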
*/ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && @@ -7829,7 +7829,7 @@ static u8 pilot_fuzzing(char** argv) { *(u32*)(out_buf + i) ^= 0xFFFFFFFF; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; *(u32*)(out_buf + i) ^= 0xFFFFFFFF; @@ -7867,7 +7867,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u8 orig = out_buf[i]; @@ -7880,7 +7880,7 @@ static u8 pilot_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u8 r = orig ^ (orig + j); @@ -7893,7 +7893,7 @@ static u8 pilot_fuzzing(char** argv) { out_buf[i] = orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -7905,7 +7905,7 @@ static u8 pilot_fuzzing(char** argv) { out_buf[i] = orig - j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -7938,7 +7938,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16*)(out_buf + i); @@ -7951,7 +7951,7 @@ static u8 pilot_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), @@ -7971,7 +7971,7 @@ static u8 pilot_fuzzing(char** argv) { *(u16*)(out_buf + i) = orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -7981,7 +7981,7 @@ static u8 pilot_fuzzing(char** argv) { *(u16*)(out_buf + i) = orig - j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -7996,7 +7996,7 @@ static u8 pilot_fuzzing(char** argv) { *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8006,7 +8006,7 @@ static u8 pilot_fuzzing(char** argv) { *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8037,7 +8037,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32*)(out_buf + i); @@ -8051,7 +8051,7 @@ static u8 pilot_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), @@ -8069,7 +8069,7 @@ static u8 pilot_fuzzing(char** argv) { *(u32*)(out_buf + i) = orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8093,7 +8093,7 @@ static u8 pilot_fuzzing(char** argv) { *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8103,7 +8103,7 @@ static u8 pilot_fuzzing(char** argv) { *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8140,7 +8140,7 @@ static u8 pilot_fuzzing(char** argv) { /* Setting 8-bit integers. 
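
The stage this comment opens walks interesting_8[] over every byte, and its skip guard composes both redundancy oracles: a candidate is dropped when a bitflip could produce it, or when it lies within ARITH_MAX of the original byte (values below assumed from upstream config.h):

    /* orig == 5: candidate 1 is skipped (5 ^ 1 == 4, a one-bit change);
       candidate 16 is not a flip pattern, but 16 - 5 == 11 <= ARITH_MAX
       (35), so the arithmetic stage already covered it; candidate 100
       survives both oracles and is actually executed. */
    if (could_be_bitflip(orig ^ (u8)interesting_8[j]) ||
        could_be_arith(orig, (u8)interesting_8[j], 1)) {

      --stage_max;
      continue;
    }
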
*/ - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u8 orig = out_buf[i]; @@ -8153,7 +8153,7 @@ static u8 pilot_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_8); j++) { + for (j = 0; j < sizeof(interesting_8); ++j) { /* Skip if the value could be a product of bitflips or arithmetics. */ @@ -8169,7 +8169,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; out_buf[i] = orig; - stage_cur++; + ++stage_cur; } @@ -8196,7 +8196,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16*)(out_buf + i); @@ -8209,7 +8209,7 @@ static u8 pilot_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_16) / 2; j++) { + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { stage_cur_val = interesting_16[j]; @@ -8225,7 +8225,7 @@ static u8 pilot_fuzzing(char** argv) { *(u16*)(out_buf + i) = interesting_16[j]; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8238,7 +8238,7 @@ static u8 pilot_fuzzing(char** argv) { *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8269,7 +8269,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32*)(out_buf + i); @@ -8283,7 +8283,7 @@ static u8 pilot_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_32) / 4; j++) { + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { stage_cur_val = interesting_32[j]; @@ -8299,7 +8299,7 @@ static u8 pilot_fuzzing(char** argv) { *(u32*)(out_buf + i) = interesting_32[j]; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8312,7 +8312,7 @@ static u8 pilot_fuzzing(char** argv) { *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -8353,7 +8353,7 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u32 last_len = 0; @@ -8364,7 +8364,7 @@ static u8 pilot_fuzzing(char** argv) { between writes at a particular offset determined by the outer loop. */ - for (j = 0; j < extras_cnt; j++) { + for (j = 0; j < extras_cnt; ++j) { /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. 
Also skip them if there's no room to insert the payload, if the token @@ -8386,7 +8386,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } @@ -8414,11 +8414,11 @@ static u8 pilot_fuzzing(char** argv) { ex_tmp = ck_alloc(len + MAX_DICT_FILE); - for (i = 0; i <= len; i++) { + for (i = 0; i <= len; ++i) { stage_cur_byte = i; - for (j = 0; j < extras_cnt; j++) { + for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { stage_max--; @@ -8436,7 +8436,7 @@ static u8 pilot_fuzzing(char** argv) { goto abandon_entry; } - stage_cur++; + ++stage_cur; } @@ -8466,13 +8466,13 @@ static u8 pilot_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u32 last_len = 0; stage_cur_byte = i; - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) { + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { /* See the comment in the earlier code; extras are sorted by size. */ @@ -8490,7 +8490,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } @@ -8600,20 +8600,20 @@ static u8 pilot_fuzzing(char** argv) { - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); stage_cur_val = use_stacking; - for (i = 0; i < operator_num; i++) + for (i = 0; i < operator_num; ++i) { stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; } - for (i = 0; i < use_stacking; i++) { + for (i = 0; i < use_stacking; ++i) { switch (select_algorithm()) { @@ -8921,7 +8921,7 @@ static u8 pilot_fuzzing(char** argv) { { u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; i++) + for (i = 0; i < 16; ++i) { if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i]) stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet; @@ -8979,7 +8979,7 @@ static u8 pilot_fuzzing(char** argv) { while (target && (target->len < 2 || target == queue_cur)) { target = target->next; - splicing_with++; + ++splicing_with; } if (!target) goto retry_splicing_puppet; @@ -9071,7 +9071,7 @@ static u8 pilot_fuzzing(char** argv) { temp_puppet_find = total_puppet_find; u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; i++) { + for (i = 0; i < operator_num; ++i) { double temp_eff = 0.0; if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i]) @@ -9091,7 +9091,7 @@ static u8 pilot_fuzzing(char** argv) { swarm_now = swarm_now + 1; if (swarm_now == swarm_num) { key_module = 1; - for (i = 0; i < operator_num; i++) { + for (i = 0; i < operator_num; ++i) { core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; @@ -9099,7 +9099,7 @@ static u8 pilot_fuzzing(char** argv) { double swarm_eff = 0.0; swarm_now = 0; - for (i = 0; i < swarm_num; i++) { + for (i = 0; i < swarm_num; ++i) { if (swarm_fitness[i] > swarm_eff) { swarm_eff = swarm_fitness[i]; swarm_now = i; @@ -9225,7 +9225,7 @@ static u8 core_fuzzing(char** argv) { } if (stop_soon || res != crash_mode) { - cur_skipped_paths++; + ++cur_skipped_paths; goto abandon_entry; } @@ -9243,7 +9243,7 @@ static u8 core_fuzzing(char** argv) { FATAL("Unable to 
execute target application"); if (stop_soon) { - cur_skipped_paths++; + ++cur_skipped_paths; goto abandon_entry; } @@ -9309,7 +9309,7 @@ static u8 core_fuzzing(char** argv) { prev_cksum = queue_cur->exec_cksum; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -9356,7 +9356,7 @@ static u8 core_fuzzing(char** argv) { final character and force output. */ if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - a_len++; + ++a_len; if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) maybe_add_auto(a_collect, a_len); @@ -9381,7 +9381,7 @@ static u8 core_fuzzing(char** argv) { if (cksum != queue_cur->exec_cksum) { if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - a_len++; + ++a_len; } @@ -9404,7 +9404,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -9433,7 +9433,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur >> 3; @@ -9478,7 +9478,7 @@ static u8 core_fuzzing(char** argv) { if (EFF_APOS(len - 1) != 0) { eff_map[EFF_APOS(len - 1)] = 1; - eff_cnt++; + ++eff_cnt; } /* Walking byte. */ @@ -9490,7 +9490,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { stage_cur_byte = stage_cur; @@ -9517,7 +9517,7 @@ static u8 core_fuzzing(char** argv) { if (cksum != queue_cur->exec_cksum) { eff_map[EFF_APOS(stage_cur)] = 1; - eff_cnt++; + ++eff_cnt; } } @@ -9565,7 +9565,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { /* Let's consult the effector map... */ @@ -9579,7 +9579,7 @@ static u8 core_fuzzing(char** argv) { *(u16*)(out_buf + i) ^= 0xFFFF; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; *(u16*)(out_buf + i) ^= 0xFFFF; @@ -9605,7 +9605,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; i < len - 3; ++i) { /* Let's consult the effector map... 
*/ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && @@ -9619,7 +9619,7 @@ static u8 core_fuzzing(char** argv) { *(u32*)(out_buf + i) ^= 0xFFFFFFFF; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; *(u32*)(out_buf + i) ^= 0xFFFFFFFF; @@ -9653,7 +9653,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u8 orig = out_buf[i]; @@ -9666,7 +9666,7 @@ static u8 core_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u8 r = orig ^ (orig + j); @@ -9679,7 +9679,7 @@ static u8 core_fuzzing(char** argv) { out_buf[i] = orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9691,7 +9691,7 @@ static u8 core_fuzzing(char** argv) { out_buf[i] = orig - j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9721,7 +9721,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16*)(out_buf + i); @@ -9734,7 +9734,7 @@ static u8 core_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), @@ -9754,7 +9754,7 @@ static u8 core_fuzzing(char** argv) { *(u16*)(out_buf + i) = orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9764,7 +9764,7 @@ static u8 core_fuzzing(char** argv) { *(u16*)(out_buf + i) = orig - j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9779,7 +9779,7 @@ static u8 core_fuzzing(char** argv) { *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9789,7 +9789,7 @@ static u8 core_fuzzing(char** argv) { *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9817,7 +9817,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32*)(out_buf + i); @@ -9831,7 +9831,7 @@ static u8 core_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 1; j <= ARITH_MAX; j++) { + for (j = 1; j <= ARITH_MAX; ++j) { u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), @@ -9849,7 +9849,7 @@ static u8 core_fuzzing(char** argv) { *(u32*)(out_buf + i) = orig + j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9859,7 +9859,7 @@ static u8 core_fuzzing(char** argv) { *(u32*)(out_buf + i) = orig - j; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9873,7 +9873,7 @@ static u8 core_fuzzing(char** argv) { *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9883,7 +9883,7 @@ static u8 core_fuzzing(char** argv) { *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -9919,7 +9919,7 @@ 
static u8 core_fuzzing(char** argv) { /* Setting 8-bit integers. */ - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u8 orig = out_buf[i]; @@ -9932,7 +9932,7 @@ static u8 core_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_8); j++) { + for (j = 0; j < sizeof(interesting_8); ++j) { /* Skip if the value could be a product of bitflips or arithmetics. */ @@ -9948,7 +9948,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; out_buf[i] = orig; - stage_cur++; + ++stage_cur; } @@ -9973,7 +9973,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; i++) { + for (i = 0; i < len - 1; ++i) { u16 orig = *(u16*)(out_buf + i); @@ -9986,7 +9986,7 @@ static u8 core_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_16) / 2; j++) { + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { stage_cur_val = interesting_16[j]; @@ -10002,7 +10002,7 @@ static u8 core_fuzzing(char** argv) { *(u16*)(out_buf + i) = interesting_16[j]; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -10015,7 +10015,7 @@ static u8 core_fuzzing(char** argv) { *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -10045,7 +10045,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 3; i++) { + for (i = 0; i < len - 3; ++i) { u32 orig = *(u32*)(out_buf + i); @@ -10059,7 +10059,7 @@ static u8 core_fuzzing(char** argv) { stage_cur_byte = i; - for (j = 0; j < sizeof(interesting_32) / 4; j++) { + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { stage_cur_val = interesting_32[j]; @@ -10075,7 +10075,7 @@ static u8 core_fuzzing(char** argv) { *(u32*)(out_buf + i) = interesting_32[j]; if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -10088,7 +10088,7 @@ static u8 core_fuzzing(char** argv) { *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } else stage_max--; @@ -10125,7 +10125,7 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u32 last_len = 0; @@ -10136,7 +10136,7 @@ static u8 core_fuzzing(char** argv) { between writes at a particular offset determined by the outer loop. */ - for (j = 0; j < extras_cnt; j++) { + for (j = 0; j < extras_cnt; ++j) { /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. 
Also skip them if there's no room to insert the payload, if the token @@ -10158,7 +10158,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } @@ -10186,11 +10186,11 @@ static u8 core_fuzzing(char** argv) { ex_tmp = ck_alloc(len + MAX_DICT_FILE); - for (i = 0; i <= len; i++) { + for (i = 0; i <= len; ++i) { stage_cur_byte = i; - for (j = 0; j < extras_cnt; j++) { + for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { stage_max--; @@ -10208,7 +10208,7 @@ static u8 core_fuzzing(char** argv) { goto abandon_entry; } - stage_cur++; + ++stage_cur; } @@ -10238,13 +10238,13 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len; i++) { + for (i = 0; i < len; ++i) { u32 last_len = 0; stage_cur_byte = i; - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) { + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { /* See the comment in the earlier code; extras are sorted by size. */ @@ -10262,7 +10262,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + ++stage_cur; } @@ -10320,7 +10320,7 @@ static u8 core_fuzzing(char** argv) { s32 temp_len_puppet; cur_ms_lv = get_cur_time(); - //for (; swarm_now < swarm_num; swarm_now++) + //for (; swarm_now < swarm_num; ++swarm_now) { if (key_puppet == 1) { if (unlikely(orig_hit_cnt_puppet == 0)) { @@ -10356,16 +10356,16 @@ static u8 core_fuzzing(char** argv) { orig_hit_cnt = queued_paths + unique_crashes; havoc_queued = queued_paths; - for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); stage_cur_val = use_stacking; - for (i = 0; i < operator_num; i++) { + for (i = 0; i < operator_num; ++i) { core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i]; } - for (i = 0; i < use_stacking; i++) { + for (i = 0; i < use_stacking; ++i) { switch (select_algorithm()) { @@ -10657,7 +10657,7 @@ static u8 core_fuzzing(char** argv) { { u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; i++) + for (i = 0; i < 16; ++i) { if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i]) core_operator_finds_puppet_v2[i] += temp_temp_puppet; @@ -10711,7 +10711,7 @@ static u8 core_fuzzing(char** argv) { while (target && (target->len < 2 || target == queue_cur)) { target = target->next; - splicing_with++; + ++splicing_with; } if (!target) goto retry_splicing_puppet; @@ -10797,7 +10797,7 @@ static u8 core_fuzzing(char** argv) { new_hit_cnt = queued_paths + unique_crashes; u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; i++) + for (i = 0; i < operator_num; ++i) { core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; @@ -10826,27 +10826,27 @@ void pso_updating(void) { w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end; int tmp_swarm, i, j; u64 temp_operator_finds_puppet = 0; - for (i = 0; i < operator_num; i++) + for (i = 0; i < operator_num; ++i) { operator_finds_puppet[i] = core_operator_finds_puppet[i]; - for (j = 0; j < swarm_num; j++) + for (j = 0; j < swarm_num; ++j) { operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i]; } temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i]; } - for (i = 0; i < operator_num; i++) + for (i = 0; i < 
operator_num; ++i) { if (operator_finds_puppet[i]) G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet)); } - for (tmp_swarm = 0; tmp_swarm < swarm_num; tmp_swarm++) + for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) { double x_temp = 0.0; - for (i = 0; i < operator_num; i++) + for (i = 0; i < operator_num; ++i) { probability_now[tmp_swarm][i] = 0.0; v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); @@ -10858,7 +10858,7 @@ void pso_updating(void) { x_temp += x_now[tmp_swarm][i]; } - for (i = 0; i < operator_num; i++) + for (i = 0; i < operator_num; ++i) { x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; if (likely(i != 0)) @@ -11102,7 +11102,7 @@ EXP_ST void check_binary(u8* fname) { cur_elem = ck_alloc(delim - env_path + 1); memcpy(cur_elem, env_path, delim - env_path); - delim++; + ++delim; } else cur_elem = ck_strdup(env_path); @@ -11504,7 +11504,7 @@ static void setup_cmdline_file(char** argv) { while (argv[i]) { fprintf(cmdline_file, "%s\n", argv[i]); - i++; + ++i; } fclose(cmdline_file); @@ -11708,7 +11708,7 @@ static void get_core_count(void) { if (!f) return; while (fgets(tmp, sizeof(tmp), f)) - if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++; + if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) ++cpu_core_count; fclose(f); @@ -11724,7 +11724,7 @@ static void get_core_count(void) { /* Add ourselves, since the 1-minute average doesn't include that yet. */ - cur_runnable++; + ++cur_runnable; #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ @@ -11779,7 +11779,7 @@ static void fix_up_sync(void) { if (!isalnum(*x) && *x != '_' && *x != '-') FATAL("Non-alphanumeric fuzzer ID specified via -S or -M"); - x++; + ++x; } @@ -11956,12 +11956,12 @@ static void save_cmdline(u32 argc, char** argv) { u32 len = 1, i; u8* buf; - for (i = 0; i < argc; i++) + for (i = 0; i < argc; ++i) len += strlen(argv[i]) + 1; buf = orig_cmdline = ck_alloc(len); - for (i = 0; i < argc; i++) { + for (i = 0; i < argc; ++i) { u32 l = strlen(argv[i]); @@ -11978,7 +11978,7 @@ static void save_cmdline(u32 argc, char** argv) { int stricmp(char const *a, char const *b) { int d; - for (;; a++, b++) { + for (;; ++a, ++b) { d = tolower(*a) - tolower(*b); if (d != 0 || !*a) return d; @@ -12260,11 +12260,11 @@ int main(int argc, char** argv) { if (g_now > g_max) g_now = 0; w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end; - for (tmp_swarm = 0; tmp_swarm < swarm_num; tmp_swarm++) { + for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) { double total_puppet_temp = 0.0; swarm_fitness[tmp_swarm] = 0.0; - for (i = 0; i < operator_num; i++) { + for (i = 0; i < operator_num; ++i) { stage_finds_puppet[tmp_swarm][i] = 0; probability_now[tmp_swarm][i] = 0.0; x_now[tmp_swarm][i] = ((double)(random() % 7000)*0.0001 + 0.1); @@ -12276,7 +12276,7 @@ int main(int argc, char** argv) { } - for (i = 0; i < operator_num; i++) { + for (i = 0; i < operator_num; ++i) { stage_cycles_puppet_v2[tmp_swarm][i] = stage_cycles_puppet[tmp_swarm][i]; stage_finds_puppet_v2[tmp_swarm][i] = stage_finds_puppet[tmp_swarm][i]; x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp; @@ -12284,7 +12284,7 @@ int main(int argc, char** argv) { double x_temp = 0.0; - for (i = 0; i < operator_num; i++) { + for (i = 0; i < operator_num; ++i) { probability_now[tmp_swarm][i] = 0.0; v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + 
RAND_C * (G_best[i] - x_now[tmp_swarm][i]); @@ -12298,7 +12298,7 @@ int main(int argc, char** argv) { x_temp += x_now[tmp_swarm][i]; } - for (i = 0; i < operator_num; i++) { + for (i = 0; i < operator_num; ++i) { x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; if (likely(i != 0)) probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; @@ -12309,7 +12309,7 @@ int main(int argc, char** argv) { FATAL("ERROR probability"); } - for (i = 0; i < operator_num; i++) { + for (i = 0; i < operator_num; ++i) { core_operator_finds_puppet[i] = 0; core_operator_finds_puppet_v2[i] = 0; core_operator_cycles_puppet[i] = 0; @@ -12482,7 +12482,7 @@ int main(int argc, char** argv) { break; } - i++; + ++i; } } @@ -12530,13 +12530,13 @@ int main(int argc, char** argv) { if (!queue_cur) { - queue_cycle++; + ++queue_cycle; current_entry = 0; cur_skipped_paths = 0; queue_cur = queue; while (seek_to) { - current_entry++; + ++current_entry; seek_to--; queue_cur = queue_cur->next; } @@ -12553,7 +12553,7 @@ int main(int argc, char** argv) { if (queued_paths == prev_queued) { - if (use_splicing) cycles_wo_finds++; else use_splicing = 1; + if (use_splicing) ++cycles_wo_finds; else use_splicing = 1; } else cycles_wo_finds = 0; @@ -12578,7 +12578,7 @@ int main(int argc, char** argv) { if (stop_soon) break; queue_cur = queue_cur->next; - current_entry++; + ++current_entry; if (most_time_key == 1) { u64 cur_ms_lv = get_cur_time(); From f5d4912ca837d5efcd1aac4d436c7563c7614646 Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Sun, 11 Aug 2019 11:56:28 +0200 Subject: [PATCH 17/83] performance optimization predecrement instead of postdecrement --- afl-fuzz.c | 132 ++++++++++++++++++++++++++--------------------------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/afl-fuzz.c b/afl-fuzz.c index be44c69e..c8b4185a 100644 --- a/afl-fuzz.c +++ b/afl-fuzz.c @@ -1898,7 +1898,7 @@ static void load_extras_file(u8* fname, u32* min_len, u32* max_len, /* All other lines must end with '"', which we can consume. */ - rptr--; + --rptr; if (rptr < lptr || *rptr != '"') FATAL("Malformed name=\"value\" pair in line %u.", cur_line); @@ -5209,7 +5209,7 @@ static u32 calculate_score(struct queue_entry* q) { } else if (q->handicap) { perf_score *= 2; - q->handicap--; + --q->handicap; } @@ -5962,7 +5962,7 @@ static u8 fuzz_one_original(char** argv) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max--; + --stage_max; continue; } @@ -5999,7 +5999,7 @@ static u8 fuzz_one_original(char** argv) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max--; + --stage_max; continue; } @@ -6066,7 +6066,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; r = orig ^ (orig - j); @@ -6078,7 +6078,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; out_buf[i] = orig; @@ -6147,7 +6147,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; /* Big endian comes next. Same deal. 
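
A note on this patch's `x++` -> `++x` and `x--` -> `--x` conversions: for C scalars whose result is discarded, gcc and clang emit identical code for both forms, so the gain here is mainly consistency (plus a habit that does pay off for C++ iterators, where post-increment may materialize a temporary). Easy to verify, assuming a gcc or clang toolchain:

    /* pre_post.c -- compile both variants and diff the assembly:
         cc -O2 -S -DPRE  -o pre.s  pre_post.c
         cc -O2 -S        -o post.s pre_post.c
         diff pre.s post.s            # expected: no output */
    int sum(const int *a, int n) {
      int s = 0, i = 0;
    #ifdef PRE
      while (i < n) { s += a[i]; ++i; }
    #else
      while (i < n) { s += a[i]; i++; }
    #endif
      return s;
    }
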
*/ @@ -6162,7 +6162,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig >> 8) < j && !could_be_bitflip(r4)) { @@ -6172,7 +6172,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; *(u16*)(out_buf + i) = orig; @@ -6230,7 +6230,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { @@ -6240,7 +6240,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; /* Big endian next. */ @@ -6254,7 +6254,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { @@ -6264,7 +6264,7 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; *(u32*)(out_buf + i) = orig; @@ -6313,7 +6313,7 @@ skip_arith: if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || could_be_arith(orig, (u8)interesting_8[j], 1)) { - stage_max--; + --stage_max; continue; } @@ -6376,7 +6376,7 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && @@ -6389,7 +6389,7 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; } @@ -6445,7 +6445,7 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && @@ -6458,7 +6458,7 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; } @@ -6513,7 +6513,7 @@ skip_interest: !memcmp(extras[j].data, out_buf + i, extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - stage_max--; + --stage_max; continue; } @@ -6555,7 +6555,7 @@ skip_interest: for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { - stage_max--; + --stage_max; continue; } @@ -6613,7 +6613,7 @@ skip_user_extras: !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { - stage_max--; + --stage_max; continue; } @@ -7302,9 +7302,9 @@ abandon_entry: cycle and have not seen this entry before. */ if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { - pending_not_fuzzed--; + --pending_not_fuzzed; queue_cur->was_fuzzed = 1; - if (queue_cur->favored) pending_favored--; + if (queue_cur->favored) --pending_favored; } ++queue_cur->fuzz_level; @@ -7778,7 +7778,7 @@ static u8 pilot_fuzzing(char** argv) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max--; + --stage_max; continue; } @@ -7820,7 +7820,7 @@ static u8 pilot_fuzzing(char** argv) { /* Let's consult the effector map... 
*/ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max--; + --stage_max; continue; } @@ -7895,7 +7895,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; r = orig ^ (orig - j); @@ -7907,7 +7907,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; out_buf[i] = orig; @@ -7973,7 +7973,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig & 0xff) < j && !could_be_bitflip(r2)) { @@ -7983,7 +7983,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; /* Big endian comes next. Same deal. */ @@ -7998,7 +7998,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig >> 8) < j && !could_be_bitflip(r4)) { @@ -8008,7 +8008,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; *(u16*)(out_buf + i) = orig; @@ -8071,7 +8071,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { @@ -8081,7 +8081,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; stage_cur++; - } else stage_max--; + } else --stage_max; /* Big endian next. 
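
The `(orig & 0xffff) < j` test just above is a borrow check: a 32-bit `orig - j` is only probed when the subtraction would borrow out of the low 16 bits, since otherwise the identical byte change is already produced by the narrower 16-bit stage at the same offset (the addition side uses the mirrored carry test). Concretely:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
      uint32_t j = 2;
      uint32_t a = 0x00010000;   /* a - j == 0x0000fffe: borrows into the
                                    upper half, unreachable for a 16-bit
                                    probe at this offset */
      uint32_t b = 0x00010005;   /* b - j == 0x00010003: only the low
                                    byte moves, left to narrower stages */
      printf("%d %d\n", (a & 0xffff) < j, (b & 0xffff) < j);   /* 1 0 */
      return 0;
    }
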
*/ @@ -8095,7 +8095,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { @@ -8105,7 +8105,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; *(u32*)(out_buf + i) = orig; @@ -8159,7 +8159,7 @@ static u8 pilot_fuzzing(char** argv) { if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || could_be_arith(orig, (u8)interesting_8[j], 1)) { - stage_max--; + --stage_max; continue; } @@ -8227,7 +8227,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && @@ -8240,7 +8240,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; } @@ -8301,7 +8301,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && @@ -8314,7 +8314,7 @@ static u8 pilot_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; } @@ -8376,7 +8376,7 @@ static u8 pilot_fuzzing(char** argv) { !memcmp(extras[j].data, out_buf + i, extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - stage_max--; + --stage_max; continue; } @@ -8421,7 +8421,7 @@ static u8 pilot_fuzzing(char** argv) { for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { - stage_max--; + --stage_max; continue; } @@ -8480,7 +8480,7 @@ static u8 pilot_fuzzing(char** argv) { !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { - stage_max--; + --stage_max; continue; } @@ -9041,8 +9041,8 @@ static u8 pilot_fuzzing(char** argv) { // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { // queue_cur->was_fuzzed = 1; - // pending_not_fuzzed--; - // if (queue_cur->favored) pending_favored--; + // --pending_not_fuzzed; + // if (queue_cur->favored) --pending_favored; // } munmap(orig_in, queue_cur->len); @@ -9570,7 +9570,7 @@ static u8 core_fuzzing(char** argv) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max--; + --stage_max; continue; } @@ -9610,7 +9610,7 @@ static u8 core_fuzzing(char** argv) { /* Let's consult the effector map... 
*/ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max--; + --stage_max; continue; } @@ -9681,7 +9681,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; r = orig ^ (orig - j); @@ -9693,7 +9693,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; out_buf[i] = orig; @@ -9756,7 +9756,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig & 0xff) < j && !could_be_bitflip(r2)) { @@ -9766,7 +9766,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; /* Big endian comes next. Same deal. */ @@ -9781,7 +9781,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig >> 8) < j && !could_be_bitflip(r4)) { @@ -9791,7 +9791,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; *(u16*)(out_buf + i) = orig; @@ -9851,7 +9851,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { @@ -9861,7 +9861,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; /* Big endian next. 
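
The SWAP16(SWAP16(orig) +/- j) and SWAP32(SWAP32(orig) +/- j) pattern in these "big endian" hunks performs the arithmetic as a big-endian consumer of the buffer would see it: swap to big-endian, add or subtract, swap back before storing into the little-endian buffer. The swap helpers are plain byte reversals, roughly as in upstream types.h (paraphrased; SWAP32 is the analogous four-byte reversal):

    #define SWAP16(x) ((uint16_t)((((uint16_t)(x) << 8) & 0xff00) | \
                                  (((uint16_t)(x) >> 8) & 0x00ff)))

    /* Example: the u16 0x0100 sits in the buffer as bytes 00 01, which a
       big-endian parser reads as 1. SWAP16(0x0100) == 0x0001; adding
       j == 1 gives 0x0002; SWAP16 back == 0x0200, stored as bytes 00 02,
       i.e. the parser's value incremented by one. */
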
*/ @@ -9875,7 +9875,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { @@ -9885,7 +9885,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; *(u32*)(out_buf + i) = orig; @@ -9938,7 +9938,7 @@ static u8 core_fuzzing(char** argv) { if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || could_be_arith(orig, (u8)interesting_8[j], 1)) { - stage_max--; + --stage_max; continue; } @@ -10004,7 +10004,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && @@ -10017,7 +10017,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; } @@ -10077,7 +10077,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && @@ -10090,7 +10090,7 @@ static u8 core_fuzzing(char** argv) { if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else stage_max--; + } else --stage_max; } @@ -10148,7 +10148,7 @@ static u8 core_fuzzing(char** argv) { !memcmp(extras[j].data, out_buf + i, extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - stage_max--; + --stage_max; continue; } @@ -10193,7 +10193,7 @@ static u8 core_fuzzing(char** argv) { for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { - stage_max--; + --stage_max; continue; } @@ -10252,7 +10252,7 @@ static u8 core_fuzzing(char** argv) { !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { - stage_max--; + --stage_max; continue; } @@ -12537,7 +12537,7 @@ int main(int argc, char** argv) { while (seek_to) { ++current_entry; - seek_to--; + --seek_to; queue_cur = queue_cur->next; } From f63318a20f5ed974de5f8068a67d1292c64ca776 Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Sun, 11 Aug 2019 15:40:53 +0200 Subject: [PATCH 18/83] several code cleanups: avoid #if in macro parameters avoid arithmetic with void pointers (undefined behaviour) avoid some shadowed variables --- afl-analyze.c | 4 +- afl-common.c | 1 + afl-fuzz.c | 171 +++++++++++++++++++++++++------------------------- afl-tmin.c | 4 +- alloc-inl.h | 26 ++++---- debug.h | 6 +- 6 files changed, 107 insertions(+), 105 deletions(-) diff --git a/afl-analyze.c b/afl-analyze.c index 53b694ec..af93150e 100644 --- a/afl-analyze.c +++ b/afl-analyze.c @@ -738,8 +738,8 @@ static void usage(u8* argv0) { "Execution control settings:\n\n" " -f file - input file read by the tested program (stdin)\n" - " -t msec - timeout for each run (%u ms)\n" - " -m megs - memory limit for child process (%u MB)\n" + " -t msec - timeout for each run (%d ms)\n" + " -m megs - memory limit for child process (%d MB)\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use unicorn-based instrumentation (Unicorn mode)\n\n" diff --git a/afl-common.c b/afl-common.c index 
1c5e5bfe..5e2d0628 100644 --- a/afl-common.c +++ b/afl-common.c @@ -28,6 +28,7 @@ void detect_file_args(char** argv, u8* prog_in) { cwd = getcwd(buf, (size_t)size); /* portable version */ } else { PFATAL("getcwd() failed"); + cwd = 0; /* for dumb compilers */ } #endif diff --git a/afl-fuzz.c b/afl-fuzz.c index c8b4185a..3cae3fc5 100644 --- a/afl-fuzz.c +++ b/afl-fuzz.c @@ -445,7 +445,6 @@ static PyObject *py_functions[PY_FUNC_COUNT]; static int init_py() { Py_Initialize(); u8* module_name = getenv("AFL_PYTHON_MODULE"); - u8 py_notrim = 0; if (module_name) { PyObject* py_name = PyString_FromString(module_name); @@ -454,6 +453,7 @@ static int init_py() { Py_DECREF(py_name); if (py_module != NULL) { + u8 py_notrim = 0; py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init"); py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz"); py_functions[PY_FUNC_INIT_TRIM] = PyObject_GetAttrString(py_module, "init_trim"); @@ -529,9 +529,9 @@ static void finalize_py() { } static void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** ret, size_t* retlen) { - PyObject *py_args, *py_value; if (py_module != NULL) { + PyObject *py_args, *py_value; py_args = PyTuple_New(2); py_value = PyByteArray_FromStringAndSize(buf, buflen); if (!py_value) { @@ -1026,7 +1026,7 @@ static u8* DTD(u64 cur_ms, u64 event_ms) { t_m = (delta / 1000 / 60) % 60; t_s = (delta / 1000) % 60; - sprintf(tmp, "%s days, %u hrs, %u min, %u sec", DI(t_d), t_h, t_m, t_s); + sprintf(tmp, "%s days, %d hrs, %d min, %d sec", DI(t_d), t_h, t_m, t_s); return tmp; } @@ -1086,7 +1086,6 @@ static void mark_as_variable(struct queue_entry* q) { static void mark_as_redundant(struct queue_entry* q, u8 state) { u8* fn; - s32 fd; if (state == q->fs_redundant) return; @@ -1097,6 +1096,8 @@ static void mark_as_redundant(struct queue_entry* q, u8 state) { if (state) { + s32 fd; + fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", fn); close(fd); @@ -1749,12 +1750,12 @@ static void read_testcases(void) { struct dirent **nl; s32 nl_cnt; u32 i; - u8* fn; + u8* fn1; /* Auto-detect non-in-place resumption attempts. */ - fn = alloc_printf("%s/queue", in_dir); - if (!access(fn, F_OK)) in_dir = fn; else ck_free(fn); + fn1 = alloc_printf("%s/queue", in_dir); + if (!access(fn1, F_OK)) in_dir = fn1; else ck_free(fn1); ACTF("Scanning '%s'...", in_dir); @@ -1789,28 +1790,28 @@ static void read_testcases(void) { struct stat st; - u8* fn = alloc_printf("%s/%s", in_dir, nl[i]->d_name); + u8* fn2 = alloc_printf("%s/%s", in_dir, nl[i]->d_name); u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name); u8 passed_det = 0; free(nl[i]); /* not tracked */ - if (lstat(fn, &st) || access(fn, R_OK)) - PFATAL("Unable to access '%s'", fn); + if (lstat(fn2, &st) || access(fn2, R_OK)) + PFATAL("Unable to access '%s'", fn2); /* This also takes care of . and .. 
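
The fn -> fn1/fn2 renames in this read_testcases() hunk address the "shadowed variables" item from the commit message: the loop-local `u8* fn` hid the function-scope one, which is legal C but invites touching the wrong object and trips -Wshadow. The hazard in miniature:

    #include <stdio.h>

    int main(void) {
      const char *fn = "queue";          /* outer */
      for (int i = 0; i < 1; ++i) {
        const char *fn = "queue/entry";  /* shadows the outer fn;
                                            -Wshadow flags this */
        printf("%s\n", fn);              /* inner one wins here */
      }
      printf("%s\n", fn);                /* outer again: "queue" */
      return 0;
    }
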
*/ - if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn, "/README.txt")) { + if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn2, "/README.txt")) { - ck_free(fn); + ck_free(fn2); ck_free(dfn); continue; } if (st.st_size > MAX_FILE) - FATAL("Test case '%s' is too big (%s, limit is %s)", fn, + FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, DMS(st.st_size), DMS(MAX_FILE)); /* Check for metadata that indicates that deterministic fuzzing @@ -1821,7 +1822,7 @@ static void read_testcases(void) { if (!access(dfn, F_OK)) passed_det = 1; ck_free(dfn); - add_to_queue(fn, st.st_size, passed_det); + add_to_queue(fn2, st.st_size, passed_det); } @@ -2093,7 +2094,7 @@ check_and_sort: DMS(max_len)); if (extras_cnt > MAX_DET_EXTRAS) - WARNF("More than %u tokens - will use them probabilistically.", + WARNF("More than %d tokens - will use them probabilistically.", MAX_DET_EXTRAS); } @@ -2491,6 +2492,15 @@ EXP_ST void init_forkserver(char** argv) { } else if (!mem_limit) { +#ifdef __APPLE__ +#define MSG_FORK_ON_APPLE \ + " - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" \ + " break afl-fuzz performance optimizations when running platform-specific\n" \ + " targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" +#else +#define MSG_FORK_ON_APPLE "" +#endif + SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, before receiving any input\n" " from the fuzzer! There are several probable explanations:\n\n" @@ -2498,19 +2508,21 @@ EXP_ST void init_forkserver(char** argv) { " - The binary is just buggy and explodes entirely on its own. If so, you\n" " need to fix the underlying problem or find a better replacement.\n\n" -#ifdef __APPLE__ - - " - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" - " break afl-fuzz performance optimizations when running platform-specific\n" - " targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" - -#endif /* __APPLE__ */ + MSG_FORK_ON_APPLE " - Less likely, there is a horrible bug in the fuzzer. If other options\n" " fail, poke for troubleshooting tips.\n"); } else { +#ifdef RLIMIT_AS +#define MSG_ULIMIT_USAGE \ + " ( ulimit -Sv $[%llu << 10];" +#else +#define MSG_ULIMIT_USAGE \ + " ( ulimit -Sd $[%llu << 10];" +#endif /* ^RLIMIT_AS */ + SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, before receiving any input\n" " from the fuzzer! There are several probable explanations:\n\n" @@ -2520,11 +2532,7 @@ EXP_ST void init_forkserver(char** argv) { " the limit with the -m setting in the command line. A simple way confirm\n" " this diagnosis would be:\n\n" -#ifdef RLIMIT_AS - " ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n" -#else - " ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n" -#endif /* ^RLIMIT_AS */ + MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" " estimate the required amount of virtual memory for the binary.\n\n" @@ -2532,13 +2540,7 @@ EXP_ST void init_forkserver(char** argv) { " - The binary is just buggy and explodes entirely on its own. If so, you\n" " need to fix the underlying problem or find a better replacement.\n\n" -#ifdef __APPLE__ - - " - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" - " break afl-fuzz performance optimizations when running platform-specific\n" - " targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" - -#endif /* __APPLE__ */ + MSG_FORK_ON_APPLE " - Less likely, there is a horrible bug in the fuzzer. 
If other options\n" " fail, poke for troubleshooting tips.\n", @@ -2546,6 +2548,7 @@ EXP_ST void init_forkserver(char** argv) { } + FATAL("Fork server crashed with signal %d", WTERMSIG(status)); } @@ -2579,11 +2582,7 @@ EXP_ST void init_forkserver(char** argv) { " fault in the dynamic linker. This can be fixed with the -m option. A\n" " simple way to confirm the diagnosis may be:\n\n" -#ifdef RLIMIT_AS - " ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n" -#else - " ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n" -#endif /* ^RLIMIT_AS */ + MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" " estimate the required amount of virtual memory for the binary.\n\n" @@ -2864,7 +2863,8 @@ static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { if (skip_at) ck_write(fd, mem, skip_at, out_file); - if (tail_len) ck_write(fd, mem + skip_at + skip_len, tail_len, out_file); + u8 *memu8 = mem; + if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file); if (!out_file) { @@ -3158,23 +3158,13 @@ static void perform_dry_run(char** argv) { " bumping it up with the -m setting in the command line. If in doubt,\n" " try something along the lines of:\n\n" -#ifdef RLIMIT_AS - " ( ulimit -Sv $[%llu << 10]; /path/to/binary [...] for troubleshooting tips.\n", @@ -3190,18 +3180,14 @@ static void perform_dry_run(char** argv) { " so, please remove it. The fuzzer should be seeded with interesting\n" " inputs - but not ones that cause an outright crash.\n\n" -#ifdef __APPLE__ - - " - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" - " break afl-fuzz performance optimizations when running platform-specific\n" - " binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" - -#endif /* __APPLE__ */ + MSG_FORK_ON_APPLE " - Least likely, there is a horrible bug in the fuzzer. If other options\n" " fail, poke for troubleshooting tips.\n"); } +#undef MSG_ULIMIT_USAGE +#undef MSG_FORK_ON_APPLE FATAL("Test case '%s' results in a crash", fn); @@ -3393,20 +3379,20 @@ static u8* describe_op(u8 hnb) { sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time); if (splicing_with >= 0) - sprintf(ret + strlen(ret), "+%06u", splicing_with); + sprintf(ret + strlen(ret), "+%06d", splicing_with); sprintf(ret + strlen(ret), ",op:%s", stage_short); if (stage_cur_byte >= 0) { - sprintf(ret + strlen(ret), ",pos:%u", stage_cur_byte); + sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte); if (stage_val_type != STAGE_VAL_NONE) sprintf(ret + strlen(ret), ",val:%s%+d", (stage_val_type == STAGE_VAL_BE) ? "be:" : "", stage_cur_val); - } else sprintf(ret + strlen(ret), ",rep:%u", stage_cur_val); + } else sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val); } @@ -4017,21 +4003,21 @@ static void maybe_delete_out_dir(void) { if (f) { - u64 start_time, last_update; + u64 start_time2, last_update; if (fscanf(f, "start_time : %llu\n" - "last_update : %llu\n", &start_time, &last_update) != 2) + "last_update : %llu\n", &start_time2, &last_update) != 2) FATAL("Malformed data in '%s'", fn); fclose(f); /* Let's see how much work is at stake. */ - if (!in_place_resume && last_update - start_time > OUTPUT_GRACE * 60) { + if (!in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) { SAYF("\n" cLRD "[-] " cRST "The job output directory already exists and contains the results of more\n" - " than %u minutes worth of fuzzing. 
To avoid data loss, afl-fuzz will *NOT*\n" + " than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n" " automatically delete this data for you.\n\n" " If you wish to start a new session, remove or rename the directory manually,\n" @@ -4475,7 +4461,7 @@ static void show_stats(void) { together, but then cram them into a fixed-width field - so we need to put them in a temporary buffer first. */ - sprintf(tmp, "%s%s%d (%0.02f%%)", DI(current_entry), + sprintf(tmp, "%s%s%u (%0.02f%%)", DI(current_entry), queue_cur->favored ? "." : "*", queue_cur->fuzz_level, ((double)current_entry * 100) / queued_paths); @@ -4850,7 +4836,7 @@ static u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { stage_max = init_trim_py(in_buf, q->len); if (not_on_tty && debug) - SAYF("[Python Trimming] START: Max %d iterations, %d bytes", stage_max, q->len); + SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, q->len); while(stage_cur < stage_max) { sprintf(tmp, "ptrim %s", DI(trim_exec)); @@ -4893,7 +4879,7 @@ static u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { stage_cur = post_trim_py(1); if (not_on_tty && debug) - SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %d bytes)", stage_cur, stage_max, q->len); + SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", stage_cur, stage_max, q->len); } else { /* Tell the Python module that the trimming was unsuccessful */ stage_cur = post_trim_py(0); @@ -4907,7 +4893,7 @@ static u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { } if (not_on_tty && debug) - SAYF("[Python Trimming] DONE: %d bytes -> %d bytes", orig_len, q->len); + SAYF("[Python Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len); /* If we have made changes to in_buf, we also need to update the on-disk version of the test case. */ @@ -5556,7 +5542,7 @@ static u8 fuzz_one_original(char** argv) { orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %u", queue_cur->fname, len); + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); close(fd); @@ -5614,7 +5600,7 @@ static u8 fuzz_one_original(char** argv) { queue_cur->trim_done = 1; - if (len != queue_cur->len) len = queue_cur->len; + len = queue_cur->len; } @@ -7448,7 +7434,7 @@ static u8 pilot_fuzzing(char** argv) { queue_cur->trim_done = 1; - if (len != queue_cur->len) len = queue_cur->len; + len = queue_cur->len; } @@ -8564,7 +8550,9 @@ static u8 pilot_fuzzing(char** argv) { { +#ifndef IGNORE_FINDS havoc_stage_puppet: +#endif stage_cur_byte = -1; @@ -9251,7 +9239,7 @@ static u8 core_fuzzing(char** argv) { queue_cur->trim_done = 1; - if (len != queue_cur->len) len = queue_cur->len; + len = queue_cur->len; } @@ -10330,7 +10318,9 @@ static u8 core_fuzzing(char** argv) { } } { +#ifndef IGNORE_FINDS havoc_stage_puppet: +#endif stage_cur_byte = -1; @@ -11316,6 +11306,13 @@ static void check_term_size(void) { static void usage(u8* argv0) { +#ifdef USE_PYTHON +#define PHYTON_SUPPORT \ + "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n" +#else +#define PHYTON_SUPPORT "" +#endif + SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... 
]\n\n" "Required parameters:\n" @@ -11327,8 +11324,8 @@ static void usage(u8* argv0) { " \n" " see docs/power_schedules.txt\n" " -f file - location read by the fuzzed program (stdin)\n" - " -t msec - timeout for each run (auto-scaled, 50-%u ms)\n" - " -m megs - memory limit for child process (%u MB)\n" + " -t msec - timeout for each run (auto-scaled, 50-%d ms)\n" + " -m megs - memory limit for child process (%d MB)\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use Unicorn-based instrumentation (Unicorn mode)\n\n" " -L minutes - use MOpt(imize) mode and set the limit time for entering the\n" @@ -11352,14 +11349,14 @@ static void usage(u8* argv0) { " -C - crash exploration mode (the peruvian rabbit thing)\n" " -e ext - File extension for the temporarily generated test case\n\n" -#ifdef USE_PYTHON - "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n" -#endif + PHYTON_SUPPORT + "For additional tips, please consult %s/README\n\n", argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); exit(1); +#undef PHYTON_SUPPORT } @@ -11673,8 +11670,6 @@ static void check_cpu_governor(void) { static void get_core_count(void) { - u32 cur_runnable = 0; - #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) size_t s = sizeof(cpu_core_count); @@ -11718,6 +11713,8 @@ static void get_core_count(void) { if (cpu_core_count > 0) { + u32 cur_runnable = 0; + cur_runnable = (u32)get_runnable_processes(); #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) @@ -11728,7 +11725,7 @@ static void get_core_count(void) { #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ - OKF("You have %u CPU core%s and %u runnable tasks (utilization: %0.0f%%).", + OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).", cpu_core_count, cpu_core_count > 1 ? 
"s" : "", cur_runnable, cur_runnable * 100.0 / cpu_core_count); @@ -11977,8 +11974,8 @@ static void save_cmdline(u32 argc, char** argv) { } int stricmp(char const *a, char const *b) { - int d; for (;; ++a, ++b) { + int d; d = tolower(*a) - tolower(*b); if (d != 0 || !*a) return d; @@ -12443,15 +12440,15 @@ int main(int argc, char** argv) { setup_dirs_fds(); - u8 with_python_support = 0; #ifdef USE_PYTHON if (init_py()) FATAL("Failed to initialize Python module"); - with_python_support = 1; -#endif + u8 with_python_support = 1; +#else - if (getenv("AFL_PYTHON_MODULE") && !with_python_support) + if (getenv("AFL_PYTHON_MODULE")) FATAL("Your AFL binary was built without Python support"); +#endif setup_cmdline_file(argv + optind); diff --git a/afl-tmin.c b/afl-tmin.c index 94f3bb3f..4989d7ce 100644 --- a/afl-tmin.c +++ b/afl-tmin.c @@ -551,7 +551,6 @@ static void minimize(char** argv) { while (set_pos < in_len) { - u8 res; u32 use_len = MIN(set_len, in_len - set_pos); for (i = 0; i < use_len; i++) @@ -562,12 +561,13 @@ static void minimize(char** argv) { memcpy(tmp_buf, in_data, in_len); memset(tmp_buf + set_pos, '0', use_len); + u8 res; res = run_target(argv, tmp_buf, in_len, 0); if (res) { memset(in_data + set_pos, '0', use_len); - changed_any = 1; +/* changed_any = 1; value is not used */ alpha_del0 += use_len; } diff --git a/alloc-inl.h b/alloc-inl.h index 04f56d0d..2f98da0e 100644 --- a/alloc-inl.h +++ b/alloc-inl.h @@ -112,7 +112,7 @@ static inline void* DFL_ck_alloc_nozero(u32 size) { - void* ret; + u8* ret; if (!size) return NULL; @@ -126,7 +126,7 @@ static inline void* DFL_ck_alloc_nozero(u32 size) { ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; - return ret; + return (void *)ret; } @@ -163,7 +163,8 @@ static inline void DFL_ck_free(void* mem) { ALLOC_C1(mem) = ALLOC_MAGIC_F; - free(mem - ALLOC_OFF_HEAD); + u8 *realStart = mem; + free(realStart - ALLOC_OFF_HEAD); } @@ -174,7 +175,7 @@ static inline void DFL_ck_free(void* mem) { static inline void* DFL_ck_realloc(void* orig, u32 size) { - void* ret; + u8* ret; u32 old_size = 0; if (!size) { @@ -193,7 +194,9 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { #endif /* !DEBUG_BUILD */ old_size = ALLOC_S(orig); - orig -= ALLOC_OFF_HEAD; + u8 *origu8 = orig; + origu8 -= ALLOC_OFF_HEAD; + orig = origu8; ALLOC_CHECK_SIZE(old_size); @@ -216,10 +219,11 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { if (orig) { - memcpy(ret + ALLOC_OFF_HEAD, orig + ALLOC_OFF_HEAD, MIN(size, old_size)); - memset(orig + ALLOC_OFF_HEAD, 0xFF, old_size); + u8 *origu8 = orig; + memcpy(ret + ALLOC_OFF_HEAD, origu8 + ALLOC_OFF_HEAD, MIN(size, old_size)); + memset(origu8 + ALLOC_OFF_HEAD, 0xFF, old_size); - ALLOC_C1(orig + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F; + ALLOC_C1(origu8 + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F; free(orig); @@ -236,7 +240,7 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { if (size > old_size) memset(ret + old_size, 0, size - old_size); - return ret; + return (void *)ret; } @@ -269,7 +273,7 @@ static inline void* DFL_ck_realloc_block(void* orig, u32 size) { static inline u8* DFL_ck_strdup(u8* str) { - void* ret; + u8* ret; u32 size; if (!str) return NULL; @@ -296,7 +300,7 @@ static inline u8* DFL_ck_strdup(u8* str) { static inline void* DFL_ck_memdup(void* mem, u32 size) { - void* ret; + u8* ret; if (!mem || !size) return NULL; diff --git a/debug.h b/debug.h index a943a573..349aa650 100644 --- a/debug.h +++ b/debug.h @@ -199,7 +199,7 @@ #define FATAL(x...) 
do { \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \ cBRI x); \ - SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", \ + SAYF(cLRD "\n Location : " cRST "%s(), %s:%d\n\n", \ __FUNCTION__, __FILE__, __LINE__); \ exit(1); \ } while (0) @@ -209,7 +209,7 @@ #define ABORT(x...) do { \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \ cBRI x); \ - SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%d\n\n", \ __FUNCTION__, __FILE__, __LINE__); \ abort(); \ } while (0) @@ -220,7 +220,7 @@ fflush(stdout); \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] SYSTEM ERROR : " \ cBRI x); \ - SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%d\n", \ __FUNCTION__, __FILE__, __LINE__); \ SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \ exit(1); \ From 925cfba424095a70e8476dfe25a94704639fc597 Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Mon, 12 Aug 2019 10:52:45 +0200 Subject: [PATCH 19/83] signedness in print formats corrected --- afl-fuzz.c | 12 ++++++------ afl-showmap.c | 2 +- afl-tmin.c | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/afl-fuzz.c b/afl-fuzz.c index 3cae3fc5..2accde86 100644 --- a/afl-fuzz.c +++ b/afl-fuzz.c @@ -822,7 +822,7 @@ static void bind_to_free_cpu(void) { if (i == cpu_core_count) { SAYF("\n" cLRD "[-] " cRST - "Uh-oh, looks like all %u CPU cores on your system are allocated to\n" + "Uh-oh, looks like all %d CPU cores on your system are allocated to\n" " other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n" " another fuzzer on this machine is probably a bad plan, but if you are\n" " absolutely sure, you can set AFL_NO_AFFINITY and try again.\n", @@ -3758,7 +3758,7 @@ static void write_stats_file(double bitmap_cvg, double stability, double eps) { fprintf(f, "start_time : %llu\n" "last_update : %llu\n" - "fuzzer_pid : %u\n" + "fuzzer_pid : %d\n" "cycles_done : %llu\n" "execs_done : %llu\n" "execs_per_sec : %0.02f\n" @@ -4121,13 +4121,13 @@ static void maybe_delete_out_dir(void) { #ifndef SIMPLE_FILES - u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn, + u8* nfn = alloc_printf("%s.%04d-%02d-%02d-%02d:%02d:%02d", fn, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); #else - u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn, + u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); @@ -4152,13 +4152,13 @@ static void maybe_delete_out_dir(void) { #ifndef SIMPLE_FILES - u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn, + u8* nfn = alloc_printf("%s.%04d-%02d-%02d-%02d:%02d:%02d", fn, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); #else - u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn, + u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); diff --git a/afl-showmap.c b/afl-showmap.c index bce7cb4e..af3b36ee 100644 --- a/afl-showmap.c +++ b/afl-showmap.c @@ -409,7 +409,7 @@ static void usage(u8* argv0) { "Execution control settings:\n\n" " -t msec - timeout for each run (none)\n" - " -m megs - memory limit for child process (%u MB)\n" + " -m megs - memory limit for child process (%d MB)\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use Unicorn-based instrumentation (Unicorn mode)\n" " (Not 
necessary, here for consistency with other afl-* tools)\n\n" diff --git a/afl-tmin.c b/afl-tmin.c index 4989d7ce..09ce8c62 100644 --- a/afl-tmin.c +++ b/afl-tmin.c @@ -896,8 +896,8 @@ static void usage(u8* argv0) { "Execution control settings:\n\n" " -f file - input file read by the tested program (stdin)\n" - " -t msec - timeout for each run (%u ms)\n" - " -m megs - memory limit for child process (%u MB)\n" + " -t msec - timeout for each run (%d ms)\n" + " -m megs - memory limit for child process (%d MB)\n" " -Q - use binary-only instrumentation (QEMU mode)\n" " -U - use Unicorn-based instrumentation (Unicorn mode)\n\n" " (Not necessary, here for consistency with other afl-* tools)\n\n" From 96c76a8333d39b06096e4cbb668a52ffa5575e9a Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Wed, 14 Aug 2019 22:41:39 +0200 Subject: [PATCH 20/83] more sed compatibility for FreeBSD, avoid grouping --- llvm_mode/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile index fc7a6fd9..160a8fe6 100644 --- a/llvm_mode/Makefile +++ b/llvm_mode/Makefile @@ -75,7 +75,7 @@ endif # sanity check. # Are versions of clang --version and llvm-config --version equal? -CLANGVER = $(shell $(CC) --version | sed -E -ne '1{/^.*([0-9]\.[0-9]\.[0-9]).*/s//\1/p}') +CLANGVER = $(shell $(CC) --version | sed -E -ne '/^.*version\ ([0-9]\.[0-9]\.[0-9]).*/s//\1/p') ifeq "$(shell echo '\#include @\#include @int main() { int _id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | IPC_EXCL | 0600); shmctl(_id, IPC_RMID, 0); return 0;}' | tr @ '\n' | $(CC) -x c - -o .test2 2>/dev/null && echo 1 || echo 0 )" "1" From 7cb0658b0091ecae9458c250505a05d1402a4fc8 Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Wed, 14 Aug 2019 22:41:39 +0200 Subject: [PATCH 21/83] more sed compatibility for FreeBSD, avoid grouping --- llvm_mode/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile index fc7a6fd9..160a8fe6 100644 --- a/llvm_mode/Makefile +++ b/llvm_mode/Makefile @@ -75,7 +75,7 @@ endif # sanity check. # Are versions of clang --version and llvm-config --version equal? -CLANGVER = $(shell $(CC) --version | sed -E -ne '1{/^.*([0-9]\.[0-9]\.[0-9]).*/s//\1/p}') +CLANGVER = $(shell $(CC) --version | sed -E -ne '/^.*version\ ([0-9]\.[0-9]\.[0-9]).*/s//\1/p') ifeq "$(shell echo '\#include @\#include @int main() { int _id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | IPC_EXCL | 0600); shmctl(_id, IPC_RMID, 0); return 0;}' | tr @ '\n' | $(CC) -x c - -o .test2 2>/dev/null && echo 1 || echo 0 )" "1" From 2053731ebc9a4c881f52c1de51fab51f79bcf980 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Sat, 17 Aug 2019 12:07:22 +0200 Subject: [PATCH 22/83] update readme and todo --- README.md | 4 ++++ TODO | 10 +++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index dff6463b..9ff7c24b 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,10 @@ afl++ is maintained by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi . 
+ Note that although afl now has a Google afl repository [https://github.com/Google/afl](https://github.com/Google/afl), + it is unlikely to receive any notable enhancements: [https://twitter.com/Dor3s/status/1154737061787660288](https://twitter.com/Dor3s/status/1154737061787660288) + + ## The enhancements compared to the original stock afl Many improvements were made over the official afl release - which did not diff --git a/TODO b/TODO index 42987cb9..692f6609 100644 --- a/TODO +++ b/TODO @@ -10,7 +10,6 @@ afl-fuzz: gcc_plugin: - needs to be rewritten - - fix crashes when compiling :( - whitelist support - skip over uninteresting blocks - laf-intel @@ -29,7 +28,8 @@ Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges. At afl's default map that means ~16 collisions and ~3 wrappings. Solution #1: increase map size. every +1 decreases fuzzing speed by ~10% and halves the collisions - birthday paradox predicts at collisions at this # of edges: + birthday paradox predicts collisions at this # of edges: + mapsize => collisions 2^16 = 302 2^17 = 427 2^18 = 603 @@ -39,10 +39,10 @@ Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges. 2^22 = 2412 2^23 = 3411 2^24 = 4823 - Its an easy solution but also not a good one. + Increasing the map is an easy solution but also not a good one. Solution #2: use dynamic map size and collision free basic block IDs This only works in llvm_mode and llvm >= 9 though - A potential good future solution + A potential good future solution. Heiko/hexcoder follows this up Solution #3: write instruction pointers to a big shared map 512kb/1MB shared map and the instrumented code writes the instruction pointer into the map. Map must be big enough but could be command line @@ -51,7 +51,7 @@ Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges. impacts speed, but this can be decided by user options Neutral: a little bit slower but no loss of coverage Bad: completely changes how afl uses the map and the scheduling. - Overall another very good solution + Overall another very good solution, Marc Heuse/vanHauser follows this up qemu_mode: - persistent mode patching the return address (WinAFL style) From dd734a01dc65e2a5dc5ae7498658743e70b14f6a Mon Sep 17 00:00:00 2001 From: David Carlier Date: Sun, 18 Aug 2019 09:40:33 +0100 Subject: [PATCH 23/83] system-config: making it more compatible with BSD systems. The following knobs are Linux-specific but have a few counterparts on those systems. --- afl-system-config | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/afl-system-config b/afl-system-config index 366762ef..28793c5b 100755 --- a/afl-system-config +++ b/afl-system-config @@ -1,9 +1,11 @@ #!/bin/sh +PLATFORM=`uname -s` echo This reconfigures the system to have a better fuzzing performance if [ '!' "$EUID" = 0 ] && [ '!' 
`id -u` = 0 ] ; then echo Error you need to be root to run this exit 1 fi +if [ "$PLATFORM" = "Linux" ] ; then sysctl -w kernel.core_pattern=core sysctl -w kernel.randomize_va_space=0 sysctl -w kernel.sched_child_runs_first=1 @@ -19,5 +21,19 @@ test -e /sys/devices/system/cpu/cpufreq/boost && echo 1 > /sys/devices/system/cp echo echo It is recommended to boot the kernel with lots of security off - if you are running a machine that is in a secured network - so set this: echo '/etc/default/grub:GRUB_CMDLINE_LINUX_DEFAULT="ibpb=off ibrs=off kpti=off l1tf=off mds=off mitigations=off no_stf_barrier noibpb noibrs nopcid nopti nospec_store_bypass_disable nospectre_v1 nospectre_v2 pcid=off pti=off spec_store_bypass_disable=off spectre_v2=off stf_barrier=off"' +fi +if [ "$PLATFORM" = "FreeBSD" ] ; then +sysctl kern.elf32.aslr.enable=0 +sysctl kern.elf64.aslr.enable=0 +echo +echo It is recommended to boot the kernel with lots of security off - if you are running a machine that is in a secured network - so set this: +echo 'sysctl hw.ibrs_disable=1' +echo +echo 'Setting kern.pmap.pg_ps_enabled=0 into /boot/loader.conf might be helpful too.' +fi +if [ "$PLATFORM" = "OpenBSD" ] ; then +echo +echo 'System security features cannot be disabled on OpenBSD.' +fi echo echo Also use AFL_TMPDIR to use a tmpfs for the input file From d3d0682310b840b027083133837bcd9be0638281 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 19 Aug 2019 12:54:55 +0200 Subject: [PATCH 24/83] separated the forkserver from afl-fuzz and afl-tmin --- Makefile | 23 +- TODO | 23 +- afl-analyze.c | 2 +- afl-common.c | 2 + afl-forkserver.c | 401 +++++++++++++++++++++++++++++++++ afl-forkserver.h | 25 ++ afl-fuzz.c | 344 ++-------------------------- sharedmem.c => afl-sharedmem.c | 2 +- sharedmem.h => afl-sharedmem.h | 5 +- afl-showmap.c | 2 +- afl-tmin.c | 124 +++++----- docs/ChangeLog | 5 +- 12 files changed, 553 insertions(+), 405 deletions(-) create mode 100644 afl-forkserver.c create mode 100644 afl-forkserver.h rename sharedmem.c => afl-sharedmem.c (99%) rename sharedmem.h => afl-sharedmem.h (57%) diff --git a/Makefile b/Makefile index e6e3af85..3d5059f7 100644 --- a/Makefile +++ b/Makefile @@ -134,20 +134,23 @@ afl-as: afl-as.c afl-as.h $(COMM_HDR) | test_x86 afl-common.o : afl-common.c $(CC) $(CFLAGS) -c afl-common.c -sharedmem.o : sharedmem.c - $(CC) $(CFLAGS) -c sharedmem.c +afl-forkserver.o : afl-forkserver.c + $(CC) $(CFLAGS) -c afl-forkserver.c -afl-fuzz: afl-fuzz.c afl-common.o sharedmem.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c afl-common.o sharedmem.o -o $@ $(LDFLAGS) $(PYFLAGS) +afl-sharedmem.o : afl-sharedmem.c + $(CC) $(CFLAGS) -c afl-sharedmem.c -afl-showmap: afl-showmap.c afl-common.o sharedmem.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c afl-common.o sharedmem.o -o $@ $(LDFLAGS) +afl-fuzz: afl-fuzz.c afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) $@.c afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) $(PYFLAGS) -afl-tmin: afl-tmin.c afl-common.o sharedmem.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c afl-common.o sharedmem.o -o $@ $(LDFLAGS) +afl-showmap: afl-showmap.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) $@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS) -afl-analyze: afl-analyze.c afl-common.o sharedmem.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c afl-common.o sharedmem.o -o $@ $(LDFLAGS) +afl-tmin: afl-tmin.c afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) $@.c afl-common.o 
afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) + +afl-analyze: afl-analyze.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) $@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS) afl-gotcpu: afl-gotcpu.c $(COMM_HDR) | test_x86 $(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS) diff --git a/TODO b/TODO index 42987cb9..cc075abd 100644 --- a/TODO +++ b/TODO @@ -1,12 +1,24 @@ Roadmap 2.53d: ============== - - indent all the code: .clang-format - - update docs/sister_projects.txt +all: + - indent all the code: .clang-format? + (vh: tried, the variable definitions look very ugly then, what to do?) afl-fuzz: - - put mutator, scheduler, forkserver and input channels in individual files - - reuse forkserver for showmap, afl-cmin, etc. + - modularize: forkserver is in a module + others: + mutator - is deeply integrated and would lose performance if split + scheduler - is within this and as the values it operates on are afl + specific it does not make sense to separate this + input - if we get different input vectors then this would make sense, + e.g. network (which we have seen is super non-performant and using + desock is much faster) + so for the moment we are done? (vh) + +docs/: + - update docs/sister_projects.txt + - doc + example for AFL_CUSTOM_MUTATOR_LIBRARY gcc_plugin: - needs to be rewritten @@ -17,8 +29,9 @@ gcc_plugin: - neverZero qemu_mode: + - update to 4.x - deferred mode with AFL_DEFERRED_QEMU=0xaddress - @andrea - dont we have that already with AFL_ENTRYPOINT? + (vh: @andrea - don't we have that already with AFL_ENTRYPOINT?) unit testing / or large testcase campaign diff --git a/afl-analyze.c b/afl-analyze.c index af93150e..18b7456d 100644 --- a/afl-analyze.c +++ b/afl-analyze.c @@ -26,7 +26,7 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "sharedmem.h" +#include "afl-sharedmem.h" #include "afl-common.h" #include diff --git a/afl-common.c b/afl-common.c index 5e2d0628..f3bbdfb4 100644 --- a/afl-common.c +++ b/afl-common.c @@ -15,6 +15,8 @@ #ifndef __glibc__ #include #endif + + void detect_file_args(char** argv, u8* prog_in) { u32 i = 0; diff --git a/afl-forkserver.c b/afl-forkserver.c new file mode 100644 index 00000000..226175e1 --- /dev/null +++ b/afl-forkserver.c @@ -0,0 +1,401 @@ +#include "config.h" +#include "types.h" +#include "debug.h" +#include "afl-forkserver.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* a program that includes afl-forkserver needs to define these */ +extern u8 uses_asan; +extern u8 *trace_bits; +extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd; +extern s32 out_fd, out_dir_fd, dev_urandom_fd, dev_null_fd; /* initialize these with -1 */ +extern u32 exec_tmout; +extern u64 mem_limit; +extern u8 *out_file, *target_path, *doc_path; +extern FILE *plot_file; + +/* we need this internally, but it can be defined and read as extern in the main source */ +u8 child_timed_out; + + +/* Describe integer as memory size. 
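   (Editorial example, not part of the patch: with the CHK_FORMAT ladder
   below, a call such as

       u8* s = forkserver_DMS(1234567);   // yields "1.18 MB"

   picks the first rung whose limit exceeds the value; anything past the
   99.9 TB rung falls through to the literal "infty".)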
*/ + +u8* forkserver_DMS(u64 val) { + + static u8 tmp[12][16]; + static u8 cur; + +#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \ + if (val < (_divisor) * (_limit_mult)) { \ + sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \ + return tmp[cur]; \ + } \ + } while (0) + + + cur = (cur + 1) % 12; + + /* 0-9999 */ + CHK_FORMAT(1, 10000, "%llu B", u64); + + /* 10.0k - 99.9k */ + CHK_FORMAT(1024, 99.95, "%0.01f kB", double); + + /* 100k - 999k */ + CHK_FORMAT(1024, 1000, "%llu kB", u64); + + /* 1.00M - 9.99M */ + CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double); + + /* 10.0M - 99.9M */ + CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double); + + /* 100M - 999M */ + CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64); + + /* 1.00G - 9.99G */ + CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double); + + /* 10.0G - 99.9G */ + CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double); + + /* 100G - 999G */ + CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64); + + /* 1.00T - 9.99T */ + CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double); + + /* 10.0T - 99.9T */ + CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double); + +#undef CHK_FORMAT + + /* 100T+ */ + strcpy(tmp[cur], "infty"); + return tmp[cur]; + +} + + + +/* the timeout handler */ + +void handle_timeout(int sig) { + if (child_pid > 0) { + child_timed_out = 1; + kill(child_pid, SIGKILL); + } else if (child_pid == -1 && forksrv_pid > 0) { + child_timed_out = 1; + kill(forksrv_pid, SIGKILL); + } +} + + +/* Spin up fork server (instrumented mode only). The idea is explained here: + + http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html + + In essence, the instrumentation allows us to skip execve(), and just keep + cloning a stopped child. So, we just execute once, and then send commands + through a pipe. The other part of this logic is in afl-as.h / llvm_mode */ + +void init_forkserver(char **argv) { + + static struct itimerval it; + int st_pipe[2], ctl_pipe[2]; + int status; + s32 rlen; + + ACTF("Spinning up the fork server..."); + + if (pipe(st_pipe) || pipe(ctl_pipe)) + PFATAL("pipe() failed"); + + child_timed_out = 0; + forksrv_pid = fork(); + + if (forksrv_pid < 0) + PFATAL("fork() failed"); + + if (!forksrv_pid) { + + /* CHILD PROCESS */ + + struct rlimit r; + + /* Umpf. On OpenBSD, the default fd limit for root users is set to + soft 128. Let's try to fix that... */ + + if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) { + r.rlim_cur = FORKSRV_FD + 2; + setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ + } + + if (mem_limit) { + r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; + +#ifdef RLIMIT_AS + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ +#else + /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but + according to reliable sources, RLIMIT_DATA covers anonymous + maps - so we should be getting good protection against OOM bugs. */ + + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ +#endif /* ^RLIMIT_AS */ + } + + /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered + before the dump is complete. */ + +// r.rlim_max = r.rlim_cur = 0; +// setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + + /* Isolate the process and configure standard descriptors. If out_file is + specified, stdin is /dev/null; otherwise, out_fd is cloned instead. 
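   (Editorial note, not part of the patch: FORKSRV_FD is 198 in config.h, so
   after the dup2() calls below the child reads fork commands on fd 198 and
   writes status words to fd 199, while the parent keeps the opposite pipe
   ends as fsrv_ctl_fd and fsrv_st_fd.)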
*/ + + setsid(); + + if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) { + dup2(dev_null_fd, 1); + dup2(dev_null_fd, 2); + } + + if (out_file) { + dup2(dev_null_fd, 0); + } else { + dup2(out_fd, 0); + close(out_fd); + } + + /* Set up control and status pipes, close the unneeded original fds. */ + + if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) + PFATAL("dup2() failed"); + if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) + PFATAL("dup2() failed"); + + close(ctl_pipe[0]); + close(ctl_pipe[1]); + close(st_pipe[0]); + close(st_pipe[1]); + + close(out_dir_fd); + close(dev_null_fd); + close(dev_urandom_fd); + close(plot_file == NULL ? -1 : fileno(plot_file)); + + /* This should improve performance a bit, since it stops the linker from + doing extra work post-fork(). */ + + if (!getenv("LD_BIND_LAZY")) + setenv("LD_BIND_NOW", "1", 0); + + /* Set sane defaults for ASAN if nothing else specified. */ + + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); + + /* MSAN is tricky, because it doesn't support abort_on_error=1 at this + point. So, we do this in a very hacky way. */ + + setenv("MSAN_OPTIONS", + "exit_code=" STRINGIFY(MSAN_ERROR) ":" + "symbolize=0:" + "abort_on_error=1:" + "allocator_may_return_null=1:" + "msan_track_origins=0", + 0); + + execv(target_path, argv); + + /* Use a distinctive bitmap signature to tell the parent about execv() + falling through. */ + + *(u32 *)trace_bits = EXEC_FAIL_SIG; + exit(0); + } + + /* PARENT PROCESS */ + + /* Close the unneeded endpoints. */ + + close(ctl_pipe[0]); + close(st_pipe[1]); + + fsrv_ctl_fd = ctl_pipe[1]; + fsrv_st_fd = st_pipe[0]; + + /* Wait for the fork server to come up, but don't wait too long. */ + + if (exec_tmout) { + it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000); + it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000; + } + + setitimer(ITIMER_REAL, &it, NULL); + + rlen = read(fsrv_st_fd, &status, 4); + + it.it_value.tv_sec = 0; + it.it_value.tv_usec = 0; + + setitimer(ITIMER_REAL, &it, NULL); + + /* If we have a four-byte "hello" message from the server, we're all set. + Otherwise, try to figure out what went wrong. */ + + if (rlen == 4) { + OKF("All right - fork server is up."); + return; + } + + if (child_timed_out) + FATAL("Timeout while initializing fork server (adjusting -t may help)"); + + if (waitpid(forksrv_pid, &status, 0) <= 0) + PFATAL("waitpid() failed"); + + if (WIFSIGNALED(status)) { + + if (mem_limit && mem_limit < 500 && uses_asan) { + + SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" + " from the fuzzer! Since it seems to be built with ASAN and you " + "have a\n" + " restrictive memory limit configured, this is expected; please " + "read\n" + " %s/notes_for_asan.txt for help.\n", + doc_path); + + } else if (!mem_limit) { + + SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" + " from the fuzzer! There are several probable explanations:\n\n" + + " - The binary is just buggy and explodes entirely on its own. " + "If so, you\n" + " need to fix the underlying problem or find a better " + "replacement.\n\n" + + MSG_FORK_ON_APPLE + + " - Less likely, there is a horrible bug in the fuzzer. If other " + "options\n" + " fail, poke for troubleshooting " + "tips.\n"); + + } else { + + SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" + " from the fuzzer! 
There are several probable explanations:\n\n" + + " - The current memory limit (%s) is too restrictive, causing " + "the\n" + " target to hit an OOM condition in the dynamic linker. Try " + "bumping up\n" + " the limit with the -m setting in the command line. A simple " + "way confirm\n" + " this diagnosis would be:\n\n" + + MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" + + " Tip: you can use http://jwilk.net/software/recidivm to " + "quickly\n" + " estimate the required amount of virtual memory for the " + "binary.\n\n" + + " - The binary is just buggy and explodes entirely on its own. " + "If so, you\n" + " need to fix the underlying problem or find a better " + "replacement.\n\n" + + MSG_FORK_ON_APPLE + + " - Less likely, there is a horrible bug in the fuzzer. If other " + "options\n" + " fail, poke for troubleshooting " + "tips.\n", + forkserver_DMS(mem_limit << 20), mem_limit - 1); + } + + FATAL("Fork server crashed with signal %d", WTERMSIG(status)); + } + + if (*(u32 *)trace_bits == EXEC_FAIL_SIG) + FATAL("Unable to execute target application ('%s')", argv[0]); + + if (mem_limit && mem_limit < 500 && uses_asan) { + + SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " + "before we could complete a\n" + " handshake with the injected code. Since it seems to be built " + "with ASAN and\n" + " you have a restrictive memory limit configured, this is " + "expected; please\n" + " read %s/notes_for_asan.txt for help.\n", + doc_path); + + } else if (!mem_limit) { + + SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " + "before we could complete a\n" + " handshake with the injected code. Perhaps there is a horrible " + "bug in the\n" + " fuzzer. Poke for troubleshooting " + "tips.\n"); + + } else { + + SAYF( + "\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " + "before we could complete a\n" + " handshake with the injected code. There are %s probable " + "explanations:\n\n" + + "%s" + " - The current memory limit (%s) is too restrictive, causing an " + "OOM\n" + " fault in the dynamic linker. This can be fixed with the -m " + "option. A\n" + " simple way to confirm the diagnosis may be:\n\n" + + MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" + + " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" + " estimate the required amount of virtual memory for the " + "binary.\n\n" + + " - Less likely, there is a horrible bug in the fuzzer. If other " + "options\n" + " fail, poke for troubleshooting " + "tips.\n", + getenv(DEFER_ENV_VAR) ? "three" : "two", + getenv(DEFER_ENV_VAR) + ? " - You are using deferred forkserver, but __AFL_INIT() is " + "never\n" + " reached before the program terminates.\n\n" + : "", + forkserver_DMS(mem_limit << 20), mem_limit - 1); + } + + FATAL("Fork server handshake failed"); +} + diff --git a/afl-forkserver.h b/afl-forkserver.h new file mode 100644 index 00000000..fa40d9c6 --- /dev/null +++ b/afl-forkserver.h @@ -0,0 +1,25 @@ +#ifndef __AFL_FORKSERVER_H +#define __AFL_FORKSERVER_H + +void handle_timeout(int sig); +void init_forkserver(char **argv); + +#ifdef __APPLE__ +#define MSG_FORK_ON_APPLE \ + " - On MacOS X, the semantics of fork() syscalls are non-standard and " \ + "may\n" \ + " break afl-fuzz performance optimizations when running " \ + "platform-specific\n" \ + " targets. 
To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" +#else +#define MSG_FORK_ON_APPLE "" +#endif + +#ifdef RLIMIT_AS + #define MSG_ULIMIT_USAGE " ( ulimit -Sv $[%llu << 10];" +#else + #define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];" +#endif /* ^RLIMIT_AS */ + + +#endif diff --git a/afl-fuzz.c b/afl-fuzz.c index 2accde86..ec54cc85 100644 --- a/afl-fuzz.c +++ b/afl-fuzz.c @@ -33,7 +33,8 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "sharedmem.h" +#include "afl-sharedmem.h" +#include "afl-forkserver.h" #include "afl-common.h" #include @@ -144,7 +145,6 @@ double period_pilot_tmp = 5000.0; int key_lv = 0; EXP_ST u8 *in_dir, /* Input directory with test cases */ - *out_file, /* File to fuzz, if any */ *out_dir, /* Working & output directory */ *tmp_dir , /* Temporary directory for input */ *sync_dir, /* Synchronization directory */ @@ -152,15 +152,16 @@ EXP_ST u8 *in_dir, /* Input directory with test cases */ *power_name, /* Power schedule name */ *use_banner, /* Display banner */ *in_bitmap, /* Input bitmap */ - *doc_path, /* Path to documentation dir */ - *target_path, /* Path to target binary */ *file_extension, /* File extension */ *orig_cmdline; /* Original command line */ + u8 *doc_path, /* Path to documentation dir */ + *target_path, /* Path to target binary */ + *out_file; /* File to fuzz, if any */ -EXP_ST u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */ + u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */ static u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */ -EXP_ST u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ + u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ EXP_ST u8 cal_cycles = CAL_CYCLES; /* Calibration cycles defaults */ EXP_ST u8 cal_cycles_long = CAL_CYCLES_LONG; @@ -200,7 +201,6 @@ EXP_ST u8 skip_deterministic, /* Skip deterministic stages? */ timeout_given, /* Specific timeout given? */ not_on_tty, /* stdout is not a tty */ term_too_small, /* terminal dimensions too small */ - uses_asan, /* Target uses ASAN? */ no_forkserver, /* Disable forkserver? */ crash_mode, /* Crash mode! Yeah! */ in_place_resume, /* Attempt in-place resume? */ @@ -217,14 +217,15 @@ EXP_ST u8 skip_deterministic, /* Skip deterministic stages? */ deferred_mode, /* Deferred forkserver mode? */ fixed_seed, /* do not reseed */ fast_cal; /* Try to calibrate faster? */ + u8 uses_asan; /* Target uses ASAN? */ -static s32 out_fd, /* Persistent fd for out_file */ + s32 out_fd, /* Persistent fd for out_file */ dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ dev_null_fd = -1, /* Persistent fd for /dev/null */ fsrv_ctl_fd, /* Fork server control pipe (write) */ fsrv_st_fd; /* Fork server status pipe (read) */ -static s32 forksrv_pid, /* PID of the fork server */ + s32 forksrv_pid, /* PID of the fork server */ child_pid = -1, /* PID of the fuzzed program */ out_dir_fd = -1; /* FD of the lock file */ @@ -313,7 +314,7 @@ static s32 cpu_aff = -1; /* Selected CPU core */ #endif /* HAVE_AFFINITY */ -static FILE* plot_file; /* Gnuplot output file */ +FILE* plot_file; /* Gnuplot output file */ struct queue_entry { @@ -2308,299 +2309,6 @@ static void destroy_extras(void) { } -/* Spin up fork server (instrumented mode only). The idea is explained here: - - http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html - - In essence, the instrumentation allows us to skip execve(), and just keep - cloning a stopped child. 
So, we just execute once, and then send commands - through a pipe. The other part of this logic is in afl-as.h. */ - -EXP_ST void init_forkserver(char** argv) { - - static struct itimerval it; - int st_pipe[2], ctl_pipe[2]; - int status; - s32 rlen; - - ACTF("Spinning up the fork server..."); - - if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed"); - - forksrv_pid = fork(); - - if (forksrv_pid < 0) PFATAL("fork() failed"); - - if (!forksrv_pid) { - - /* CHILD PROCESS */ - - struct rlimit r; - - /* Umpf. On OpenBSD, the default fd limit for root users is set to - soft 128. Let's try to fix that... */ - - if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) { - - r.rlim_cur = FORKSRV_FD + 2; - setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ - - } - - if (mem_limit) { - - r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; - -#ifdef RLIMIT_AS - - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ - -#else - - /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but - according to reliable sources, RLIMIT_DATA covers anonymous - maps - so we should be getting good protection against OOM bugs. */ - - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ - -#endif /* ^RLIMIT_AS */ - - - } - - /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered - before the dump is complete. */ - - r.rlim_max = r.rlim_cur = 0; - - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ - - /* Isolate the process and configure standard descriptors. If out_file is - specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ - - setsid(); - - if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) { - dup2(dev_null_fd, 1); - dup2(dev_null_fd, 2); - } - - if (out_file) { - - dup2(dev_null_fd, 0); - - } else { - - dup2(out_fd, 0); - close(out_fd); - - } - - /* Set up control and status pipes, close the unneeded original fds. */ - - if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed"); - if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed"); - - close(ctl_pipe[0]); - close(ctl_pipe[1]); - close(st_pipe[0]); - close(st_pipe[1]); - - close(out_dir_fd); - close(dev_null_fd); - close(dev_urandom_fd); - close(fileno(plot_file)); - - /* This should improve performance a bit, since it stops the linker from - doing extra work post-fork(). */ - - if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0); - - /* Set sane defaults for ASAN if nothing else specified. */ - - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); - - /* MSAN is tricky, because it doesn't support abort_on_error=1 at this - point. So, we do this in a very hacky way. */ - - setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" - "symbolize=0:" - "abort_on_error=1:" - "allocator_may_return_null=1:" - "msan_track_origins=0", 0); - - execv(target_path, argv); - - /* Use a distinctive bitmap signature to tell the parent about execv() - falling through. */ - - *(u32*)trace_bits = EXEC_FAIL_SIG; - exit(0); - - } - - /* PARENT PROCESS */ - - /* Close the unneeded endpoints. */ - - close(ctl_pipe[0]); - close(st_pipe[1]); - - fsrv_ctl_fd = ctl_pipe[1]; - fsrv_st_fd = st_pipe[0]; - - /* Wait for the fork server to come up, but don't wait too long. 
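   (Editorial note, not part of the patch: the four-byte "hello" checked a
   few lines down is simply the status word the instrumented target writes
   to FORKSRV_FD + 1 once its fork server is up; only the length of the
   read is verified, never its value.)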
*/ - - it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000); - it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000; - - setitimer(ITIMER_REAL, &it, NULL); - - rlen = read(fsrv_st_fd, &status, 4); - - it.it_value.tv_sec = 0; - it.it_value.tv_usec = 0; - - setitimer(ITIMER_REAL, &it, NULL); - - /* If we have a four-byte "hello" message from the server, we're all set. - Otherwise, try to figure out what went wrong. */ - - if (rlen == 4) { - OKF("All right - fork server is up."); - return; - } - - if (child_timed_out) - FATAL("Timeout while initializing fork server (adjusting -t may help)"); - - if (waitpid(forksrv_pid, &status, 0) <= 0) - PFATAL("waitpid() failed"); - - if (WIFSIGNALED(status)) { - - if (mem_limit && mem_limit < 500 && uses_asan) { - - SAYF("\n" cLRD "[-] " cRST - "Whoops, the target binary crashed suddenly, before receiving any input\n" - " from the fuzzer! Since it seems to be built with ASAN and you have a\n" - " restrictive memory limit configured, this is expected; please read\n" - " %s/notes_for_asan.txt for help.\n", doc_path); - - } else if (!mem_limit) { - -#ifdef __APPLE__ -#define MSG_FORK_ON_APPLE \ - " - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" \ - " break afl-fuzz performance optimizations when running platform-specific\n" \ - " targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" -#else -#define MSG_FORK_ON_APPLE "" -#endif - - SAYF("\n" cLRD "[-] " cRST - "Whoops, the target binary crashed suddenly, before receiving any input\n" - " from the fuzzer! There are several probable explanations:\n\n" - - " - The binary is just buggy and explodes entirely on its own. If so, you\n" - " need to fix the underlying problem or find a better replacement.\n\n" - - MSG_FORK_ON_APPLE - - " - Less likely, there is a horrible bug in the fuzzer. If other options\n" - " fail, poke for troubleshooting tips.\n"); - - } else { - -#ifdef RLIMIT_AS -#define MSG_ULIMIT_USAGE \ - " ( ulimit -Sv $[%llu << 10];" -#else -#define MSG_ULIMIT_USAGE \ - " ( ulimit -Sd $[%llu << 10];" -#endif /* ^RLIMIT_AS */ - - SAYF("\n" cLRD "[-] " cRST - "Whoops, the target binary crashed suddenly, before receiving any input\n" - " from the fuzzer! There are several probable explanations:\n\n" - - " - The current memory limit (%s) is too restrictive, causing the\n" - " target to hit an OOM condition in the dynamic linker. Try bumping up\n" - " the limit with the -m setting in the command line. A simple way confirm\n" - " this diagnosis would be:\n\n" - - MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" - - " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" - " estimate the required amount of virtual memory for the binary.\n\n" - - " - The binary is just buggy and explodes entirely on its own. If so, you\n" - " need to fix the underlying problem or find a better replacement.\n\n" - - MSG_FORK_ON_APPLE - - " - Less likely, there is a horrible bug in the fuzzer. If other options\n" - " fail, poke for troubleshooting tips.\n", - DMS(mem_limit << 20), mem_limit - 1); - - } - - - FATAL("Fork server crashed with signal %d", WTERMSIG(status)); - - } - - if (*(u32*)trace_bits == EXEC_FAIL_SIG) - FATAL("Unable to execute target application ('%s')", argv[0]); - - if (mem_limit && mem_limit < 500 && uses_asan) { - - SAYF("\n" cLRD "[-] " cRST - "Hmm, looks like the target binary terminated before we could complete a\n" - " handshake with the injected code. 
Since it seems to be built with ASAN and\n" - " you have a restrictive memory limit configured, this is expected; please\n" - " read %s/notes_for_asan.txt for help.\n", doc_path); - - } else if (!mem_limit) { - - SAYF("\n" cLRD "[-] " cRST - "Hmm, looks like the target binary terminated before we could complete a\n" - " handshake with the injected code. Perhaps there is a horrible bug in the\n" - " fuzzer. Poke for troubleshooting tips.\n"); - - } else { - - SAYF("\n" cLRD "[-] " cRST - "Hmm, looks like the target binary terminated before we could complete a\n" - " handshake with the injected code. There are %s probable explanations:\n\n" - - "%s" - " - The current memory limit (%s) is too restrictive, causing an OOM\n" - " fault in the dynamic linker. This can be fixed with the -m option. A\n" - " simple way to confirm the diagnosis may be:\n\n" - - MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" - - " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" - " estimate the required amount of virtual memory for the binary.\n\n" - - " - Less likely, there is a horrible bug in the fuzzer. If other options\n" - " fail, poke for troubleshooting tips.\n", - getenv(DEFER_ENV_VAR) ? "three" : "two", - getenv(DEFER_ENV_VAR) ? - " - You are using deferred forkserver, but __AFL_INIT() is never\n" - " reached before the program terminates.\n\n" : "", - DMS(mem_limit << 20), mem_limit - 1); - - } - - FATAL("Fork server handshake failed"); - -} - /* Execute target application, monitoring for timeouts. Return status information. The called program will update trace_bits[]. */ @@ -5165,6 +4873,12 @@ static u32 calculate_score(struct queue_entry* q) { global average. Multiplier ranges from 0.1x to 3x. Fast inputs are less expensive to fuzz, so we're giving them more air time. */ + // TODO BUG FIXME: is this really a good idea? + // This sounds like looking for lost keys under a street light just because + // the light is better there. + // Longer execution time means longer work on the input, the deeper in + // coverage, the better the fuzzing, right? -mh + if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10; else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25; else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50; @@ -5188,15 +4902,11 @@ static u32 calculate_score(struct queue_entry* q) { for a bit longer until they catch up with the rest. */ if (q->handicap >= 4) { - perf_score *= 4; q->handicap -= 4; - } else if (q->handicap) { - perf_score *= 2; --q->handicap; - } /* Final adjustment based on input depth, under the assumption that fuzzing @@ -11041,24 +10751,6 @@ static void handle_skipreq(int sig) { } -/* Handle timeout (SIGALRM). */ - -static void handle_timeout(int sig) { - - if (child_pid > 0) { - - child_timed_out = 1; - kill(child_pid, SIGKILL); - - } else if (child_pid == -1 && forksrv_pid > 0) { - - child_timed_out = 1; - kill(forksrv_pid, SIGKILL); - - } - -} - /* Do a PATH search and find target binary to see that it exists and isn't a shell script - a common and painful mistake. 
We also check for @@ -12443,9 +12135,7 @@ int main(int argc, char** argv) { #ifdef USE_PYTHON if (init_py()) FATAL("Failed to initialize Python module"); - u8 with_python_support = 1; #else - if (getenv("AFL_PYTHON_MODULE")) FATAL("Your AFL binary was built without Python support"); #endif diff --git a/sharedmem.c b/afl-sharedmem.c similarity index 99% rename from sharedmem.c rename to afl-sharedmem.c index 3fd38444..400a0a46 100644 --- a/sharedmem.c +++ b/afl-sharedmem.c @@ -9,7 +9,7 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "sharedmem.h" +#include "afl-sharedmem.h" #include #include diff --git a/sharedmem.h b/afl-sharedmem.h similarity index 57% rename from sharedmem.h rename to afl-sharedmem.h index 53a85fcb..9aa44d0e 100644 --- a/sharedmem.h +++ b/afl-sharedmem.h @@ -1,6 +1,7 @@ -#ifndef __SHAREDMEM_H -#define __SHAREDMEM_H +#ifndef __AFL_SHAREDMEM_H +#define __AFL_SHAREDMEM_H void setup_shm(unsigned char dumb_mode); void remove_shm(void); + #endif diff --git a/afl-showmap.c b/afl-showmap.c index af3b36ee..96b7b5e0 100644 --- a/afl-showmap.c +++ b/afl-showmap.c @@ -28,7 +28,7 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "sharedmem.h" +#include "afl-sharedmem.h" #include "afl-common.h" #include diff --git a/afl-tmin.c b/afl-tmin.c index 09ce8c62..e83b217d 100644 --- a/afl-tmin.c +++ b/afl-tmin.c @@ -21,12 +21,14 @@ #define AFL_MAIN + #include "config.h" #include "types.h" #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "sharedmem.h" +#include "afl-forkserver.h" +#include "afl-sharedmem.h" #include "afl-common.h" #include @@ -46,22 +48,22 @@ #include #include -static s32 forksrv_pid, /* PID of the fork server */ - child_pid; /* PID of the tested program */ +s32 forksrv_pid, /* PID of the fork server */ + child_pid; /* PID of the tested program */ -static s32 fsrv_ctl_fd, /* Fork server control pipe (write) */ - fsrv_st_fd; /* Fork server status pipe (read) */ +s32 fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ u8 *trace_bits; /* SHM with instrumentation bitmap */ static u8 *mask_bitmap; /* Mask for trace bits (-B) */ -static u8 *in_file, /* Minimizer input test case */ - *out_file, /* Minimizer output file */ - *prog_in, /* Targeted program input file */ + u8 *in_file, /* Minimizer input test case */ + *output_file, /* Minimizer output file */ + *out_file, /* Targeted program input file */ *target_path, /* Path to target binary */ *doc_path; /* Path to docs */ -static s32 prog_in_fd; /* Persistent fd for prog_in */ + s32 out_fd; /* Persistent fd for out_file */ static u8* in_data; /* Input data for trimming */ @@ -70,12 +72,12 @@ static u32 in_len, /* Input data length */ total_execs, /* Total number of execs */ missed_hangs, /* Misses due to hangs */ missed_crashes, /* Misses due to crashes */ - missed_paths, /* Misses due to exec path diffs */ - exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ + missed_paths; /* Misses due to exec path diffs */ + u32 exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ -static u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ + u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ -static s32 dev_null_fd = -1; /* FD to /dev/null */ + s32 dev_null_fd = -1; /* FD to /dev/null */ static u8 crash_mode, /* Crash-centric mode? */ exit_crash, /* Treat non-zero exit as crash? */ @@ -84,8 +86,19 @@ static u8 crash_mode, /* Crash-centric mode? */ use_stdin = 1; /* Use stdin for program input? 
*/ static volatile u8 - stop_soon, /* Ctrl-C pressed? */ - child_timed_out; /* Child timed out? */ + stop_soon; /* Ctrl-C pressed? */ + +/* + * forkserver section + */ + +/* we only need this to use afl-forkserver */ +FILE *plot_file; +u8 uses_asan; +s32 out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1; + +/* we import this as we need this information */ +extern u8 child_timed_out; /* Classify tuple counts. This is a slow & naive version, but good enough here. */ @@ -163,7 +176,7 @@ static inline u8 anything_set(void) { /* Get rid of temp files (atexit handler). */ static void at_exit_handler(void) { - if (prog_in) unlink(prog_in); /* Ignore errors */ + if (out_file) unlink(out_file); /* Ignore errors */ } /* Read initial file. */ @@ -214,24 +227,24 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) { } /* Write modified data to file for testing. If use_stdin is clear, the old file - is unlinked and a new one is created. Otherwise, prog_in_fd is rewound and + is unlinked and a new one is created. Otherwise, out_fd is rewound and truncated. */ static void write_to_testcase(void* mem, u32 len) { - s32 fd = prog_in_fd; + s32 fd = out_fd; if (!use_stdin) { - unlink(prog_in); /* Ignore errors. */ + unlink(out_file); /* Ignore errors. */ - fd = open(prog_in, O_WRONLY | O_CREAT | O_EXCL, 0600); + fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (fd < 0) PFATAL("Unable to create '%s'", prog_in); + if (fd < 0) PFATAL("Unable to create '%s'", out_file); } else lseek(fd, 0, SEEK_SET); - ck_write(fd, mem, len, prog_in); + ck_write(fd, mem, len, out_file); if (use_stdin) { @@ -245,7 +258,7 @@ static void write_to_testcase(void* mem, u32 len) { /* Handle timeout signal. */ - +/* static void handle_timeout(int sig) { if (child_pid > 0) { @@ -261,8 +274,10 @@ static void handle_timeout(int sig) { } } +*/ /* start the app and it's forkserver */ +/* static void init_forkserver(char **argv) { static struct itimerval it; int st_pipe[2], ctl_pipe[2]; @@ -280,7 +295,7 @@ static void init_forkserver(char **argv) { struct rlimit r; - if (dup2(use_stdin ? prog_in_fd : dev_null_fd, 0) < 0 || + if (dup2(use_stdin ? out_fd : dev_null_fd, 0) < 0 || dup2(dev_null_fd, 1) < 0 || dup2(dev_null_fd, 2) < 0) { @@ -290,7 +305,7 @@ static void init_forkserver(char **argv) { } close(dev_null_fd); - close(prog_in_fd); + close(out_fd); setsid(); @@ -300,20 +315,20 @@ static void init_forkserver(char **argv) { #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); // Ignore errors #else - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); // Ignore errors -#endif /* ^RLIMIT_AS */ +#endif // ^RLIMIT_AS } r.rlim_max = r.rlim_cur = 0; - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + setrlimit(RLIMIT_CORE, &r); // Ignore errors - /* Set up control and status pipes, close the unneeded original fds. */ + // Set up control and status pipes, close the unneeded original fds. if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed"); if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed"); @@ -330,7 +345,7 @@ static void init_forkserver(char **argv) { } - /* Close the unneeded endpoints. */ + // Close the unneeded endpoints. close(ctl_pipe[0]); close(st_pipe[1]); @@ -338,7 +353,7 @@ static void init_forkserver(char **argv) { fsrv_ctl_fd = ctl_pipe[1]; fsrv_st_fd = st_pipe[0]; - /* Configure timeout, wait for child, cancel timeout. */ + // Configure timeout, wait for child, cancel timeout. 
if (exec_tmout) { @@ -356,8 +371,8 @@ static void init_forkserver(char **argv) { it.it_value.tv_usec = 0; setitimer(ITIMER_REAL, &it, NULL); - /* If we have a four-byte "hello" message from the server, we're all set. - Otherwise, try to figure out what went wrong. */ + // If we have a four-byte "hello" message from the server, we're all set. + // Otherwise, try to figure out what went wrong. if (rlen == 4) { ACTF("All right - fork server is up."); @@ -380,7 +395,7 @@ static void init_forkserver(char **argv) { SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status)); } - +*/ /* Execute target application. Returns 0 if the changes are a dud, or 1 if they should be kept. */ @@ -422,11 +437,8 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { /* Configure timeout, wait for child, cancel timeout. */ if (exec_tmout) { - - child_timed_out = 0; - it.it_value.tv_sec = (exec_tmout / 1000); - it.it_value.tv_usec = (exec_tmout % 1000) * 1000; - + it.it_value.tv_sec = (exec_tmout / 1000); + it.it_value.tv_usec = (exec_tmout % 1000) * 1000; } setitimer(ITIMER_REAL, &it, NULL); @@ -458,7 +470,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { if (stop_soon) { SAYF(cRST cLRD "\n+++ Minimization aborted by user +++\n" cRST); - close(write_to_file(out_file, in_data, in_len)); + close(write_to_file(output_file, in_data, in_len)); exit(1); } @@ -787,7 +799,7 @@ static void set_up_environment(void) { dev_null_fd = open("/dev/null", O_RDWR); if (dev_null_fd < 0) PFATAL("Unable to open /dev/null"); - if (!prog_in) { + if (!out_file) { u8* use_dir = "."; @@ -798,15 +810,15 @@ static void set_up_environment(void) { } - prog_in = alloc_printf("%s/.afl-tmin-temp-%u", use_dir, getpid()); + out_file = alloc_printf("%s/.afl-tmin-temp-%u", use_dir, getpid()); } - unlink(prog_in); + unlink(out_file); - prog_in_fd = open(prog_in, O_RDWR | O_CREAT | O_EXCL, 0600); + out_fd = open(out_file, O_RDWR | O_CREAT | O_EXCL, 0600); - if (prog_in_fd < 0) PFATAL("Unable to create '%s'", prog_in); + if (out_fd < 0) PFATAL("Unable to create '%s'", out_file); /* Set sane defaults... */ @@ -1067,15 +1079,15 @@ int main(int argc, char** argv) { case 'o': - if (out_file) FATAL("Multiple -o options not supported"); - out_file = optarg; + if (output_file) FATAL("Multiple -o options not supported"); + output_file = optarg; break; case 'f': - if (prog_in) FATAL("Multiple -f options not supported"); + if (out_file) FATAL("Multiple -f options not supported"); use_stdin = 0; - prog_in = optarg; + out_file = optarg; break; case 'e': @@ -1181,7 +1193,7 @@ int main(int argc, char** argv) { } - if (optind == argc || !in_file || !out_file) usage(argv[0]); + if (optind == argc || !in_file || !output_file) usage(argv[0]); setup_shm(0); atexit(at_exit_handler); @@ -1190,7 +1202,7 @@ int main(int argc, char** argv) { set_up_environment(); find_binary(argv[optind]); - detect_file_args(argv + optind, prog_in); + detect_file_args(argv + optind, out_file); if (qemu_mode) use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind); @@ -1229,12 +1241,12 @@ int main(int argc, char** argv) { minimize(use_argv); - ACTF("Writing output to '%s'...", out_file); + ACTF("Writing output to '%s'...", output_file); - unlink(prog_in); - prog_in = NULL; + unlink(out_file); + out_file = NULL; - close(write_to_file(out_file, in_data, in_len)); + close(write_to_file(output_file, in_data, in_len)); OKF("We're done here. 
Have a nice day!\n"); diff --git a/docs/ChangeLog b/docs/ChangeLog index dfebb68a..ed8e0022 100644 --- a/docs/ChangeLog +++ b/docs/ChangeLog @@ -20,12 +20,13 @@ Version ++2.53d (dev): - llvm 9 is now supported (still needs testing) - fix building qemu on some Ubuntus (thanks to floyd!) - custom mutator by a loaded library is now supported (thanks to kyakdan!) + - more support for *BSD (thanks to devnexen!) + - fix building on *BSD (thanks to tobias.kortkamp for the patch) - fix for a few features to support different map sized than 2^16 - afl-showmap: new option -r now shows the real values in the buckets (stock afl never did), plus shows tuple content summary information now - - fix building on *BSD (thanks to tobias.kortkamp for the patch) + - the forkserver is now in its own C file to be easily integratable - small docu updates - - ... your patch? :) -------------------------- From 742aed4f2e8d46cd9a92c4eafb89986049bedfe4 Mon Sep 17 00:00:00 2001 From: Joey Jiao Date: Thu, 25 Jul 2019 09:12:48 +0800 Subject: [PATCH 25/83] Add support for Android --- afl-analyze.c | 3 ++ afl-fuzz.c | 8 +++- afl-gotcpu.c | 3 ++ afl-showmap.c | 3 ++ afl-tmin.c | 3 ++ afl-whatsup | 2 +- android-ashmem.h | 81 +++++++++++++++++++++++++++++++++++ llvm_mode/afl-llvm-pass.so.cc | 5 +++ llvm_mode/afl-llvm-rt.o.c | 7 +++ sharedmem.c | 3 ++ 10 files changed, 115 insertions(+), 3 deletions(-) create mode 100644 android-ashmem.h diff --git a/afl-analyze.c b/afl-analyze.c index 53b694ec..0e8c9fb0 100644 --- a/afl-analyze.c +++ b/afl-analyze.c @@ -21,6 +21,9 @@ #define AFL_MAIN +#ifdef __ANDROID__ + #include "android-ashmem.h" +#endif #include "config.h" #include "types.h" #include "debug.h" diff --git a/afl-fuzz.c b/afl-fuzz.c index e9fb8bf0..0e252bea 100644 --- a/afl-fuzz.c +++ b/afl-fuzz.c @@ -28,6 +28,9 @@ #endif #define _FILE_OFFSET_BITS 64 +#ifdef __ANDROID__ + #include "android-ashmem.h" +#endif #include "config.h" #include "types.h" #include "debug.h" @@ -11318,6 +11321,7 @@ static void check_term_size(void) { if (ioctl(1, TIOCGWINSZ, &ws)) return; + if (ws.ws_row == 0 || ws.ws_col == 0) return; if (ws.ws_row < 24 || ws.ws_col < 79) term_too_small = 1; } @@ -12370,8 +12374,8 @@ int main(int argc, char** argv) { if (unicorn_mode) FATAL("-U and -n are mutually exclusive"); } - - if (index(argv[optind], '/') == NULL) WARNF(cLRD "Target binary called without a prefixed path, make sure you are fuzzing the right binary: " cRST "%s", argv[optind]); + + if (strchr(argv[optind], '/') == NULL) WARNF(cLRD "Target binary called without a prefixed path, make sure you are fuzzing the right binary: " cRST "%s", argv[optind]); OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" Eissfeldt and Andrea Fioraldi"); OKF("afl++ is open source, get it at https://github.com/vanhauser-thc/AFLplusplus"); diff --git a/afl-gotcpu.c b/afl-gotcpu.c index 4163ad65..8c04b205 100644 --- a/afl-gotcpu.c +++ b/afl-gotcpu.c @@ -28,6 +28,9 @@ #define AFL_MAIN #define _GNU_SOURCE +#ifdef __ANDROID__ + #include "android-ashmem.h" +#endif #include #include #include diff --git a/afl-showmap.c b/afl-showmap.c index bce7cb4e..a490bca6 100644 --- a/afl-showmap.c +++ b/afl-showmap.c @@ -23,6 +23,9 @@ #define AFL_MAIN +#ifdef __ANDROID__ + #include "android-ashmem.h" +#endif #include "config.h" #include "types.h" #include "debug.h" diff --git a/afl-tmin.c b/afl-tmin.c index 94f3bb3f..a36acd10 100644 --- a/afl-tmin.c +++ b/afl-tmin.c @@ -21,6 +21,9 @@ #define AFL_MAIN +#ifdef __ANDROID__ + #include "android-ashmem.h" +#endif #include 
"config.h" #include "types.h" #include "debug.h" diff --git a/afl-whatsup b/afl-whatsup index a4d30418..c1e41529 100755 --- a/afl-whatsup +++ b/afl-whatsup @@ -54,7 +54,7 @@ fi CUR_TIME=`date +%s` -TMP=`mktemp -t .afl-whatsup-XXXXXXXX` || exit 1 +TMP=`mktemp -t .afl-whatsup-XXXXXXXX` || TMP=`mktemp -p /data/local/tmp .afl-whatsup-XXXXXXXX` || exit 1 ALIVE_CNT=0 DEAD_CNT=0 diff --git a/android-ashmem.h b/android-ashmem.h new file mode 100644 index 00000000..a787c04b --- /dev/null +++ b/android-ashmem.h @@ -0,0 +1,81 @@ +#ifndef _ANDROID_ASHMEM_H +#define _ANDROID_ASHMEM_H + +#include +#include +#include +#include +#include + +#if __ANDROID_API__ >= 26 +#define shmat bionic_shmat +#define shmctl bionic_shmctl +#define shmdt bionic_shmdt +#define shmget bionic_shmget +#endif + #include +#undef shmat +#undef shmctl +#undef shmdt +#undef shmget +#include + +#define ASHMEM_DEVICE "/dev/ashmem" + +static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) +{ + int ret = 0; + if (__cmd == IPC_RMID) { + int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL); + struct ashmem_pin pin = {0, length}; + ret = ioctl(__shmid, ASHMEM_UNPIN, &pin); + close(__shmid); + } + + return ret; +} + +static inline int shmget (key_t __key, size_t __size, int __shmflg) +{ + int fd,ret; + char ourkey[11]; + + fd = open(ASHMEM_DEVICE, O_RDWR); + if (fd < 0) + return fd; + + sprintf(ourkey,"%d",__key); + ret = ioctl(fd, ASHMEM_SET_NAME, ourkey); + if (ret < 0) + goto error; + + ret = ioctl(fd, ASHMEM_SET_SIZE, __size); + if (ret < 0) + goto error; + + return fd; + +error: + close(fd); + return ret; +} + +static inline void *shmat (int __shmid, const void *__shmaddr, int __shmflg) +{ + int size; + void *ptr; + + size = ioctl(__shmid, ASHMEM_GET_SIZE, NULL); + if (size < 0) { + return NULL; + } + + ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, __shmid, 0); + if (ptr == MAP_FAILED) { + return NULL; + } + + return ptr; +} + +#endif diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc index cfeff968..bdad835f 100644 --- a/llvm_mode/afl-llvm-pass.so.cc +++ b/llvm_mode/afl-llvm-pass.so.cc @@ -129,9 +129,14 @@ bool AFLCoverage::runOnModule(Module &M) { new GlobalVariable(M, PointerType::get(Int8Ty, 0), false, GlobalValue::ExternalLinkage, 0, "__afl_area_ptr"); +#ifdef __ANDROID__ + GlobalVariable *AFLPrevLoc = new GlobalVariable( + M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc"); +#else GlobalVariable *AFLPrevLoc = new GlobalVariable( M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0, GlobalVariable::GeneralDynamicTLSModel, 0, false); +#endif /* Instrument all the things! */ diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index debde204..67208454 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -19,6 +19,9 @@ */ +#ifdef __ANDROID__ + #include "android-ashmem.h" +#endif #include "../config.h" #include "../types.h" @@ -55,7 +58,11 @@ u8 __afl_area_initial[MAP_SIZE]; u8* __afl_area_ptr = __afl_area_initial; +#ifdef __ANDROID__ +u32 __afl_prev_loc; +#else __thread u32 __afl_prev_loc; +#endif /* Running in persistent mode? 
*/ diff --git a/sharedmem.c b/sharedmem.c index 3fd38444..ce3b76e6 100644 --- a/sharedmem.c +++ b/sharedmem.c @@ -4,6 +4,9 @@ #define AFL_MAIN +#ifdef __ANDROID__ + #include "android-ashmem.h" +#endif #include "config.h" #include "types.h" #include "debug.h" From a51d4227b6c1a6fec2a471aa9497b6d8201411ae Mon Sep 17 00:00:00 2001 From: Joey Jiao Date: Mon, 19 Aug 2019 09:31:50 +0800 Subject: [PATCH 26/83] Symlink Makefile to Android.mk --- Android.mk | 1 + 1 file changed, 1 insertion(+) create mode 120000 Android.mk diff --git a/Android.mk b/Android.mk new file mode 120000 index 00000000..33ceb8f0 --- /dev/null +++ b/Android.mk @@ -0,0 +1 @@ +Makefile \ No newline at end of file From cc55e5c6d8ead610606649fa5aad39671f55bece Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 21 Aug 2019 09:36:31 +0200 Subject: [PATCH 27/83] remove compcov immediates-only instrumentation from TODO --- TODO | 2 -- 1 file changed, 2 deletions(-) diff --git a/TODO b/TODO index 692f6609..89e307cf 100644 --- a/TODO +++ b/TODO @@ -55,5 +55,3 @@ Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges. qemu_mode: - persistent mode patching the return address (WinAFL style) - - instrument only comparison with immediate values by default when using compcov - From b1ebd62c78e81bcd0731782f102276e4af459cea Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 21 Aug 2019 09:57:26 +0200 Subject: [PATCH 28/83] update env_variables.txt with compcov levels --- docs/env_variables.txt | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/env_variables.txt b/docs/env_variables.txt index 93066dbc..821463ae 100644 --- a/docs/env_variables.txt +++ b/docs/env_variables.txt @@ -245,9 +245,19 @@ The QEMU wrapper used to instrument binary-only code supports several settings: - Setting AFL_INST_LIBS causes the translator to also instrument the code inside any dynamically linked libraries (notably including glibc). + - Setting AFL_COMPCOV_LEVEL enables the CompareCoverage tracing of all cmp + and sub instructions in x86 and x86_64 and of memory comparison functions + (e.g. strcmp, memcmp, ...) when libcompcov is preloaded using AFL_PRELOAD. + More info at qemu_mode/libcompcov/README.compcov. + There are two levels at the moment: AFL_COMPCOV_LEVEL=1 instruments + only comparisons with immediate values / read-only memory, and + AFL_COMPCOV_LEVEL=2 instruments all the comparisons. Level 2 is more + accurate but may need a larger shared memory map. + - Setting AFL_QEMU_COMPCOV enables the CompareCoverage tracing of all - cmp and sub in x86 and x86_64. Support for other architectures and - comparison functions (mem/strcmp et al.) is planned. + cmp and sub in x86 and x86_64. + This is an alias of AFL_COMPCOV_LEVEL=1 when AFL_COMPCOV_LEVEL is + not specified. - The underlying QEMU binary will recognize any standard "user space emulation" variables (e.g., QEMU_STACK_SIZE), but there should be no @@ -260,10 +270,7 @@ The QEMU wrapper used to instrument binary-only code supports several settings: - AFL_ENTRYPOINT allows you to specify a specific entrypoint into the binary (this can be very good for the performance!). The entrypoint is specified as hex address, e.g. 0x4004110 - - - AFL_QEMU_COMPCOV is for a sub-project in qemu_mode called ./libcompcov - which implements laf-intel for qemu. It also needs AFL_PRELOAD and - you can find more information in qemu_mode/libcompcov/README.compcov + Note that the address must be the address of a basic block.
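To make the two CompCov levels concrete, here is a small hedged C sketch of a hypothetical target (the code and the compute_key() helper are invented for illustration; they are not part of any patch in this series):

  #include <string.h>

  /* hypothetical stand-in for some computed, writable secret */
  static void compute_key(char *key) { memset(key, 'K', 8); }

  static int check(const char *buf, unsigned int x) {

    /* level 1 and 2: 0xCAFEBABE is an immediate operand of the cmp */
    if (x == 0xCAFEBABE) return 1;

    /* level 1 and 2 (via libcompcov): the literal "MAGIC!" lives in a
       read-only mapping */
    if (!strcmp(buf, "MAGIC!")) return 2;

    /* level 2 only: both memcmp operands are writable buffers */
    char key[8];
    compute_key(key);
    if (!memcmp(buf, key, 8)) return 3;

    return 0;

  }

Assuming a target shaped like this, AFL_COMPCOV_LEVEL=1 already turns the first two checks into byte-granular comparisons that afl-fuzz can solve incrementally; the last one is only traced with AFL_COMPCOV_LEVEL=2.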
5) Settings for afl-cmin ------------------------ From 790d717543ae415ee30224644dd45fa408bba0c5 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 21 Aug 2019 10:09:46 +0200 Subject: [PATCH 29/83] update README.qemu with compcov levels --- qemu_mode/README.qemu | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/qemu_mode/README.qemu b/qemu_mode/README.qemu index 754c0259..cd8559ad 100644 --- a/qemu_mode/README.qemu +++ b/qemu_mode/README.qemu @@ -16,14 +16,16 @@ with afl-gcc. The usual performance cost is 2-5x, which is considerably better than seen so far in experiments with tools such as DynamoRIO and PIN. -The idea and much of the implementation comes from Andrew Griffiths. +The idea and much of the initial implementation come from Andrew Griffiths. +The actual implementation on QEMU 3 (shipped with afl++) is from +Andrea Fioraldi. Special thanks to abiondo, who re-enabled TCG chaining. 2) How to use ------------- -The feature is implemented with a fairly simple patch to QEMU 2.10.0. The -simplest way to build it is to run ./build_qemu_support.sh. The script will -download, configure, and compile the QEMU binary for you. +The feature is implemented with a patch to QEMU 3.1.0. The simplest way +to build it is to run ./build_qemu_support.sh. The script will download, +configure, and compile the QEMU binary for you. QEMU is a big project, so this will take a while, and you may have to resolve a couple of dependencies (most notably, you will definitely need @@ -53,10 +55,18 @@ There is ./libcompcov/ which implements laf-intel (splitting memcmp, strncmp, etc. to make these conditions easier to solve for afl-fuzz). Highly recommended. +The option that enables QEMU CompareCoverage is AFL_COMPCOV_LEVEL. +AFL_COMPCOV_LEVEL=1 instruments only comparisons with immediate +values / read-only memory. AFL_COMPCOV_LEVEL=2 instruments all +comparison instructions and memory comparison functions when libcompcov +is preloaded. Comparison instructions are currently instrumented only +on the x86 and x86_64 targets. + Another option is the environment variable AFL_ENTRYPOINT which allows moving the forkserver to a different part, e.g. just before the file is opened (i.e. way after command line parsing and config file loading, etc.) -which can be a huge speed improvement. +which can be a huge speed improvement. Note that the specified address +must be an address of a basic block.
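To illustrate why moving the forkserver pays off, here is a hedged sketch of a hypothetical target (all names invented for illustration): everything above the chosen entry point runs exactly once, so the expensive setup is no longer paid on every execution.

  #include <stdio.h>

  static void load_config(void) {
    /* slow, input-independent setup: config files, tables, etc. */
  }

  static int process_input(FILE *f) {
    int c, sum = 0;
    while ((c = fgetc(f)) != EOF) sum += c;  /* the code worth fuzzing */
    return sum;
  }

  int main(int argc, char **argv) {

    if (argc < 2) return 1;
    load_config();  /* runs only once, before the fork point */

    /* a good AFL_ENTRYPOINT is the basic block that opens the input:
       the setup above is skipped on every run, while each new test
       case is still read from scratch */
    FILE *f = fopen(argv[1], "rb");
    if (!f) return 1;

    printf("%d\n", process_input(f));
    fclose(f);
    return 0;

  }

The concrete address has to be looked up with a disassembler and, as noted above, must be the start of a basic block.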
4) Notes on linking ------------------- From e72d4a96bf50b9ae66b95203159f89e1adf2644a Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Mon, 26 Aug 2019 02:51:14 +0200 Subject: [PATCH 30/83] Make install script executable --- unicorn_mode/build_unicorn_support.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 unicorn_mode/build_unicorn_support.sh diff --git a/unicorn_mode/build_unicorn_support.sh b/unicorn_mode/build_unicorn_support.sh old mode 100644 new mode 100755 From 0e59a591693901ec6a69c7de2e9de2dcca52c101 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 13:31:35 +0200 Subject: [PATCH 31/83] include and src folders --- Makefile | 46 ++++++++++++------------ TODO | 2 +- alloc-inl.h => include/alloc-inl.h | 0 afl-as.h => include/as.h | 0 afl-common.h => include/common.h | 0 config.h => include/config.h | 0 debug.h => include/debug.h | 0 afl-forkserver.h => include/forkserver.h | 0 hash.h => include/hash.h | 0 afl-sharedmem.h => include/sharedmem.h | 0 types.h => include/types.h | 0 afl-analyze.c => src/afl-analyze.c | 4 +-- afl-as.c => src/afl-as.c | 2 +- afl-common.c => src/afl-common.c | 0 afl-forkserver.c => src/afl-forkserver.c | 2 +- afl-fuzz.c => src/afl-fuzz.c | 6 ++-- afl-gcc.c => src/afl-gcc.c | 0 afl-gotcpu.c => src/afl-gotcpu.c | 0 afl-sharedmem.c => src/afl-sharedmem.c | 2 +- afl-showmap.c => src/afl-showmap.c | 4 +-- afl-tmin.c => src/afl-tmin.c | 6 ++-- test-instr.c => src/test-instr.c | 0 22 files changed, 37 insertions(+), 37 deletions(-) rename alloc-inl.h => include/alloc-inl.h (100%) rename afl-as.h => include/as.h (100%) rename afl-common.h => include/common.h (100%) rename config.h => include/config.h (100%) rename debug.h => include/debug.h (100%) rename afl-forkserver.h => include/forkserver.h (100%) rename hash.h => include/hash.h (100%) rename afl-sharedmem.h => include/sharedmem.h (100%) rename types.h => include/types.h (100%) rename afl-analyze.c => src/afl-analyze.c (99%) rename afl-as.c => src/afl-as.c (99%) rename afl-common.c => src/afl-common.c (100%) rename afl-forkserver.c => src/afl-forkserver.c (99%) rename afl-fuzz.c => src/afl-fuzz.c (99%) rename afl-gcc.c => src/afl-gcc.c (100%) rename afl-gotcpu.c => src/afl-gotcpu.c (100%) rename afl-sharedmem.c => src/afl-sharedmem.c (99%) rename afl-showmap.c => src/afl-showmap.c (99%) rename afl-tmin.c => src/afl-tmin.c (99%) rename test-instr.c => src/test-instr.c (100%) diff --git a/Makefile b/Makefile index 3d5059f7..9699a0ad 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ #TEST_MMAP=1 PROGNAME = afl -VERSION = $(shell grep '^\#define VERSION ' config.h | cut -d '"' -f2) +VERSION = $(shell grep '^\#define VERSION ' include/config.h | cut -d '"' -f2) PREFIX ?= /usr/local BIN_PATH = $(PREFIX)/bin @@ -31,7 +31,7 @@ PROGS = afl-gcc afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze SH_PROGS = afl-plot afl-cmin afl-whatsup afl-system-config CFLAGS ?= -O3 -funroll-loops -CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \ +CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I include/ \ -DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \ -DBIN_PATH=\"$(BIN_PATH)\" @@ -47,7 +47,7 @@ else TEST_CC = afl-clang endif -COMM_HDR = alloc-inl.h config.h debug.h types.h +COMM_HDR = include/alloc-inl.h include/config.h include/debug.h include/types.h ifeq "$(shell echo '\#include @int main() {return 0; }' | tr @ '\n' | $(CC) -x c - -o .test -I$(PYTHON_INCLUDE) -lpython2.7 2>/dev/null && echo 1 || echo 0 )" "1" @@ -123,37 +123,37 @@ endif 
ready: @echo "[+] Everything seems to be working, ready to compile." -afl-gcc: afl-gcc.c $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS) +afl-gcc: src/afl-gcc.c $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS) set -e; for i in afl-g++ afl-clang afl-clang++; do ln -sf afl-gcc $$i; done -afl-as: afl-as.c afl-as.h $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS) +afl-as: src/afl-as.c include/afl-as.h $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS) ln -sf afl-as as -afl-common.o : afl-common.c - $(CC) $(CFLAGS) -c afl-common.c +afl-common.o : src/afl-common.c include/common.h + $(CC) $(CFLAGS) -c src/afl-common.c -afl-forkserver.o : afl-forkserver.c - $(CC) $(CFLAGS) -c afl-forkserver.c +afl-forkserver.o : src/afl-forkserver.c include/forkserver.h + $(CC) $(CFLAGS) -c src/afl-forkserver.c -afl-sharedmem.o : afl-sharedmem.c - $(CC) $(CFLAGS) -c afl-sharedmem.c +afl-sharedmem.o : src/afl-sharedmem.c include/sharedmem.h + $(CC) $(CFLAGS) -c src/afl-sharedmem.c -afl-fuzz: afl-fuzz.c afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) $(PYFLAGS) +afl-fuzz: src/afl-fuzz.c afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) $(PYFLAGS) -afl-showmap: afl-showmap.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS) +afl-showmap: src/afl-showmap.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS) -afl-tmin: afl-tmin.c afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) +afl-tmin: src/afl-tmin.c afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) -afl-analyze: afl-analyze.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS) +afl-analyze: src/afl-analyze.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS) -afl-gotcpu: afl-gotcpu.c $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) $@.c -o $@ $(LDFLAGS) +afl-gotcpu: src/afl-gotcpu.c $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS) ifndef AFL_NO_X86 diff --git a/TODO b/TODO index cc075abd..08dc1bb8 100644 --- a/TODO +++ b/TODO @@ -68,5 +68,5 @@ Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges. 
qemu_mode: - persistent mode patching the return address (WinAFL style) - - instrument only comparison with immediate values by default when using compcov + - instrument only comparison with immediate values by default when using compcov (done) diff --git a/alloc-inl.h b/include/alloc-inl.h similarity index 100% rename from alloc-inl.h rename to include/alloc-inl.h diff --git a/afl-as.h b/include/as.h similarity index 100% rename from afl-as.h rename to include/as.h diff --git a/afl-common.h b/include/common.h similarity index 100% rename from afl-common.h rename to include/common.h diff --git a/config.h b/include/config.h similarity index 100% rename from config.h rename to include/config.h diff --git a/debug.h b/include/debug.h similarity index 100% rename from debug.h rename to include/debug.h diff --git a/afl-forkserver.h b/include/forkserver.h similarity index 100% rename from afl-forkserver.h rename to include/forkserver.h diff --git a/hash.h b/include/hash.h similarity index 100% rename from hash.h rename to include/hash.h diff --git a/afl-sharedmem.h b/include/sharedmem.h similarity index 100% rename from afl-sharedmem.h rename to include/sharedmem.h diff --git a/types.h b/include/types.h similarity index 100% rename from types.h rename to include/types.h diff --git a/afl-analyze.c b/src/afl-analyze.c similarity index 99% rename from afl-analyze.c rename to src/afl-analyze.c index 18b7456d..e12f9194 100644 --- a/afl-analyze.c +++ b/src/afl-analyze.c @@ -26,8 +26,8 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "afl-sharedmem.h" -#include "afl-common.h" +#include "sharedmem.h" +#include "common.h" #include #include diff --git a/afl-as.c b/src/afl-as.c similarity index 99% rename from afl-as.c rename to src/afl-as.c index 94595f24..063e3bcd 100644 --- a/afl-as.c +++ b/src/afl-as.c @@ -35,7 +35,7 @@ #include "debug.h" #include "alloc-inl.h" -#include "afl-as.h" +#include "as.h" #include #include diff --git a/afl-common.c b/src/afl-common.c similarity index 100% rename from afl-common.c rename to src/afl-common.c diff --git a/afl-forkserver.c b/src/afl-forkserver.c similarity index 99% rename from afl-forkserver.c rename to src/afl-forkserver.c index 226175e1..0051f6b0 100644 --- a/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -1,7 +1,7 @@ #include "config.h" #include "types.h" #include "debug.h" -#include "afl-forkserver.h" +#include "forkserver.h" #include #include diff --git a/afl-fuzz.c b/src/afl-fuzz.c similarity index 99% rename from afl-fuzz.c rename to src/afl-fuzz.c index ec54cc85..0285a242 100644 --- a/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -33,9 +33,9 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "afl-sharedmem.h" -#include "afl-forkserver.h" -#include "afl-common.h" +#include "sharedmem.h" +#include "forkserver.h" +#include "common.h" #include #include diff --git a/afl-gcc.c b/src/afl-gcc.c similarity index 100% rename from afl-gcc.c rename to src/afl-gcc.c diff --git a/afl-gotcpu.c b/src/afl-gotcpu.c similarity index 100% rename from afl-gotcpu.c rename to src/afl-gotcpu.c diff --git a/afl-sharedmem.c b/src/afl-sharedmem.c similarity index 99% rename from afl-sharedmem.c rename to src/afl-sharedmem.c index 400a0a46..3fd38444 100644 --- a/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -9,7 +9,7 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "afl-sharedmem.h" +#include "sharedmem.h" #include #include diff --git a/afl-showmap.c b/src/afl-showmap.c similarity index 99% rename from afl-showmap.c rename to 
src/afl-showmap.c index 96b7b5e0..66c77094 100644 --- a/afl-showmap.c +++ b/src/afl-showmap.c @@ -28,8 +28,8 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "afl-sharedmem.h" -#include "afl-common.h" +#include "sharedmem.h" +#include "common.h" #include #include diff --git a/afl-tmin.c b/src/afl-tmin.c similarity index 99% rename from afl-tmin.c rename to src/afl-tmin.c index e83b217d..663bb510 100644 --- a/afl-tmin.c +++ b/src/afl-tmin.c @@ -27,9 +27,9 @@ #include "debug.h" #include "alloc-inl.h" #include "hash.h" -#include "afl-forkserver.h" -#include "afl-sharedmem.h" -#include "afl-common.h" +#include "forkserver.h" +#include "sharedmem.h" +#include "common.h" #include #include diff --git a/test-instr.c b/src/test-instr.c similarity index 100% rename from test-instr.c rename to src/test-instr.c From b6f5e1635cbdcc3031c4af18ef3a877d2d7db77f Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 14:02:48 +0200 Subject: [PATCH 32/83] added afl++ patches authors to special thanks --- README.md | 8 +++++--- qemu_mode/patches/afl-qemu-cpu-inl.h | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 9ff7c24b..76bd98c8 100644 --- a/README.md +++ b/README.md @@ -559,8 +559,8 @@ Beyond this, see INSTALL for platform-specific tips. ## 15) Special thanks ------------------ -Many of the improvements to the original afl wouldn't be possible without -feedback, bug reports, or patches from: +Many of the improvements to the original afl and afl++ wouldn't be possible +without feedback, bug reports, or patches from: ``` Jann Horn Hanno Boeck @@ -602,7 +602,9 @@ feedback, bug reports, or patches from: Rene Freingruber Sergey Davidoff Sami Liedes Craig Young Andrzej Jackowski Daniel Hodson - Nathan Voss Dominik Maier + Nathan Voss Dominik Maier + Andrea Biondo Vincent Le Garrec + Khaled Yakdan Kuang-che Wu ``` Thank you! 
diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h index d7bb4d25..04d9007d 100644 --- a/qemu_mode/patches/afl-qemu-cpu-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-inl.h @@ -332,7 +332,7 @@ static void afl_wait_tsl(CPUState *cpu, int fd) { if (is_valid_addr(t.tb.pc)) { mmap_lock(); - tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, 0); + tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask); mmap_unlock(); } else { From 4adca18337a5036d93d78e0522bcaa066e0a85b9 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 15:04:27 +0200 Subject: [PATCH 33/83] afl-as is now alive --- include/{as.h => afl-as.h} | 0 src/afl-as.c | 2 +- src/test-instr.c => test-instr.c | 0 3 files changed, 1 insertion(+), 1 deletion(-) rename include/{as.h => afl-as.h} (100%) rename src/test-instr.c => test-instr.c (100%) diff --git a/include/as.h b/include/afl-as.h similarity index 100% rename from include/as.h rename to include/afl-as.h diff --git a/src/afl-as.c b/src/afl-as.c index 063e3bcd..94595f24 100644 --- a/src/afl-as.c +++ b/src/afl-as.c @@ -35,7 +35,7 @@ #include "debug.h" #include "alloc-inl.h" -#include "as.h" +#include "afl-as.h" #include #include diff --git a/src/test-instr.c b/test-instr.c similarity index 100% rename from src/test-instr.c rename to test-instr.c From 7338568125f4a3831079550294275ef18b603ab2 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 15:17:43 +0200 Subject: [PATCH 34/83] removed sepration lines from README --- README.md | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/README.md b/README.md index 76bd98c8..2bd31a54 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,6 @@ ## 1) Challenges of guided fuzzing -------------------------------- Fuzzing is one of the most powerful and proven strategies for identifying security issues in real-world software; it is responsible for the vast @@ -177,7 +176,6 @@ file for important caveats. ## 4) Instrumenting binary-only apps ---------------------------------- When source code is *NOT* available, the fuzzer offers experimental support for fast, on-the-fly instrumentation of black-box binaries. This is accomplished @@ -205,7 +203,6 @@ A more comprehensive description of these and other options can be found in ## 5) Power schedules ------------------- The power schedules were copied from Marcel Böhme's excellent AFLfast implementation and expands on the ability to discover new paths and @@ -237,7 +234,6 @@ Computer and Communications Security (CCS'16): ## 6) Choosing initial test cases ------------------------------- To operate correctly, the fuzzer requires one or more starting file that contains a good example of the input data normally expected by the targeted @@ -259,7 +255,6 @@ exercise different code paths in the target binary. ## 7) Fuzzing binaries -------------------- The fuzzing process itself is carried out by the afl-fuzz utility. This program requires a read-only directory with initial test cases, a separate place to @@ -298,7 +293,6 @@ fuzzers - add the -d option to the command line. ## 8) Interpreting output ----------------------- See the [docs/status_screen.txt](docs/status_screen.txt) file for information on how to interpret the displayed stats and monitor the health of the process. Be @@ -360,7 +354,6 @@ see [http://lcamtuf.coredump.cx/afl/plot/](http://lcamtuf.coredump.cx/afl/plot/) ## 9) Parallelized fuzzing ------------------------ Every instance of afl-fuzz takes up roughly one core. 
This means that on multi-core systems, parallelization is necessary to fully utilize the hardware. @@ -373,7 +366,6 @@ last section of [docs/parallel_fuzzing.txt](docs/parallel_fuzzing.txt) for tips. ## 10) Fuzzer dictionaries ----------------------- By default, afl-fuzz mutation engine is optimized for compact data formats - say, images, multimedia, compressed data, regular expression syntax, or shell @@ -410,7 +402,6 @@ utility with AFL. For that, see [libtokencap/README.tokencap](libtokencap/README ## 11) Crash triage ----------------- The coverage-based grouping of crashes usually produces a small data set that can be quickly triaged manually or with a very simple GDB or Valgrind script. @@ -459,7 +450,6 @@ near the end of [docs/technical_details.txt](docs/technical_details.txt). ## 12) Going beyond crashes ------------------------- Fuzzing is a wonderful and underutilized technique for discovering non-crashing design and implementation errors, too. Quite a few interesting bugs have been @@ -484,7 +474,6 @@ shared with libfuzzer) or `#ifdef __AFL_COMPILER` (this one is just for AFL). ## 13) Common-sense risks ----------------------- Please keep in mind that, similarly to many other computationally-intensive tasks, fuzzing may put strain on your hardware and on the OS. In particular: @@ -515,7 +504,6 @@ tasks, fuzzing may put strain on your hardware and on the OS. In particular: ## 14) Known limitations & areas for improvement ---------------------------------------------- Here are some of the most important caveats for AFL: @@ -557,7 +545,6 @@ Beyond this, see INSTALL for platform-specific tips. ## 15) Special thanks ------------------- Many of the improvements to the original afl and afl++ wouldn't be possible without feedback, bug reports, or patches from: @@ -611,7 +598,6 @@ Thank you! ## 16) Contact ------------ Questions? Concerns? Bug reports? The contributors can be reached via [https://github.com/vanhauser-thc/AFLplusplus](https://github.com/vanhauser-thc/AFLplusplus) From 10df5ad0ac3dcff705f6932487fecbdaf690e1f0 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 27 Aug 2019 16:22:25 +0200 Subject: [PATCH 35/83] docu update --- README.md | 3 ++- TODO | 2 ++ docs/ChangeLog | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2bd31a54..14e1ae59 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,8 @@ Among others afl++ has, e.g. more performant llvm_mode, supporting llvm up to version 9, Qemu 3.1, more speed and crashfixes for Qemu, - laf-intel feature for Qemu (with libcompcov) and more. + laf-intel feature for Qemu (with libcompcov), better *BSD and Android + support and more. Additionally the following patches have been integrated: diff --git a/TODO b/TODO index 89e307cf..cb95f899 100644 --- a/TODO +++ b/TODO @@ -7,6 +7,8 @@ Roadmap 2.53d: afl-fuzz: - put mutator, scheduler, forkserver and input channels in individual files - reuse forkserver for showmap, afl-cmin, etc. + - custom mutator lib: example and readme + - env var to exclusively run the custom lib/py mutator gcc_plugin: - needs to be rewritten diff --git a/docs/ChangeLog b/docs/ChangeLog index dfebb68a..6d56d314 100644 --- a/docs/ChangeLog +++ b/docs/ChangeLog @@ -18,6 +18,7 @@ Version ++2.53d (dev): ---------------------- - llvm 9 is now supported (still needs testing) + - Android is now supported (thank to JoeyJiao!) - still need to modify the Makefile though - fix building qemu on some Ubuntus (thanks to floyd!) 
- custom mutator by a loaded library is now supported (thanks to kyakdan!) - fix for a few features to support different map sized than 2^16 From 17228d27e52e3aaee168d7a3f06fd2afff57dcd0 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 17:02:26 +0200 Subject: [PATCH 36/83] config.h and types.h symlink in root --- config.h | 1 + types.h | 1 + 2 files changed, 2 insertions(+) create mode 120000 config.h create mode 120000 types.h diff --git a/config.h b/config.h new file mode 120000 index 00000000..046ab52a --- /dev/null +++ b/config.h @@ -0,0 +1 @@ +include/config.h \ No newline at end of file diff --git a/types.h b/types.h new file mode 120000 index 00000000..67149a67 --- /dev/null +++ b/types.h @@ -0,0 +1 @@ +include/types.h \ No newline at end of file From d7b707a71c187b2613e025bd337c19946ba9109e Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 17:04:23 +0200 Subject: [PATCH 37/83] symlink include/debug.h to root --- debug.h | 1 + 1 file changed, 1 insertion(+) create mode 120000 debug.h diff --git a/debug.h b/debug.h new file mode 120000 index 00000000..a00dd92f --- /dev/null +++ b/debug.h @@ -0,0 +1 @@ +include/debug.h \ No newline at end of file From cd259fe1180e39fe311abe48f32675f9feb72cb2 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 17:26:04 +0200 Subject: [PATCH 38/83] add custom format wrapping clang-format --- .clang-format | 148 ++++++++++++++++++++++++++++++++++++++++++++++ .custom-format.py | 71 ++++++++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100644 .clang-format create mode 100755 .custom-format.py diff --git a/.clang-format b/.clang-format new file mode 100644 index 00000000..e506392d --- /dev/null +++ b/.clang-format @@ -0,0 +1,148 @@ +--- +Language: Cpp +# BasedOnStyle: Google +AccessModifierOffset: -1 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: true +AlignEscapedNewlines: Left +AlignOperands: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: false +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: Yes +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: BeforeColon +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: true +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: + - foreach + - 
Q_FOREACH + - BOOST_FOREACH +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^' + Priority: 2 + - Regex: '^<.*\.h>' + Priority: 1 + - Regex: '^<.*' + Priority: 2 + - Regex: '.*' + Priority: 3 +IncludeIsMainRegex: '([-_](test|unittest))?$' +IndentCaseLabels: true +IndentPPDirectives: None +IndentWidth: 2 +IndentWrappedFunctionNames: false +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Never +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Right +RawStringFormats: + - Language: Cpp + Delimiters: + - cc + - CC + - cpp + - Cpp + - CPP + - 'c++' + - 'C++' + CanonicalDelimiter: '' + BasedOnStyle: google + - Language: TextProto + Delimiters: + - pb + - PB + - proto + - PROTO + EnclosingFunctions: + - EqualsProto + - EquivToProto + - PARSE_PARTIAL_TEXT_PROTO + - PARSE_TEST_PROTO + - PARSE_TEXT_PROTO + - ParseTextOrDie + - ParseTextProtoOrDie + CanonicalDelimiter: '' + BasedOnStyle: google +ReflowComments: true +SortIncludes: true +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Auto +TabWidth: 8 +UseTab: Never +... 
+ diff --git a/.custom-format.py b/.custom-format.py new file mode 100755 index 00000000..b4a2c48a --- /dev/null +++ b/.custom-format.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 + +import subprocess +import sys + +with open(".clang-format") as f: + fmt = f.read() + +COLUMN_LIMIT = 80 +for line in fmt.split("\n"): + line = line.split(":") + if line[0].strip() == "ColumnLimit": + COLUMN_LIMIT = int(line[1].strip()) + +def custom_format(filename): + p = subprocess.Popen(['clang-format-7', filename], stdout=subprocess.PIPE) + src, _ = p.communicate() + src = str(src, "utf-8") + + macro_indent = 0 + + out = "" + for line in src.split("\n"): + if line.startswith("#"): + i = macro_indent + if line.startswith("#end") and macro_indent > 0: + macro_indent -= 1 + i -= 1 + elif line.startswith("#el") and macro_indent > 0: + i -= 1 + elif line.startswith("#if") and not (line.startswith("#ifndef") and line.endswith("_H")): + macro_indent += 1 + r = "#" + (i * " ") + line[1:] + if i != 0 and line.endswith("\\"): + r = r[:-1] + while r[-1].isspace() and len(r) != (len(line)-1): + r = r[:-1] + r += "\\" + if len(r) <= COLUMN_LIMIT: + line = r + + elif "/*" in line and not line.strip().startswith("/*") and line.endswith("*/") and len(line) < (COLUMN_LIMIT-2): + cmt_start = line.rfind("/*") + line = line[:cmt_start] + " " * (COLUMN_LIMIT-2 - len(line)) + line[cmt_start:] + + out += line + "\n" + + return (out) + +args = sys.argv[1:] +if len(args) == 0: + print ("Usage: ./format.py [-i] ") + print () + print (" The -i option, if specified, let the script to modify in-place") + print (" the source files. By default the results are written to stdout.") + print() + exit(1) + +in_place = False +if args[0] == "-i": + in_place = True + args = args[1:] + +for filename in args: + code = custom_format(filename) + if in_place: + with open(filename, "w") as f: + f.write(code) + else: + print(code) + From 0d001c09c3cf23f087623e0fec438ed3a4237ce9 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 27 Aug 2019 18:49:58 +0200 Subject: [PATCH 39/83] fix to compile llvm_mode --- llvm_mode/afl-clang-fast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c index a4bb7539..28a9a853 100644 --- a/llvm_mode/afl-clang-fast.c +++ b/llvm_mode/afl-clang-fast.c @@ -26,7 +26,7 @@ #include "../config.h" #include "../types.h" #include "../debug.h" -#include "../alloc-inl.h" +#include "../include/alloc-inl.h" #include #include From bae398a9a4d14ccddde48591ba94d5c03970d741 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 19:22:53 +0200 Subject: [PATCH 40/83] -I include in makefiles --- debug.h | 1 - libdislocator/Makefile | 2 +- libdislocator/libdislocator.so.c | 4 ++-- libtokencap/Makefile | 2 +- llvm_mode/LLVMInsTrim.so.cc | 4 ++-- llvm_mode/Makefile | 4 ++-- llvm_mode/afl-clang-fast.c | 8 ++++---- llvm_mode/afl-llvm-pass.so.cc | 4 ++-- llvm_mode/afl-llvm-rt.o.c | 4 ++-- qemu_mode/libcompcov/Makefile | 2 +- qemu_mode/libcompcov/libcompcov.so.c | 4 ++-- 11 files changed, 19 insertions(+), 20 deletions(-) delete mode 120000 debug.h diff --git a/debug.h b/debug.h deleted file mode 120000 index a00dd92f..00000000 --- a/debug.h +++ /dev/null @@ -1 +0,0 @@ -include/debug.h \ No newline at end of file diff --git a/libdislocator/Makefile b/libdislocator/Makefile index a4116780..236667ec 100644 --- a/libdislocator/Makefile +++ b/libdislocator/Makefile @@ -18,7 +18,7 @@ HELPER_PATH = $(PREFIX)/lib/afl VERSION = $(shell grep '^\#define VERSION ' ../config.h 
| cut -d '"' -f2) -CFLAGS ?= -O3 -funroll-loops +CFLAGS ?= -O3 -funroll-loops -I ../include/ CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign all: libdislocator.so diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c index 043480a6..71620b17 100644 --- a/libdislocator/libdislocator.so.c +++ b/libdislocator/libdislocator.so.c @@ -25,8 +25,8 @@ #include #include -#include "../config.h" -#include "../types.h" +#include "config.h" +#include "types.h" #ifndef PAGE_SIZE # define PAGE_SIZE 4096 diff --git a/libtokencap/Makefile b/libtokencap/Makefile index a464f76d..ec4c8f95 100644 --- a/libtokencap/Makefile +++ b/libtokencap/Makefile @@ -18,7 +18,7 @@ HELPER_PATH = $(PREFIX)/lib/afl VERSION = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2) -CFLAGS ?= -O3 -funroll-loops +CFLAGS ?= -O3 -funroll-loops -I ../include/ CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign all: libtokencap.so diff --git a/llvm_mode/LLVMInsTrim.so.cc b/llvm_mode/LLVMInsTrim.so.cc index 0a15680d..95b52d48 100644 --- a/llvm_mode/LLVMInsTrim.so.cc +++ b/llvm_mode/LLVMInsTrim.so.cc @@ -24,8 +24,8 @@ #include #include -#include "../config.h" -#include "../debug.h" +#include "config.h" +#include "debug.h" #include "MarkNodes.h" diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile index 160a8fe6..e51803c8 100644 --- a/llvm_mode/Makefile +++ b/llvm_mode/Makefile @@ -40,7 +40,7 @@ ifeq "$(LLVM_MAJOR)" "9" endif CFLAGS ?= -O3 -funroll-loops -CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign \ +CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I ../include/ \ -DAFL_PATH=\"$(HELPER_PATH)\" -DBIN_PATH=\"$(BIN_PATH)\" \ -DVERSION=\"$(VERSION)\" ifdef AFL_TRACE_PC @@ -48,7 +48,7 @@ ifdef AFL_TRACE_PC endif CXXFLAGS ?= -O3 -funroll-loops -CXXFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g \ +CXXFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -I ../include/ \ -DVERSION=\"$(VERSION)\" -Wno-variadic-macros CLANG_CFL = `$(LLVM_CONFIG) --cxxflags` -Wl,-znodelete -fno-rtti -fpic $(CXXFLAGS) diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c index 28a9a853..1b810edf 100644 --- a/llvm_mode/afl-clang-fast.c +++ b/llvm_mode/afl-clang-fast.c @@ -23,10 +23,10 @@ #define AFL_MAIN -#include "../config.h" -#include "../types.h" -#include "../debug.h" -#include "../include/alloc-inl.h" +#include "config.h" +#include "types.h" +#include "debug.h" +#include "alloc-inl.h" #include #include diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc index cfeff968..3fe7f83e 100644 --- a/llvm_mode/afl-llvm-pass.so.cc +++ b/llvm_mode/afl-llvm-pass.so.cc @@ -24,8 +24,8 @@ #define AFL_LLVM_PASS -#include "../config.h" -#include "../debug.h" +#include "config.h" +#include "debug.h" #include #include diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index debde204..1564ae9d 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -19,8 +19,8 @@ */ -#include "../config.h" -#include "../types.h" +#include "config.h" +#include "types.h" #include #include diff --git a/qemu_mode/libcompcov/Makefile b/qemu_mode/libcompcov/Makefile index c984588b..a1f4e31f 100644 --- a/qemu_mode/libcompcov/Makefile +++ b/qemu_mode/libcompcov/Makefile @@ -18,7 +18,7 @@ HELPER_PATH = $(PREFIX)/lib/afl VERSION = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2) -CFLAGS ?= -O3 -funroll-loops +CFLAGS ?= -O3 -funroll-loops -I ../../include/ CFLAGS += -Wall -Wno-unused-result -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign LDFLAGS += -ldl diff --git 
a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c index 582230db..44045d39 100644 --- a/qemu_mode/libcompcov/libcompcov.so.c +++ b/qemu_mode/libcompcov/libcompcov.so.c @@ -27,8 +27,8 @@ #include #include -#include "../../types.h" -#include "../../config.h" +#include "types.h" +#include "config.h" #include "pmparser.h" From aca63d4986540ca6c51cc90321f54509aee2ce45 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 19:35:44 +0200 Subject: [PATCH 41/83] custom format now search for the best clang-format version --- .clang-format | 2 +- .custom-format.py | 28 ++++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/.clang-format b/.clang-format index e506392d..f691d684 100644 --- a/.clang-format +++ b/.clang-format @@ -124,7 +124,7 @@ RawStringFormats: CanonicalDelimiter: '' BasedOnStyle: google ReflowComments: true -SortIncludes: true +SortIncludes: false SortUsingDeclarations: true SpaceAfterCStyleCast: false SpaceAfterTemplateKeyword: true diff --git a/.custom-format.py b/.custom-format.py index b4a2c48a..a73d92ab 100755 --- a/.custom-format.py +++ b/.custom-format.py @@ -2,10 +2,34 @@ import subprocess import sys +import os with open(".clang-format") as f: fmt = f.read() +CLANG_FORMAT_BIN = os.getenv("CLANG_FORMAT_BIN") +if CLANG_FORMAT_BIN is None: + p = subprocess.Popen(["clang-format", "--version"], stdout=subprocess.PIPE) + o, _ = p.communicate() + o = str(o, "utf-8") + o = o[len("clang-format version "):].strip() + o = o[:o.find(".")] + o = int(o) + if o < 7: + if subprocess.call(['which', 'clang-format-7'], stdout=subprocess.PIPE) == 0: + CLANG_FORMAT_BIN = 'clang-format-7' + elif subprocess.call(['which', 'clang-format-8'], stdout=subprocess.PIPE) == 0: + CLANG_FORMAT_BIN = 'clang-format-8' + elif subprocess.call(['which', 'clang-format-9'], stdout=subprocess.PIPE) == 0: + CLANG_FORMAT_BIN = 'clang-format-9' + elif subprocess.call(['which', 'clang-format-10'], stdout=subprocess.PIPE) == 0: + CLANG_FORMAT_BIN = 'clang-format-10' + else: + print ("clang-format 7 or above is needed. 
Aborted.") + exit(1) + else: + CLANG_FORMAT_BIN = 'clang-format' + COLUMN_LIMIT = 80 for line in fmt.split("\n"): line = line.split(":") @@ -13,7 +37,7 @@ for line in fmt.split("\n"): COLUMN_LIMIT = int(line[1].strip()) def custom_format(filename): - p = subprocess.Popen(['clang-format-7', filename], stdout=subprocess.PIPE) + p = subprocess.Popen([CLANG_FORMAT_BIN, filename], stdout=subprocess.PIPE) src, _ = p.communicate() src = str(src, "utf-8") @@ -28,7 +52,7 @@ def custom_format(filename): i -= 1 elif line.startswith("#el") and macro_indent > 0: i -= 1 - elif line.startswith("#if") and not (line.startswith("#ifndef") and line.endswith("_H")): + elif line.startswith("#if") and not (line.startswith("#ifndef") and (line.endswith("_H") or line.endswith("H_"))): macro_indent += 1 r = "#" + (i * " ") + line[1:] if i != 0 and line.endswith("\\"): From bec9b307db299b586c2574031d3cc1a491dc00c3 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 20:57:52 +0200 Subject: [PATCH 42/83] neverzero qemu for x86/x86_64 --- config.h | 4 ++++ qemu_mode/patches/afl-qemu-translate-inl.h | 18 ++++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/config.h b/config.h index 37a2a794..29c33d46 100644 --- a/config.h +++ b/config.h @@ -339,6 +339,10 @@ #define CTEST_CORE_TRG_MS 1000 #define CTEST_BUSY_CYCLES (10 * 1000 * 1000) +/* Enable NeverZero counters in QEMU mode */ + +#define AFL_QEMU_NOT_ZERO + /* Uncomment this to use inferior block-coverage-based instrumentation. Note that you need to recompile the target binary for this to have any effect: */ diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h index bfb2897e..9c3580e5 100644 --- a/qemu_mode/patches/afl-qemu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-translate-inl.h @@ -42,11 +42,25 @@ extern abi_ulong afl_start_code, afl_end_code; void tcg_gen_afl_maybe_log_call(target_ulong cur_loc); -void afl_maybe_log(target_ulong cur_loc) { +void afl_maybe_log(target_ulong cur_loc) { static __thread abi_ulong prev_loc; - afl_area_ptr[cur_loc ^ prev_loc]++; + register target_ulong afl_idx = cur_loc ^ prev_loc; + +#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) + asm volatile ( + "incb (%0, %1, 1)\n" + "seto %%al\n" + "addb %%al, (%0, %1, 1)\n" + : /* no out */ + : "r" (afl_area_ptr), "r" (afl_idx) + : "memory", "eax" + ); +#else + afl_area_ptr[afl_idx]++; +#endif + prev_loc = cur_loc >> 1; } From c5e0b29a22a126a90942fd31a85fcfe8486fa67c Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 27 Aug 2019 21:10:51 +0200 Subject: [PATCH 43/83] neverzero for unicorn_mode --- qemu_mode/patches/afl-qemu-translate-inl.h | 2 +- unicorn_mode/build_unicorn_support.sh | 2 +- unicorn_mode/patches/afl-unicorn-cpu-inl.h | 16 +++++++++++++++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h index 9c3580e5..f82d1217 100644 --- a/qemu_mode/patches/afl-qemu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-translate-inl.h @@ -46,7 +46,7 @@ void afl_maybe_log(target_ulong cur_loc) { static __thread abi_ulong prev_loc; - register target_ulong afl_idx = cur_loc ^ prev_loc; + register uintptr_t afl_idx = cur_loc ^ prev_loc; #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) asm volatile ( diff --git a/unicorn_mode/build_unicorn_support.sh b/unicorn_mode/build_unicorn_support.sh index 9dcf6773..3219e54c 100755 --- 
a/unicorn_mode/build_unicorn_support.sh +++ b/unicorn_mode/build_unicorn_support.sh @@ -144,7 +144,7 @@ echo "[+] Configuration complete." echo "[*] Attempting to build Unicorn (fingers crossed!)..." -UNICORN_QEMU_FLAGS='--python=python2' make || exit 1 +UNICORN_QEMU_FLAGS='--python=python2' make -j `nproc` || exit 1 echo "[+] Build process successful!" diff --git a/unicorn_mode/patches/afl-unicorn-cpu-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-inl.h index 892c3f72..ed422725 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-inl.h @@ -241,7 +241,21 @@ static inline void afl_maybe_log(unsigned long cur_loc) { // DEBUG //printf("cur_loc = 0x%lx\n", cur_loc); - afl_area_ptr[cur_loc ^ prev_loc]++; + register uintptr_t afl_idx = cur_loc ^ prev_loc; + +#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) + asm volatile ( + "incb (%0, %1, 1)\n" + "seto %%al\n" + "addb %%al, (%0, %1, 1)\n" + : /* no out */ + : "r" (afl_area_ptr), "r" (afl_idx) + : "memory", "eax" + ); +#else + afl_area_ptr[afl_idx]++; +#endif + prev_loc = cur_loc >> 1; } From 80f175daac0e2dac12aad908abb19316e85552c8 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 28 Aug 2019 13:45:37 +0200 Subject: [PATCH 44/83] unicorn compcov for x86 --- .gitignore | 1 + unicorn_mode/build_unicorn_support.sh | 7 +- unicorn_mode/patches/afl-unicorn-cpu-inl.h | 52 +++--- .../patches/afl-unicorn-cpu-translate-inl.h | 62 +++++++ unicorn_mode/patches/afl-unicorn-tcg-op-inl.h | 56 ++++++ .../patches/afl-unicorn-tcg-runtime-inl.h | 88 +++++++++ unicorn_mode/patches/compcov.diff | 113 ++++++++++++ unicorn_mode/samples/compcov_x64/COMPILE.md | 20 +++ .../samples/compcov_x64/compcov_target.bin | Bin 0 -> 86 bytes .../samples/compcov_x64/compcov_target.c | 28 +++ .../samples/compcov_x64/compcov_target.elf | Bin 0 -> 5728 bytes .../compcov_x64/compcov_test_harness.py | 170 ++++++++++++++++++ .../compcov_x64/sample_inputs/sample1.bin | 1 + 13 files changed, 572 insertions(+), 26 deletions(-) create mode 100644 unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h create mode 100644 unicorn_mode/patches/afl-unicorn-tcg-op-inl.h create mode 100644 unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h create mode 100644 unicorn_mode/patches/compcov.diff create mode 100644 unicorn_mode/samples/compcov_x64/COMPILE.md create mode 100644 unicorn_mode/samples/compcov_x64/compcov_target.bin create mode 100644 unicorn_mode/samples/compcov_x64/compcov_target.c create mode 100755 unicorn_mode/samples/compcov_x64/compcov_target.elf create mode 100644 unicorn_mode/samples/compcov_x64/compcov_test_harness.py create mode 100644 unicorn_mode/samples/compcov_x64/sample_inputs/sample1.bin diff --git a/.gitignore b/.gitignore index bb3c82eb..2ee40f62 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ qemu_mode/qemu-3.1.0 qemu_mode/qemu-3.1.0.tar.xz unicorn_mode/unicorn unicorn_mode/unicorn-* +unicorn_mode/*.tar.gz diff --git a/unicorn_mode/build_unicorn_support.sh b/unicorn_mode/build_unicorn_support.sh index 3219e54c..2c0fe4b1 100755 --- a/unicorn_mode/build_unicorn_support.sh +++ b/unicorn_mode/build_unicorn_support.sh @@ -127,12 +127,13 @@ tar xzf "$ARCHIVE" -C ./unicorn --strip-components=1 || exit 1 echo "[+] Unpacking successful." -rm -rf "$ARCHIVE" || exit 1 +#rm -rf "$ARCHIVE" || exit 1 echo "[*] Applying patches..." 
-cp patches/afl-unicorn-cpu-inl.h unicorn || exit 1 -patch -p1 --directory unicorn uc); \ afl_forkserver(env); \ afl_first_instr = 1; \ } \ - afl_maybe_log(tb->pc); \ + afl_maybe_log(env->uc, tb->pc); \ } while (0) /* We use one additional file descriptor to relay "needs translation" @@ -66,24 +66,16 @@ #define TSL_FD (FORKSRV_FD - 1) -/* This is equivalent to afl-as.h: */ - -static unsigned char *afl_area_ptr; - /* Set in the child process in forkserver mode: */ static unsigned char afl_fork_child; static unsigned int afl_forksrv_pid; -/* Instrumentation ratio: */ - -static unsigned int afl_inst_rms = MAP_SIZE; - /* Function declarations. */ -static void afl_setup(void); +static void afl_setup(struct uc_struct* uc); static void afl_forkserver(CPUArchState*); -static inline void afl_maybe_log(unsigned long); +static inline void afl_maybe_log(struct uc_struct* uc, unsigned long); static void afl_wait_tsl(CPUArchState*, int); static void afl_request_tsl(target_ulong, target_ulong, uint64_t); @@ -105,7 +97,7 @@ struct afl_tsl { /* Set up SHM region and initialize other stuff. */ -static void afl_setup(void) { +static void afl_setup(struct uc_struct* uc) { char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO"); @@ -121,21 +113,35 @@ static void afl_setup(void) { if (r > 100) r = 100; if (!r) r = 1; - afl_inst_rms = MAP_SIZE * r / 100; + uc->afl_inst_rms = MAP_SIZE * r / 100; + } else { + + uc->afl_inst_rms = MAP_SIZE; + } if (id_str) { shm_id = atoi(id_str); - afl_area_ptr = shmat(shm_id, NULL, 0); + uc->afl_area_ptr = shmat(shm_id, NULL, 0); - if (afl_area_ptr == (void*)-1) exit(1); + if (uc->afl_area_ptr == (void*)-1) exit(1); /* With AFL_INST_RATIO set to a low value, we want to touch the bitmap so that the parent doesn't give up on us. */ - if (inst_r) afl_area_ptr[0] = 1; + if (inst_r) uc->afl_area_ptr[0] = 1; + } + + /* Maintain for compatibility */ + if (getenv("AFL_QEMU_COMPCOV")) { + + uc->afl_compcov_level = 1; + } + if (getenv("AFL_COMPCOV_LEVEL")) { + + uc->afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); } } @@ -145,7 +151,7 @@ static void afl_forkserver(CPUArchState *env) { static unsigned char tmp[4]; - if (!afl_area_ptr) return; + if (!env->uc->afl_area_ptr) return; /* Tell the parent that we're alive. If the parent doesn't want to talk, assume that we're not running in forkserver mode. */ @@ -208,7 +214,7 @@ static void afl_forkserver(CPUArchState *env) { /* The equivalent of the tuple logging routine from afl-as.h. 
*/ -static inline void afl_maybe_log(unsigned long cur_loc) { +static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { static __thread unsigned long prev_loc; @@ -217,7 +223,7 @@ static inline void afl_maybe_log(unsigned long cur_loc) { // MODIFIED FOR UNICORN MODE -> We want to log all addresses, // so the checks for 'start < addr < end' are removed - if(!afl_area_ptr) + if(!uc->afl_area_ptr) return; // DEBUG @@ -236,7 +242,7 @@ static inline void afl_maybe_log(unsigned long cur_loc) { // DEBUG //printf("afl_inst_rms = 0x%lx\n", afl_inst_rms); - if (cur_loc >= afl_inst_rms) return; + if (cur_loc >= uc->afl_inst_rms) return; // DEBUG //printf("cur_loc = 0x%lx\n", cur_loc); @@ -249,11 +255,11 @@ static inline void afl_maybe_log(unsigned long cur_loc) { "seto %%al\n" "addb %%al, (%0, %1, 1)\n" : /* no out */ - : "r" (afl_area_ptr), "r" (afl_idx) + : "r" (uc->afl_area_ptr), "r" (afl_idx) : "memory", "eax" ); #else - afl_area_ptr[afl_idx]++; + uc->afl_area_ptr[afl_idx]++; #endif prev_loc = cur_loc >> 1; diff --git a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h new file mode 100644 index 00000000..9c7a14dc --- /dev/null +++ b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h @@ -0,0 +1,62 @@ +/* + american fuzzy lop - high-performance binary-only instrumentation + ----------------------------------------------------------------- + + Written by Andrew Griffiths and + Michal Zalewski + + TCG instrumentation and block chaining support by Andrea Biondo + + Adapted for afl-unicorn by Dominik Maier + + Idea & design very much by Andrew Griffiths. + + Copyright 2015, 2016 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This code is a shim patched into the separately-distributed source + code of Unicorn 1.0.1. It leverages the built-in QEMU tracing functionality + to implement AFL-style instrumentation and to take care of the remaining + parts of the AFL fork server logic. + + The resulting QEMU binary is essentially a standalone instrumentation + tool; for an example of how to leverage it for other purposes, you can + have a look at afl-showmap.c. 
+ + */ + +#include "../../config.h" + +static void afl_gen_compcov(TCGContext *s, uint64_t cur_loc, TCGv_i64 arg1, + TCGv_i64 arg2, TCGMemOp ot, int is_imm) { + + if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr) + return; + + if (!is_imm && s->uc->afl_compcov_level < 2) + return; + + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc &= MAP_SIZE - 1; + + if (cur_loc >= s->uc->afl_inst_rms) return; + + switch (ot) { + case MO_64: + gen_afl_compcov_log_64(s, cur_loc, arg1, arg2); + break; + case MO_32: + gen_afl_compcov_log_32(s, cur_loc, arg1, arg2); + break; + case MO_16: + gen_afl_compcov_log_16(s, cur_loc, arg1, arg2); + break; + default: + return; + } +} diff --git a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h new file mode 100644 index 00000000..d5a29cce --- /dev/null +++ b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h @@ -0,0 +1,56 @@ +/* + american fuzzy lop - high-performance binary-only instrumentation + ----------------------------------------------------------------- + + Written by Andrew Griffiths and + Michal Zalewski + + TCG instrumentation and block chaining support by Andrea Biondo + + Adapted for afl-unicorn by Dominik Maier + + Idea & design very much by Andrew Griffiths. + + Copyright 2015, 2016 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This code is a shim patched into the separately-distributed source + code of Unicorn 1.0.1. It leverages the built-in QEMU tracing functionality + to implement AFL-style instrumentation and to take care of the remaining + parts of the AFL fork server logic. + + The resulting QEMU binary is essentially a standalone instrumentation + tool; for an example of how to leverage it for other purposes, you can + have a look at afl-showmap.c. + + */ + +static inline void gen_afl_compcov_log_16(TCGContext *tcg_ctx, uint64_t cur_loc, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + gen_helper_afl_compcov_log_16(tcg_ctx, tuc, tcur_loc, arg1, arg2); +} + +static inline void gen_afl_compcov_log_32(TCGContext *tcg_ctx, uint64_t cur_loc, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + gen_helper_afl_compcov_log_32(tcg_ctx, tuc, tcur_loc, arg1, arg2); +} + +static inline void gen_afl_compcov_log_64(TCGContext *tcg_ctx, uint64_t cur_loc, + TCGv_i64 arg1, TCGv_i64 arg2) +{ + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + gen_helper_afl_compcov_log_64(tcg_ctx, tuc, tcur_loc, arg1, arg2); +} + diff --git a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h new file mode 100644 index 00000000..9e56484b --- /dev/null +++ b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h @@ -0,0 +1,88 @@ +/* + american fuzzy lop - high-performance binary-only instrumentation + ----------------------------------------------------------------- + + Written by Andrew Griffiths and + Michal Zalewski + + TCG instrumentation and block chaining support by Andrea Biondo + + Adapted for afl-unicorn by Dominik Maier + + Idea & design very much by Andrew Griffiths. + + Copyright 2015, 2016 Google Inc. 
All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This code is a shim patched into the separately-distributed source + code of Unicorn 1.0.1. It leverages the built-in QEMU tracing functionality + to implement AFL-style instrumentation and to take care of the remaining + parts of the AFL fork server logic. + + The resulting QEMU binary is essentially a standalone instrumentation + tool; for an example of how to leverage it for other purposes, you can + have a look at afl-showmap.c. + + */ + +#include "uc_priv.h" + +void HELPER(afl_compcov_log_16)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, + uint64_t arg2) { + + struct uc_struct* uc = uc_ptr; + + if ((arg1 & 0xff) == (arg2 & 0xff)) { + uc->afl_area_ptr[cur_loc]++; + } +} + +void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, + uint64_t arg2) { + + struct uc_struct* uc = uc_ptr; + + if ((arg1 & 0xff) == (arg2 & 0xff)) { + uc->afl_area_ptr[cur_loc]++; + if ((arg1 & 0xffff) == (arg2 & 0xffff)) { + uc->afl_area_ptr[cur_loc +1]++; + if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { + uc->afl_area_ptr[cur_loc +2]++; + } + } + } +} + +void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, + uint64_t arg2) { + + struct uc_struct* uc = uc_ptr; + + if ((arg1 & 0xff) == (arg2 & 0xff)) { + uc->afl_area_ptr[cur_loc]++; + if ((arg1 & 0xffff) == (arg2 & 0xffff)) { + uc->afl_area_ptr[cur_loc +1]++; + if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { + uc->afl_area_ptr[cur_loc +2]++; + if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) { + uc->afl_area_ptr[cur_loc +3]++; + if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) { + uc->afl_area_ptr[cur_loc +4]++; + if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) { + uc->afl_area_ptr[cur_loc +5]++; + if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) { + uc->afl_area_ptr[cur_loc +6]++; + } + } + } + } + } + } + } +} + diff --git a/unicorn_mode/patches/compcov.diff b/unicorn_mode/patches/compcov.diff new file mode 100644 index 00000000..8ec867d1 --- /dev/null +++ b/unicorn_mode/patches/compcov.diff @@ -0,0 +1,113 @@ +diff --git a/include/uc_priv.h b/include/uc_priv.h +index 22f494e..1aa7b3a 100644 +--- a/include/uc_priv.h ++++ b/include/uc_priv.h +@@ -245,6 +245,12 @@ struct uc_struct { + uint32_t target_page_align; + uint64_t next_pc; // save next PC for some special cases + bool hook_insert; // insert new hook at begin of the hook list (append by default) ++ ++#ifdef UNICORN_AFL ++ unsigned char *afl_area_ptr; ++ int afl_compcov_level; ++ unsigned int afl_inst_rms; ++#endif + }; + + // Metadata stub for the variable-size cpu context used with uc_context_*() +diff --git a/qemu/target-i386/translate.c b/qemu/target-i386/translate.c +index 36fae09..196d346 100644 +--- a/qemu/target-i386/translate.c ++++ b/qemu/target-i386/translate.c +@@ -33,6 +33,12 @@ + + #include "uc_priv.h" + ++#if defined(UNICORN_AFL) ++#include "../../afl-unicorn-cpu-translate-inl.h" ++#else ++#define afl_gen_compcov(a,b,c,d,e,f) do {} while (0) ++#endif ++ + #define PREFIX_REPZ 0x01 + #define PREFIX_REPNZ 0x02 + #define PREFIX_LOCK 0x04 +@@ -1555,6 +1561,7 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) + case OP_SUBL: + tcg_gen_mov_tl(tcg_ctx, cpu_cc_srcT, *cpu_T[0]); + tcg_gen_sub_tl(tcg_ctx, *cpu_T[0], *cpu_T[0], *cpu_T[1]); ++ afl_gen_compcov(tcg_ctx, s->pc, 
*cpu_T[0], *cpu_T[1], ot, d == OR_EAX); + gen_op_st_rm_T0_A0(s, ot, d); + gen_op_update2_cc(tcg_ctx); + set_cc_op(s, CC_OP_SUBB + ot); +@@ -1582,6 +1589,7 @@ static void gen_op(DisasContext *s, int op, TCGMemOp ot, int d) + tcg_gen_mov_tl(tcg_ctx, cpu_cc_src, *cpu_T[1]); + tcg_gen_mov_tl(tcg_ctx, cpu_cc_srcT, *cpu_T[0]); + tcg_gen_sub_tl(tcg_ctx, cpu_cc_dst, *cpu_T[0], *cpu_T[1]); ++ afl_gen_compcov(tcg_ctx, s->pc, *cpu_T[0], *cpu_T[1], ot, d == OR_EAX); + set_cc_op(s, CC_OP_SUBB + ot); + break; + } +diff --git a/qemu/tcg-runtime.c b/qemu/tcg-runtime.c +index 21b022a..14d7891 100644 +--- a/qemu/tcg-runtime.c ++++ b/qemu/tcg-runtime.c +@@ -31,9 +31,14 @@ + + #define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \ + dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)); ++#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \ ++ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), dh_ctype(t4)); + + #include "tcg-runtime.h" + ++#ifdef UNICORN_AFL ++#include "../afl-unicorn-tcg-runtime-inl.h" ++#endif + + /* 32-bit helpers */ + +diff --git a/qemu/tcg/tcg-op.h b/qemu/tcg/tcg-op.h +index 38b7dd9..c5a9af9 100644 +--- a/qemu/tcg/tcg-op.h ++++ b/qemu/tcg/tcg-op.h +@@ -27,6 +27,10 @@ + + int gen_new_label(TCGContext *); + ++#ifdef UNICORN_AFL ++#include "../../afl-unicorn-tcg-op-inl.h" ++#endif ++ + static inline void gen_uc_tracecode(TCGContext *tcg_ctx, int32_t size, int32_t type, void *uc, uint64_t pc) + { + TCGv_i32 tsize = tcg_const_i32(tcg_ctx, size); +diff --git a/qemu/tcg/tcg-runtime.h b/qemu/tcg/tcg-runtime.h +index 23a0c37..90b993c 100644 +--- a/qemu/tcg/tcg-runtime.h ++++ b/qemu/tcg/tcg-runtime.h +@@ -14,3 +14,9 @@ DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) + + DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64) + DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64) ++ ++#ifdef UNICORN_AFL ++DEF_HELPER_FLAGS_4(afl_compcov_log_16, 0, void, ptr, i64, i64, i64) ++DEF_HELPER_FLAGS_4(afl_compcov_log_32, 0, void, ptr, i64, i64, i64) ++DEF_HELPER_FLAGS_4(afl_compcov_log_64, 0, void, ptr, i64, i64, i64) ++#endif +diff --git a/qemu/unicorn_common.h b/qemu/unicorn_common.h +index 8dcbb3e..11e18b4 100644 +--- a/qemu/unicorn_common.h ++++ b/qemu/unicorn_common.h +@@ -84,6 +84,10 @@ static inline void uc_common_init(struct uc_struct* uc) + + if (!uc->release) + uc->release = release_common; ++ ++#ifdef UNICORN_AFL ++ uc->afl_area_ptr = 0; ++#endif + } + + #endif diff --git a/unicorn_mode/samples/compcov_x64/COMPILE.md b/unicorn_mode/samples/compcov_x64/COMPILE.md new file mode 100644 index 00000000..db488d30 --- /dev/null +++ b/unicorn_mode/samples/compcov_x64/COMPILE.md @@ -0,0 +1,20 @@ +Compiling compcov_target.c +========================== + +compcov_target.c was compiled without optimization, position-independent, +and without standard libraries using the following command line: + +``` +gcc -o compcov_target.elf compcov_target.c -fPIC -O0 -nostdlib +``` + +The .text section from the resulting ELF binary was then extracted to create +the raw binary blob that is loaded and emulated by compcov_test_harness.py: + +``` +objcopy -O binary --only-section=.text compcov_target.elf compcov_target.bin +``` + +Note that the output of this is padded with nulls for 16-byte alignment. This is +important when emulating it, as NOPs will be added after the return of main() +as necessary. 
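As an illustrative sanity check (not part of the build; it only assumes the `compcov_target.bin` produced above), the padding can be confirmed with a few lines of Python before wiring the blob into a harness:

```
# Hypothetical check that the extracted blob really is null-padded
# to a 16-byte boundary before handing it to the emulation harness.
with open("compcov_target.bin", "rb") as f:
    blob = f.read()

assert len(blob) % 16 == 0, "expected the blob to be 16-byte aligned"
print("blob is {} bytes, trailing bytes: {!r}".format(len(blob), blob[-4:]))
```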
diff --git a/unicorn_mode/samples/compcov_x64/compcov_target.bin b/unicorn_mode/samples/compcov_x64/compcov_target.bin
new file mode 100644
index 0000000000000000000000000000000000000000..091bf1db5ae383ed061927128d887bcd1d4ee5f9
GIT binary patch
literal 86
zcmWIb=zQvN-1P?og8_p_x9g8?2HSt9)~+t)-N66^{M#5hT)(~61j%?bA7JSQstE
+ */
+
+// Magic address where mutated data will be placed
+#define DATA_ADDRESS 0x00300000
+
+int main(void) {
+   unsigned int *data_buf = (unsigned int *) DATA_ADDRESS;
+
+   if (data_buf[0] == 0xabadcafe) {
+      // Cause an 'invalid read' crash if data[0..3] == 0xabadcafe
+      unsigned char invalid_read = *(unsigned char *) 0x00000000;
+   } else if (data_buf[1] == data_buf[2] + 0x4141) {
+      // Cause an 'invalid read' crash if data[4..7] == data[8..11] + 0x4141
+      unsigned char invalid_read = *(unsigned char *) 0x00000000;
+   }
+
+   return 0;
+}
diff --git a/unicorn_mode/samples/compcov_x64/compcov_target.elf b/unicorn_mode/samples/compcov_x64/compcov_target.elf
new file mode 100755
index 0000000000000000000000000000000000000000..7015fb46661776654c4bba59f7926d39b6ba6da8
GIT binary patch
literal 5728
zcmeHL&ubG=5S}EpP1V|16oiTd^xzM=rdndGs0lSSqSdxoQAC!ciA|u{)MTU4gFg^N
z$;G38f+B)m#G}-M;K@Is7f+r%s0E>MX7_#D?Al@lJqY_C{pNjdX5Y;0zE@`Mq|b~7
z0|CMa(J_+QtDwy!)xtub5*U({pl<%ViF&A$=N%SjuP1HbReLO|M+3u(4>5{eLha+l
z?Me>_4+Yn$wUywaJeUWNC~D2cPJE5_T2scTB>)#}nt-qq_x{8V@kwm*JFnvN;x1m3
z0Tf*s$
zi$pQXG}DVsI=b}X#q)a48?oB0=P1c!WX+GTSyEmUo5gq|Iu
z-4d_7qdm{{&Btj0{J2Pvz={S&tslhP2qgVBMMx-X2V7
zu_0}6kInyn`w&-8sN>tULlkuCU^gFf^6KV02>EpL>j-&w^PNt=!p(O%b+nrgQ&iR0
zZob>;3%Gf_)n!)RZwGy8Ei=o~O1!jB0^Bu0zX96)mhh{e2bo8W4qZXb;=6enpGQ%kV=MGsLqjQRLp`l
zW7M@=y~MqQQl6|?F#SjP8Pb_>{sgpK_C4b%;PM4<2wQSH02
z8cDX7I|%kTS3ulLa<7T+um_)EPL2xfasGhbc)jy+^<3PO_&$O7PFc+wUVB*=4*Rdx
z53{Z9o*QIA{tR9AIQPIryE4m&AGE=O_>SuW=NJ951*M4p3$Vx8y}|3^Kl({Z-8tO*
z9Kse5e>)=fcGZ~LU_mKv3pKKYLC?e-DX&pq8vvW$H}Z)O3pgA=^0?C-Xd
IEPL&L0s<_}SpWb4

literal 0
HcmV?d00001

diff --git a/unicorn_mode/samples/compcov_x64/compcov_test_harness.py b/unicorn_mode/samples/compcov_x64/compcov_test_harness.py
new file mode 100644
index 00000000..5698cbc8
--- /dev/null
+++ b/unicorn_mode/samples/compcov_x64/compcov_test_harness.py
@@ -0,0 +1,170 @@
+"""
+    Simple test harness for AFL's Unicorn Mode.
+
+    This loads the compcov_target.bin binary (precompiled as x86_64 code) into
+    Unicorn's memory map for emulation, places the specified input into
+    compcov_target's buffer (hardcoded to be at 0x300000), and executes 'main()'.
+    If any crashes occur during emulation, this script throws a matching signal
+    to tell AFL that a crash occurred.
+
+    Run under AFL as follows:
+
+    $ cd /unicorn_mode/samples/compcov_x64/
+    $ ../../../afl-fuzz -U -m none -i ./sample_inputs -o ./output -- python compcov_test_harness.py @@
+"""
+
+import argparse
+import os
+import signal
+
+from unicorn import *
+from unicorn.x86_const import *
+
+# Path to the file containing the binary to emulate
+BINARY_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'compcov_target.bin')
+
+# Memory map for the code to be tested
+CODE_ADDRESS  = 0x00100000  # Arbitrary address where code to test will be loaded
+CODE_SIZE_MAX = 0x00010000  # Max size for the code (64 KB)
+STACK_ADDRESS = 0x00200000  # Address of the stack (arbitrarily chosen)
+STACK_SIZE    = 0x00010000  # Size of the stack (arbitrarily chosen)
+DATA_ADDRESS  = 0x00300000  # Address where mutated data will be placed
+DATA_SIZE_MAX = 0x00010000  # Maximum allowable size of mutated data
+
+try:
+    # If Capstone is installed then we'll dump disassembly, otherwise we just print addresses and sizes.
+    from capstone import *
+    cs = Cs(CS_ARCH_X86, CS_MODE_64)
+    def unicorn_debug_instruction(uc, address, size, user_data):
+        mem = uc.mem_read(address, size)
+        for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(bytes(mem), address):
+            print("    Instr: {:#016x}:\t{}\t{}".format(cs_address, cs_mnemonic, cs_opstr))
+except ImportError:
+    def unicorn_debug_instruction(uc, address, size, user_data):
+        print("    Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
+
+def unicorn_debug_block(uc, address, size, user_data):
+    print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
+
+def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
+    if access == UC_MEM_WRITE:
+        print("        >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
+    else:
+        print("        >>> Read: addr=0x{0:016x} size={1}".format(address, size))
+
+def unicorn_debug_mem_invalid_access(uc, access, address, size, value, user_data):
+    if access == UC_MEM_WRITE_UNMAPPED:
+        print(" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
+    else:
+        print(" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size))
+
+def force_crash(uc_error):
+    # This function should be called to indicate to AFL that a crash occurred during emulation.
+    # Pass in the exception received from Uc.emu_start()
+    mem_errors = [
+        UC_ERR_READ_UNMAPPED, UC_ERR_READ_PROT, UC_ERR_READ_UNALIGNED,
+        UC_ERR_WRITE_UNMAPPED, UC_ERR_WRITE_PROT, UC_ERR_WRITE_UNALIGNED,
+        UC_ERR_FETCH_UNMAPPED, UC_ERR_FETCH_PROT, UC_ERR_FETCH_UNALIGNED,
+    ]
+    if uc_error.errno in mem_errors:
+        # Memory error - throw SIGSEGV
+        os.kill(os.getpid(), signal.SIGSEGV)
+    elif uc_error.errno == UC_ERR_INSN_INVALID:
+        # Invalid instruction - throw SIGILL
+        os.kill(os.getpid(), signal.SIGILL)
+    else:
+        # Not sure what happened - throw SIGABRT
+        os.kill(os.getpid(), signal.SIGABRT)
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Test harness for compcov_target.bin")
+    parser.add_argument('input_file', type=str, help="Path to the file containing the mutated input to load")
+    parser.add_argument('-d', '--debug', default=False, action="store_true", help="Enables debug tracing")
+    args = parser.parse_args()
+
+    # Instantiate an x86-64 Unicorn Engine instance
+    uc = Uc(UC_ARCH_X86, UC_MODE_64)
+
+    if args.debug:
+        uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
+        uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction)
+        uc.hook_add(UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, unicorn_debug_mem_access)
+        uc.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID, unicorn_debug_mem_invalid_access)
+
+    #---------------------------------------------------
+    # Load the binary to emulate and map it into memory
+
+    print("Loading binary to emulate from {}".format(BINARY_FILE))
+    binary_file = open(BINARY_FILE, 'rb')
+    binary_code = binary_file.read()
+    binary_file.close()
+
+    # Make sure the binary blob fits into the code region
+    if len(binary_code) > CODE_SIZE_MAX:
+        print("Binary code is too large (> {} bytes)".format(CODE_SIZE_MAX))
+        return
+
+    # Map the code region and write the binary into it
+    uc.mem_map(CODE_ADDRESS, CODE_SIZE_MAX)
+    uc.mem_write(CODE_ADDRESS, binary_code)
+
+    # Set the program counter to the start of the code
+    start_address = CODE_ADDRESS       # Address of entry point of main()
+    end_address = CODE_ADDRESS + 0x55  # Address of last instruction in main()
+    uc.reg_write(UC_X86_REG_RIP, start_address)
+
+    #-----------------
+    # Setup the stack
+
+    uc.mem_map(STACK_ADDRESS, STACK_SIZE)
+    uc.reg_write(UC_X86_REG_RSP, STACK_ADDRESS + STACK_SIZE)
+
+    #-----------------------------------------------------
+    # Emulate 1 instruction to kick off AFL's fork server
+    # THIS MUST BE DONE BEFORE LOADING USER DATA!
+    # If this isn't done every single run, the AFL fork server
+    # will not be started appropriately and you'll get erratic results!
+    # It doesn't matter what this returns with, it just has to execute at
+    # least one instruction in order to get the fork server started.
+
+    # Execute 1 instruction just to startup the forkserver
+    print("Starting the AFL forkserver by executing 1 instruction")
+    try:
+        uc.emu_start(uc.reg_read(UC_X86_REG_RIP), 0, 0, count=1)
+    except UcError as e:
+        print("ERROR: Failed to execute a single instruction (error: {})!".format(e))
+        return
+
+    #-----------------------------------------------
+    # Load the mutated input and map it into memory
+
+    # Load the mutated input from disk
+    print("Loading data input from {}".format(args.input_file))
+    input_file = open(args.input_file, 'rb')
+    input_data = input_file.read()
+    input_file.close()
+
+    # Apply constraints to the mutated input
+    if len(input_data) > DATA_SIZE_MAX:
+        print("Test input is too long (> {} bytes)".format(DATA_SIZE_MAX))
+        return
+
+    # Write the mutated input into the data buffer
+    uc.mem_map(DATA_ADDRESS, DATA_SIZE_MAX)
+    uc.mem_write(DATA_ADDRESS, input_data)
+
+    #------------------------------------------------------------
+    # Emulate the code, allowing it to process the mutated input
+
+    print("Executing until a crash or execution reaches 0x{0:016x}".format(end_address))
+    try:
+        uc.emu_start(uc.reg_read(UC_X86_REG_RIP), end_address, timeout=0, count=0)
+    except UcError as e:
+        print("Execution failed with error: {}".format(e))
+        force_crash(e)
+
+    print("Done.")
+
+if __name__ == "__main__":
+    main()
diff --git a/unicorn_mode/samples/compcov_x64/sample_inputs/sample1.bin b/unicorn_mode/samples/compcov_x64/sample_inputs/sample1.bin
new file mode 100644
index 00000000..445c7245
--- /dev/null
+++ b/unicorn_mode/samples/compcov_x64/sample_inputs/sample1.bin
@@ -0,0 +1 @@
+00000000000000000000000000000000
\ No newline at end of file

From 733c8e4c349562fd02d0238be486ecbdf0640fd0 Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Wed, 28 Aug 2019 18:42:21 +0200
Subject: [PATCH 45/83] better neverzero with adc + neverzero for compcov

---
 .../patches/afl-qemu-cpu-translate-inl.h      | 36 +++++++++++++------
 qemu_mode/patches/afl-qemu-translate-inl.h    |  3 +-
 unicorn_mode/patches/afl-unicorn-cpu-inl.h    |  3 +-
 .../patches/afl-unicorn-tcg-runtime-inl.h     | 36 +++++++++++++------
 4 files changed, 52 insertions(+), 26 deletions(-)

diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
index 4716c2ac..f85a86d7 100644
--- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
@@ -45,11 +45,25 @@ extern u8 afl_compcov_level;
 void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
                                   TCGv_i64 arg1, TCGv_i64 arg2);
 
+#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO)
+# define INC_AFL_AREA(loc) \
+  asm volatile ( \
+    "incb (%0, %1, 1)\n" \
+    "adc $0, (%0, %1, 1)\n" \
+    : /* no out */ \
+    : "r" (afl_area_ptr), "r" (loc) \
+    : "memory", "eax" \
+  )
+#else
+# define INC_AFL_AREA(loc) \
+  afl_area_ptr[loc]++
+#endif
+
 static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2) {
 
   if ((arg1 & 0xff) == (arg2 & 0xff)) {
-    afl_area_ptr[cur_loc]++;
+    INC_AFL_AREA(cur_loc);
   }
 }
 
@@ -57,11 +71,11 @@ static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1,
                                target_ulong arg2) {
 
   if ((arg1 & 0xff) == (arg2 & 0xff)) {
-    afl_area_ptr[cur_loc]++;
+    INC_AFL_AREA(cur_loc);
     if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
-      afl_area_ptr[cur_loc +1]++;
+      INC_AFL_AREA(cur_loc +1);
       if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
-        afl_area_ptr[cur_loc +2]++;
+        INC_AFL_AREA(cur_loc +2);
       }
     }
   }
@@ -71,19 +85,19 @@ static void
afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { if ((arg1 & 0xff) == (arg2 & 0xff)) { - afl_area_ptr[cur_loc]++; + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - afl_area_ptr[cur_loc +1]++; + INC_AFL_AREA(cur_loc +1); if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - afl_area_ptr[cur_loc +2]++; + INC_AFL_AREA(cur_loc +2); if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) { - afl_area_ptr[cur_loc +3]++; + INC_AFL_AREA(cur_loc +3); if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) { - afl_area_ptr[cur_loc +4]++; + INC_AFL_AREA(cur_loc +4); if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) { - afl_area_ptr[cur_loc +5]++; + INC_AFL_AREA(cur_loc +5); if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) { - afl_area_ptr[cur_loc +6]++; + INC_AFL_AREA(cur_loc +6); } } } diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h index f82d1217..48d05179 100644 --- a/qemu_mode/patches/afl-qemu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-translate-inl.h @@ -51,8 +51,7 @@ void afl_maybe_log(target_ulong cur_loc) { #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) asm volatile ( "incb (%0, %1, 1)\n" - "seto %%al\n" - "addb %%al, (%0, %1, 1)\n" + "adc $0, (%0, %1, 1)\n" : /* no out */ : "r" (afl_area_ptr), "r" (afl_idx) : "memory", "eax" diff --git a/unicorn_mode/patches/afl-unicorn-cpu-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-inl.h index 28400357..187a0ce6 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-inl.h @@ -252,8 +252,7 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) asm volatile ( "incb (%0, %1, 1)\n" - "seto %%al\n" - "addb %%al, (%0, %1, 1)\n" + "adc $0, (%0, %1, 1)\n" : /* no out */ : "r" (uc->afl_area_ptr), "r" (afl_idx) : "memory", "eax" diff --git a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h index 9e56484b..e59d7b15 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h @@ -32,13 +32,27 @@ #include "uc_priv.h" +#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) +# define INC_AFL_AREA(loc) \ + asm volatile ( \ + "incb (%0, %1, 1)\n" \ + "adc $0, (%0, %1, 1)\n" \ + : /* no out */ \ + : "r" (uc->afl_area_ptr), "r" (loc) \ + : "memory", "eax" \ + ) +#else +# define INC_AFL_AREA(loc) \ + uc->afl_area_ptr[loc]++ +#endif + void HELPER(afl_compcov_log_16)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, uint64_t arg2) { struct uc_struct* uc = uc_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { - uc->afl_area_ptr[cur_loc]++; + INC_AFL_AREA(cur_loc); } } @@ -48,11 +62,11 @@ void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, struct uc_struct* uc = uc_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { - uc->afl_area_ptr[cur_loc]++; + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - uc->afl_area_ptr[cur_loc +1]++; + INC_AFL_AREA(cur_loc +1); if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - uc->afl_area_ptr[cur_loc +2]++; + INC_AFL_AREA(cur_loc +2); } } } @@ -64,19 +78,19 @@ void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, struct uc_struct* uc = uc_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { - uc->afl_area_ptr[cur_loc]++; + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - 
uc->afl_area_ptr[cur_loc +1]++;
+      INC_AFL_AREA(cur_loc +1);
       if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
-        uc->afl_area_ptr[cur_loc +2]++;
+        INC_AFL_AREA(cur_loc +2);
         if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) {
-          uc->afl_area_ptr[cur_loc +3]++;
+          INC_AFL_AREA(cur_loc +3);
           if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) {
-            uc->afl_area_ptr[cur_loc +4]++;
+            INC_AFL_AREA(cur_loc +4);
             if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) {
-              uc->afl_area_ptr[cur_loc +5]++;
+              INC_AFL_AREA(cur_loc +5);
               if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) {
-                uc->afl_area_ptr[cur_loc +6]++;
+                INC_AFL_AREA(cur_loc +6);
               }
             }
           }

From 892513708bb5f68b15610fe0c74b892d4421c8cd Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Wed, 28 Aug 2019 19:07:19 +0200
Subject: [PATCH 46/83] solved MAP_SIZE overflow

---
 qemu_mode/patches/afl-qemu-cpu-translate-inl.h       | 2 +-
 unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
index f85a86d7..c0caeefc 100644
--- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
@@ -134,7 +134,7 @@ static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
   }
 
   cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
-  cur_loc &= MAP_SIZE - 1;
+  cur_loc &= MAP_SIZE - 7;
 
   if (cur_loc >= afl_inst_rms) return;
 
diff --git a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h
index 9c7a14dc..7e8f47c9 100644
--- a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h
+++ b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h
@@ -42,7 +42,7 @@ static void afl_gen_compcov(TCGContext *s, uint64_t cur_loc, TCGv_i64 arg1,
     return;
 
   cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
-  cur_loc &= MAP_SIZE - 1;
+  cur_loc &= MAP_SIZE - 7;
 
   if (cur_loc >= s->uc->afl_inst_rms) return;
 
From 3f2a317af09982a47340593b224a10b79a81d303 Mon Sep 17 00:00:00 2001
From: Dominik Maier
Date: Thu, 29 Aug 2019 03:06:24 +0200
Subject: [PATCH 47/83] Fixed SIGSEGV due to wrong pointer size

---
 qemu_mode/patches/afl-qemu-cpu-translate-inl.h     | 2 +-
 qemu_mode/patches/afl-qemu-translate-inl.h         | 2 +-
 unicorn_mode/patches/afl-unicorn-cpu-inl.h         | 2 +-
 unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
index c0caeefc..e91e9ffa 100644
--- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h
@@ -49,7 +49,7 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
 # define INC_AFL_AREA(loc) \
   asm volatile ( \
     "incb (%0, %1, 1)\n" \
-    "adc $0, (%0, %1, 1)\n" \
+    "adcb $0, (%0, %1, 1)\n" \
     : /* no out */ \
     : "r" (afl_area_ptr), "r" (loc) \
     : "memory", "eax" \
diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h
index 48d05179..a33e17b7 100644
--- a/qemu_mode/patches/afl-qemu-translate-inl.h
+++ b/qemu_mode/patches/afl-qemu-translate-inl.h
@@ -51,7 +51,7 @@ void afl_maybe_log(target_ulong cur_loc) {
 #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO)
   asm volatile (
     "incb (%0, %1, 1)\n"
-    "adc $0, (%0, %1, 1)\n"
+    "adcb $0, (%0, %1, 1)\n"
     : /* no out */
     : "r" (afl_area_ptr), "r" (afl_idx)
     : "memory", "eax"
diff --git
a/unicorn_mode/patches/afl-unicorn-cpu-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-inl.h index 187a0ce6..ff194696 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-inl.h @@ -252,7 +252,7 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) asm volatile ( "incb (%0, %1, 1)\n" - "adc $0, (%0, %1, 1)\n" + "adcb $0, (%0, %1, 1)\n" : /* no out */ : "r" (uc->afl_area_ptr), "r" (afl_idx) : "memory", "eax" diff --git a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h index e59d7b15..52cc1afb 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h @@ -36,7 +36,7 @@ # define INC_AFL_AREA(loc) \ asm volatile ( \ "incb (%0, %1, 1)\n" \ - "adc $0, (%0, %1, 1)\n" \ + "adcb $0, (%0, %1, 1)\n" \ : /* no out */ \ : "r" (uc->afl_area_ptr), "r" (loc) \ : "memory", "eax" \ From 132ad08885f95abfdcbafdf1fa33b3f12ac59538 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Thu, 29 Aug 2019 15:28:42 +0200 Subject: [PATCH 48/83] common header for qemu and unicorn --- qemu_mode/patches/afl-qemu-common.h | 52 +++++++++++++++++++ .../patches/afl-qemu-cpu-translate-inl.h | 16 +----- qemu_mode/patches/afl-qemu-translate-inl.h | 14 +---- unicorn_mode/patches/afl-unicorn-common.h | 50 ++++++++++++++++++ unicorn_mode/patches/afl-unicorn-cpu-inl.h | 30 ++--------- .../patches/afl-unicorn-tcg-runtime-inl.h | 21 ++------ 6 files changed, 113 insertions(+), 70 deletions(-) create mode 100644 qemu_mode/patches/afl-qemu-common.h create mode 100644 unicorn_mode/patches/afl-unicorn-common.h diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h new file mode 100644 index 00000000..8013800d --- /dev/null +++ b/qemu_mode/patches/afl-qemu-common.h @@ -0,0 +1,52 @@ +/* + american fuzzy lop - high-performance binary-only instrumentation + ----------------------------------------------------------------- + + Written by Andrew Griffiths and + Michal Zalewski + + Idea & design very much by Andrew Griffiths. + + TCG instrumentation and block chaining support by Andrea Biondo + + + QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi + + + Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This code is a shim patched into the separately-distributed source + code of QEMU 3.1.0. It leverages the built-in QEMU tracing functionality + to implement AFL-style instrumentation and to take care of the remaining + parts of the AFL fork server logic. + + The resulting QEMU binary is essentially a standalone instrumentation + tool; for an example of how to leverage it for other purposes, you can + have a look at afl-showmap.c. 
+ + */ + +#include "../../config.h" + +/* NeverZero */ + +#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) +# define INC_AFL_AREA(loc) \ + asm volatile ( \ + "incb (%0, %1, 1)\n" \ + "adcb $0, (%0, %1, 1)\n" \ + : /* no out */ \ + : "r" (afl_area_ptr), "r" (loc) \ + : "memory", "eax" \ + ) +#else +# define INC_AFL_AREA(loc) \ + afl_area_ptr[loc]++ +#endif + diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h index e91e9ffa..fc78e652 100644 --- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h @@ -32,7 +32,7 @@ */ -#include "../../config.h" +#include "afl-qemu-common.h" #include "tcg.h" #include "tcg-op.h" @@ -45,20 +45,6 @@ extern u8 afl_compcov_level; void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2); -#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) -# define INC_AFL_AREA(loc) \ - asm volatile ( \ - "incb (%0, %1, 1)\n" \ - "adcb $0, (%0, %1, 1)\n" \ - : /* no out */ \ - : "r" (afl_area_ptr), "r" (loc) \ - : "memory", "eax" \ - ) -#else -# define INC_AFL_AREA(loc) \ - afl_area_ptr[loc]++ -#endif - static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h index a33e17b7..d63c5167 100644 --- a/qemu_mode/patches/afl-qemu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-translate-inl.h @@ -32,7 +32,7 @@ */ -#include "../../config.h" +#include "afl-qemu-common.h" #include "tcg-op.h" /* Declared in afl-qemu-cpu-inl.h */ @@ -48,17 +48,7 @@ void afl_maybe_log(target_ulong cur_loc) { register uintptr_t afl_idx = cur_loc ^ prev_loc; -#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) - asm volatile ( - "incb (%0, %1, 1)\n" - "adcb $0, (%0, %1, 1)\n" - : /* no out */ - : "r" (afl_area_ptr), "r" (afl_idx) - : "memory", "eax" - ); -#else - afl_area_ptr[afl_idx]++; -#endif + INC_AFL_AREA(afl_idx); prev_loc = cur_loc >> 1; diff --git a/unicorn_mode/patches/afl-unicorn-common.h b/unicorn_mode/patches/afl-unicorn-common.h new file mode 100644 index 00000000..9a1b2a6c --- /dev/null +++ b/unicorn_mode/patches/afl-unicorn-common.h @@ -0,0 +1,50 @@ +/* + american fuzzy lop - high-performance binary-only instrumentation + ----------------------------------------------------------------- + + Written by Andrew Griffiths and + Michal Zalewski + + TCG instrumentation and block chaining support by Andrea Biondo + + Adapted for afl-unicorn by Dominik Maier + + Idea & design very much by Andrew Griffiths. + + Copyright 2015, 2016 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This code is a shim patched into the separately-distributed source + code of Unicorn 1.0.1. It leverages the built-in QEMU tracing functionality + to implement AFL-style instrumentation and to take care of the remaining + parts of the AFL fork server logic. + + The resulting QEMU binary is essentially a standalone instrumentation + tool; for an example of how to leverage it for other purposes, you can + have a look at afl-showmap.c. 
+ + */ + +#include "../../config.h" + +/* NeverZero */ + +#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) +# define INC_AFL_AREA(loc) \ + asm volatile ( \ + "incb (%0, %1, 1)\n" \ + "adcb $0, (%0, %1, 1)\n" \ + : /* no out */ \ + : "r" (afl_area_ptr), "r" (loc) \ + : "memory", "eax" \ + ) +#else +# define INC_AFL_AREA(loc) \ + afl_area_ptr[loc]++ +#endif + diff --git a/unicorn_mode/patches/afl-unicorn-cpu-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-inl.h index ff194696..90937a17 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-inl.h @@ -33,7 +33,7 @@ #include #include #include -#include "../../config.h" +#include "afl-unicorn-common.h" /*************************** * VARIOUS AUXILIARY STUFF * @@ -218,17 +218,11 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { static __thread unsigned long prev_loc; - // DEBUG - //printf("IN AFL_MAYBE_LOG 0x%lx\n", cur_loc); + u8* afl_area_ptr = uc->afl_area_ptr; - // MODIFIED FOR UNICORN MODE -> We want to log all addresses, - // so the checks for 'start < addr < end' are removed - if(!uc->afl_area_ptr) + if(!afl_area_ptr) return; - // DEBUG - //printf("afl_area_ptr = %p\n", afl_area_ptr); - /* Looks like QEMU always maps to fixed locations, so ASAN is not a concern. Phew. But instruction addresses may be aligned. Let's mangle the value to get something quasi-uniform. */ @@ -239,27 +233,11 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { /* Implement probabilistic instrumentation by looking at scrambled block address. This keeps the instrumented locations stable across runs. */ - // DEBUG - //printf("afl_inst_rms = 0x%lx\n", afl_inst_rms); - if (cur_loc >= uc->afl_inst_rms) return; - // DEBUG - //printf("cur_loc = 0x%lx\n", cur_loc); - register uintptr_t afl_idx = cur_loc ^ prev_loc; -#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) - asm volatile ( - "incb (%0, %1, 1)\n" - "adcb $0, (%0, %1, 1)\n" - : /* no out */ - : "r" (uc->afl_area_ptr), "r" (afl_idx) - : "memory", "eax" - ); -#else - uc->afl_area_ptr[afl_idx]++; -#endif + INC_AFL_AREA(afl_idx); prev_loc = cur_loc >> 1; diff --git a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h index 52cc1afb..0019bbfa 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h @@ -31,25 +31,12 @@ */ #include "uc_priv.h" - -#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) -# define INC_AFL_AREA(loc) \ - asm volatile ( \ - "incb (%0, %1, 1)\n" \ - "adcb $0, (%0, %1, 1)\n" \ - : /* no out */ \ - : "r" (uc->afl_area_ptr), "r" (loc) \ - : "memory", "eax" \ - ) -#else -# define INC_AFL_AREA(loc) \ - uc->afl_area_ptr[loc]++ -#endif +#include "afl-unicorn-common.h" void HELPER(afl_compcov_log_16)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, uint64_t arg2) { - struct uc_struct* uc = uc_ptr; + u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); @@ -59,7 +46,7 @@ void HELPER(afl_compcov_log_16)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, uint64_t arg2) { - struct uc_struct* uc = uc_ptr; + u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); @@ -75,7 +62,7 @@ void 
HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, uint64_t arg2) { - struct uc_struct* uc = uc_ptr; + u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); From 7b36afd5f16894257c92695d200e59eb51d08e1c Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 30 Aug 2019 11:38:33 +0200 Subject: [PATCH 49/83] modernize docs and readme for qemu and unicorn --- docs/unicorn_mode.txt | 109 --------------- qemu_mode/{README.qemu => README.md} | 31 ++--- qemu_mode/build_qemu_support.sh | 11 +- qemu_mode/libcompcov/libcompcov.so.c | 2 +- qemu_mode/patches/afl-qemu-common.h | 15 +- qemu_mode/patches/afl-qemu-cpu-inl.h | 15 +- .../patches/afl-qemu-cpu-translate-inl.h | 17 ++- qemu_mode/patches/afl-qemu-tcg-inl.h | 15 +- qemu_mode/patches/afl-qemu-translate-inl.h | 15 +- unicorn_mode/README.md | 130 +++++++++++++++--- unicorn_mode/build_unicorn_support.sh | 12 +- unicorn_mode/patches/afl-unicorn-common.h | 18 +-- unicorn_mode/patches/afl-unicorn-cpu-inl.h | 18 +-- .../patches/afl-unicorn-cpu-translate-inl.h | 18 +-- unicorn_mode/patches/afl-unicorn-tcg-op-inl.h | 18 +-- .../patches/afl-unicorn-tcg-runtime-inl.h | 18 +-- unicorn_mode/samples/compcov_x64/COMPILE.md | 3 +- .../samples/compcov_x64/compcov_target.c | 2 +- unicorn_mode/samples/simple/COMPILE.md | 5 +- 19 files changed, 227 insertions(+), 245 deletions(-) delete mode 100644 docs/unicorn_mode.txt rename qemu_mode/{README.qemu => README.md} (89%) diff --git a/docs/unicorn_mode.txt b/docs/unicorn_mode.txt deleted file mode 100644 index b691fff8..00000000 --- a/docs/unicorn_mode.txt +++ /dev/null @@ -1,109 +0,0 @@ -========================================================= -Unicorn-based binary-only instrumentation for afl-fuzz -========================================================= - -1) Introduction ---------------- - -The code in ./unicorn_mode allows you to build a standalone feature that -leverages the Unicorn Engine and allows callers to obtain instrumentation -output for black-box, closed-source binary code snippets. This mechanism -can be then used by afl-fuzz to stress-test targets that couldn't be built -with afl-gcc or used in QEMU mode, or with other extensions such as -TriforceAFL. - -There is a significant performance penalty compared to native AFL, -but at least we're able to use AFL on these binaries, right? - -The idea and much of the implementation comes from Nathan Voss . - -2) How to use -------------- - -Requirements: you need an installed python2 environment. - -*** Building AFL's Unicorn Mode *** - -First, make afl as usual. -Once that completes successfully you need to build and add in the Unicorn Mode -features: - - $ cd unicorn_mode - $ ./build_unicorn_support.sh - -NOTE: This script downloads a recent Unicorn Engine commit that has been tested -and is stable-ish from the Unicorn github page. If you are offline, you'll need -to hack up this script a little bit and supply your own copy of Unicorn's latest -stable release. It's not very hard, just check out the beginning of the -build_unicorn_support.sh script and adjust as necessary. - -Building Unicorn will take a little bit (~5-10 minutes). Once it completes -it automatically compiles a sample application and verify that it works. 
- -*** Fuzzing with Unicorn Mode *** - -To really use unicorn-mode effectively you need to prepare the following: - - * Relevant binary code to be fuzzed - * Knowledge of the memory map and good starting state - * Folder containing sample inputs to start fuzzing with - - Same ideas as any other AFL inputs - - Quality/speed of results will depend greatly on quality of starting - samples - - See AFL's guidance on how to create a sample corpus - * Unicorn-based test harness which: - - Adds memory map regions - - Loads binary code into memory - - Emulates at least one instruction* - - Yeah, this is lame. See 'Gotchas' section below for more info - - Loads and verifies data to fuzz from a command-line specified file - - AFL will provide mutated inputs by changing the file passed to - the test harness - - Presumably the data to be fuzzed is at a fixed buffer address - - If input constraints (size, invalid bytes, etc.) are known they - should be checked after the file is loaded. If a constraint - fails, just exit the test harness. AFL will treat the input as - 'uninteresting' and move on. - - Sets up registers and memory state for beginning of test - - Emulates the interested code from beginning to end - - If a crash is detected, the test harness must 'crash' by - throwing a signal (SIGSEGV, SIGKILL, SIGABORT, etc.) - -Once you have all those things ready to go you just need to run afl-fuzz in -'unicorn-mode' by passing in the '-U' flag: - - $ afl-fuzz -U -m none -i /path/to/inputs -o /path/to/results -- ./test_harness @@ - -The normal afl-fuzz command line format applies to everything here. Refer to -AFL's main documentation for more info about how to use afl-fuzz effectively. - -For a much clearer vision of what all of this looks like, please refer to the -sample provided in the 'unicorn_mode/samples' directory. There is also a blog -post that goes over the basics at: - -https://medium.com/@njvoss299/afl-unicorn-fuzzing-arbitrary-binary-code-563ca28936bf - -The 'helper_scripts' directory also contains several helper scripts that allow you -to dump context from a running process, load it, and hook heap allocations. For details -on how to use this check out the follow-up blog post to the one linked above. - -A example use of AFL-Unicorn mode is discussed in the Paper Unicorefuzz: -https://www.usenix.org/conference/woot19/presentation/maier - -3) Gotchas, feedback, bugs --------------------------- - -To make sure that AFL's fork server starts up correctly the Unicorn test -harness script must emulate at least one instruction before loading the -data that will be fuzzed from the input file. It doesn't matter what the -instruction is, nor if it is valid. This is an artifact of how the fork-server -is started and could likely be fixed with some clever re-arranging of the -patches applied to Unicorn. - -Running the build script builds Unicorn and its python bindings and installs -them on your system. This installation will supersede any existing Unicorn -installation with the patched afl-unicorn version. - -Refer to the unicorn_mode/samples/arm_example/arm_tester.c for an example -of how to do this properly! If you don't get this right, AFL will not -load any mutated inputs and your fuzzing will be useless! 
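To make the fork-server gotcha above concrete, here is a minimal sketch of the required ordering. The address, the mapping size, and the single NOP byte are made up for illustration; the bundled sample harnesses follow the same pattern:

```
from unicorn import Uc, UC_ARCH_X86, UC_MODE_64

CODE_ADDRESS = 0x100000                # assumed load address for this sketch
uc = Uc(UC_ARCH_X86, UC_MODE_64)
uc.mem_map(CODE_ADDRESS, 0x10000)
uc.mem_write(CODE_ADDRESS, b"\x90")    # any instruction works; a NOP here

# Step 1: emulate a single instruction. This is what starts AFL's fork server.
uc.emu_start(CODE_ADDRESS, 0, 0, count=1)

# Step 2: only now load the mutated input, map it into memory, and
# emulate the code under test for real.
```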
diff --git a/qemu_mode/README.qemu b/qemu_mode/README.md similarity index 89% rename from qemu_mode/README.qemu rename to qemu_mode/README.md index cd8559ad..610f6860 100644 --- a/qemu_mode/README.qemu +++ b/qemu_mode/README.md @@ -1,11 +1,8 @@ -========================================================= -High-performance binary-only instrumentation for afl-fuzz -========================================================= +# High-performance binary-only instrumentation for afl-fuzz (See ../docs/README for the general instruction manual.) -1) Introduction ---------------- +## 1) Introduction The code in this directory allows you to build a standalone feature that leverages the QEMU "user emulation" mode and allows callers to obtain @@ -20,8 +17,7 @@ The idea and much of the initial implementation comes from Andrew Griffiths. The actual implementation on QEMU 3 (shipped with afl++) is from Andrea Fioraldi. Special thanks to abiondo that re-enabled TCG chaining. -2) How to use -------------- +## 2) How to use The feature is implemented with a patch to QEMU 3.1.0. The simplest way to build it is to run ./build_qemu_support.sh. The script will download, @@ -48,16 +44,15 @@ Note: if you want the QEMU helper to be installed on your system for all users, you need to build it before issuing 'make install' in the parent directory. -3) Options ----------- +## 3) Options There is ./libcompcov/ which implements laf-intel (splitting memcmp, strncmp, etc. to make these conditions easier solvable by afl-fuzz). Highly recommended. -The option that enables QEMU CompareCoverage is QEMU_COMPCOV_LEVEL. -QEMU_COMPCOV_LEVEL=1 is to instrument comparisons with only immediate -values / read-only memory. QEMU_COMPCOV_LEVEL=2 instruments all +The option that enables QEMU CompareCoverage is AFL_COMPCOV_LEVEL. +AFL_COMPCOV_LEVEL=1 is to instrument comparisons with only immediate +values / read-only memory. AFL_COMPCOV_LEVEL=2 instruments all comparison instructions and memory comparison functions when libcompcov is preloaded. Comparison instructions are currently instrumented only on the x86 and x86_64 targets. @@ -68,8 +63,7 @@ opened (e.g. way after command line parsing and config file loading, etc) which can be a huge speed improvement. Note that the specified address must be an address of a basic block. -4) Notes on linking -------------------- +## 4) Notes on linking The feature is supported only on Linux. Supporting BSD may amount to porting the changes made to linux-user/elfload.c and applying them to @@ -90,8 +84,7 @@ practice, this means two things: Setting AFL_INST_LIBS=1 can be used to circumvent the .text detection logic and instrument every basic block encountered. -5) Benchmarking ---------------- +## 5) Benchmarking If you want to compare the performance of the QEMU instrumentation with that of afl-gcc compiled code against the same target, you need to build the @@ -106,8 +99,7 @@ Comparative measurements of execution speed or instrumentation coverage will be fairly meaningless if the optimization levels or instrumentation scopes don't match. -6) Gotchas, feedback, bugs --------------------------- +## 6) Gotchas, feedback, bugs If you need to fix up checksums or do other cleanup on mutated test cases, see experimental/post_library/ for a viable solution. @@ -128,8 +120,7 @@ with -march=core2, can help. Beyond that, this is an early-stage mechanism, so fields reports are welcome. You can send them to . 
-7) Alternatives: static rewriting ---------------------------------- +## 7) Alternatives: static rewriting Statically rewriting binaries just once, instead of attempting to translate them at run time, can be a faster alternative. That said, static rewriting is diff --git a/qemu_mode/build_qemu_support.sh b/qemu_mode/build_qemu_support.sh index 78ad5680..35f5b8ca 100755 --- a/qemu_mode/build_qemu_support.sh +++ b/qemu_mode/build_qemu_support.sh @@ -3,10 +3,17 @@ # american fuzzy lop - QEMU build script # -------------------------------------- # -# Written by Andrew Griffiths and -# Michal Zalewski +# Originally written by Andrew Griffiths and +# Michal Zalewski +# +# TCG instrumentation and block chaining support by Andrea Biondo +# +# +# QEMU 3.1.0 port, TCG thread-safety, CompareCoverage and NeverZero +# counters by Andrea Fioraldi # # Copyright 2015, 2016, 2017 Google Inc. All rights reserved. +# Copyright 2019 AFLplusplus Project. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c index 92e4dbaa..0ccda927 100644 --- a/qemu_mode/libcompcov/libcompcov.so.c +++ b/qemu_mode/libcompcov/libcompcov.so.c @@ -5,7 +5,7 @@ Written and maintained by Andrea Fioraldi - Copyright 2019 Andrea Fioraldi. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h index 8013800d..c475cb58 100644 --- a/qemu_mode/patches/afl-qemu-common.h +++ b/qemu_mode/patches/afl-qemu-common.h @@ -1,19 +1,18 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - high-performance binary-only instrumentation + ------------------------------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski - - Idea & design very much by Andrew Griffiths. + Originally written by Andrew Griffiths and + Michal Zalewski TCG instrumentation and block chaining support by Andrea Biondo - QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi - + QEMU 3.1.0 port, TCG thread-safety, CompareCoverage and NeverZero + counters by Andrea Fioraldi Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h index 04d9007d..4ad31b60 100644 --- a/qemu_mode/patches/afl-qemu-cpu-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-inl.h @@ -1,19 +1,18 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - high-performance binary-only instrumentation + ------------------------------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski - - Idea & design very much by Andrew Griffiths. 
+ Originally written by Andrew Griffiths and + Michal Zalewski TCG instrumentation and block chaining support by Andrea Biondo - QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi - + QEMU 3.1.0 port, TCG thread-safety, CompareCoverage and NeverZero + counters by Andrea Fioraldi Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h index fc78e652..09ecb9d2 100644 --- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h @@ -1,19 +1,18 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - high-performance binary-only instrumentation + ------------------------------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski - - Idea & design very much by Andrew Griffiths. + Originally written by Andrew Griffiths and + Michal Zalewski TCG instrumentation and block chaining support by Andrea Biondo - - QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi - + + QEMU 3.1.0 port, TCG thread-safety, CompareCoverage and NeverZero + counters by Andrea Fioraldi Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h index ff90d1b9..a9c53b8c 100644 --- a/qemu_mode/patches/afl-qemu-tcg-inl.h +++ b/qemu_mode/patches/afl-qemu-tcg-inl.h @@ -1,19 +1,18 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - high-performance binary-only instrumentation + ------------------------------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski - - Idea & design very much by Andrew Griffiths. + Originally written by Andrew Griffiths and + Michal Zalewski TCG instrumentation and block chaining support by Andrea Biondo - QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi - + QEMU 3.1.0 port, TCG thread-safety, CompareCoverage and NeverZero + counters by Andrea Fioraldi Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h index d63c5167..ffe43dba 100644 --- a/qemu_mode/patches/afl-qemu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-translate-inl.h @@ -1,19 +1,18 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - high-performance binary-only instrumentation + ------------------------------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski - - Idea & design very much by Andrew Griffiths. 
+   Originally written by Andrew Griffiths and
+                         Michal Zalewski
 
   TCG instrumentation and block chaining support by Andrea Biondo
                                      
 
-  QEMU 3.1.0 port, TCG thread-safety and CompareCoverage by Andrea Fioraldi
-  
+  QEMU 3.1.0 port, TCG thread-safety, CompareCoverage and NeverZero
+  counters by Andrea Fioraldi 
 
   Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
+  Copyright 2019 AFLplusplus Project. All rights reserved.
 
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
diff --git a/unicorn_mode/README.md b/unicorn_mode/README.md
index 9ee975ef..ea3e3c9b 100644
--- a/unicorn_mode/README.md
+++ b/unicorn_mode/README.md
@@ -1,23 +1,119 @@
-```
- __ _ _
- __ _ / _| | _ _ _ __ (_) ___ ___ _ __ _ __
- / _` | |_| |___| | | | '_ \| |/ __/ _ \| '__| '_ \
-| (_| | _| |___| |_| | | | | | (_| (_) | | | | | |
- \__,_|_| |_| \__,_|_| |_|_|\___\___/|_| |_| |_|
-
-```
+# Unicorn-based binary-only instrumentation for afl-fuzz
 
-afl-unicorn lets you fuzz any piece of binary that can be emulated by
-[Unicorn Engine](http://www.unicorn-engine.org/).
+The idea and much of the original implementation comes from Nathan Voss .
 
-Requirements: Python2
+The port to afl++ is by Dominik Maier .
 
-For the full readme please see docs/unicorn_mode.txt
+The CompareCoverage and NeverZero counters features are by Andrea Fioraldi .
 
-For an in-depth description of what this is, how to install it, and how to use
-it check out this [blog post](https://medium.com/@njvoss299/afl-unicorn-fuzzing-arbitrary-binary-code-563ca28936bf).
+## 1) Introduction
 
-For general help with AFL, please refer to the documents in the ./docs/ directory.
+The code in ./unicorn_mode allows you to build a standalone feature that
+leverages the Unicorn Engine and allows callers to obtain instrumentation
+output for black-box, closed-source binary code snippets. This mechanism
+can then be used by afl-fuzz to stress-test targets that couldn't be built
+with afl-gcc or used in QEMU mode, or with other extensions such as
+TriforceAFL.
 
-Created by Nathan Voss, originally funded by
-[Battelle](https://www.battelle.org/cyber).
+There is a significant performance penalty compared to native AFL,
+but at least we're able to use AFL on these binaries, right?
+
+## 2) How to use
+
+Requirements: you need an installed python2 environment.
+
+### Building AFL's Unicorn Mode
+
+First, make afl++ as usual.
+Once that completes successfully you need to build and add in the Unicorn Mode
+features:
+
+  $ cd unicorn_mode
+  $ ./build_unicorn_support.sh
+
+NOTE: This script downloads, from the Unicorn github page, a Unicorn Engine
+commit that has been tested and is stable-ish. If you are offline, you'll need
+to hack up this script a little bit and supply your own copy of Unicorn's latest
+stable release. It's not very hard, just check out the beginning of the
+build_unicorn_support.sh script and adjust as necessary.
+
+Building Unicorn will take a little bit (~5-10 minutes). Once it completes
+it automatically compiles a sample application and verifies that it works.
+
+### Fuzzing with Unicorn Mode
+
+To really use unicorn-mode effectively you need to prepare the following (a bare-bones harness sketch follows near the end of this file):
+
+  * Relevant binary code to be fuzzed
+  * Knowledge of the memory map and good starting state
+  * Folder containing sample inputs to start fuzzing with
+    + Same ideas as any other AFL inputs
+    + Quality/speed of results will depend greatly on quality of starting
+      samples
+    + See AFL's guidance on how to create a sample corpus
+  * Unicorn-based test harness which:
+    + Adds memory map regions
+    + Loads binary code into memory
+    + Emulates at least one instruction*
+      + Yeah, this is lame. See 'Gotchas' section below for more info
+    + Loads and verifies data to fuzz from a command-line specified file
+      + AFL will provide mutated inputs by changing the file passed to
+        the test harness
+      + Presumably the data to be fuzzed is at a fixed buffer address
+      + If input constraints (size, invalid bytes, etc.) are known they
+        should be checked after the file is loaded. If a constraint
+        fails, just exit the test harness. AFL will treat the input as
+        'uninteresting' and move on.
+    + Sets up registers and memory state for beginning of test
+    + Emulates the code of interest from beginning to end
+    + If a crash is detected, the test harness must 'crash' by
+      throwing a signal (SIGSEGV, SIGKILL, SIGABRT, etc.)
+
+Once you have all those things ready to go you just need to run afl-fuzz in
+'unicorn-mode' by passing in the '-U' flag:
+
+  $ afl-fuzz -U -m none -i /path/to/inputs -o /path/to/results -- ./test_harness @@
+
+The normal afl-fuzz command line format applies to everything here. Refer to
+AFL's main documentation for more info about how to use afl-fuzz effectively.
+
+For a much clearer picture of what all of this looks like, please refer to the
+sample provided in the 'unicorn_mode/samples' directory. There is also a blog
+post that goes over the basics at:
+
+https://medium.com/@njvoss299/afl-unicorn-fuzzing-arbitrary-binary-code-563ca28936bf
+
+The 'helper_scripts' directory also contains several scripts that allow you
+to dump context from a running process, load it, and hook heap allocations. For
+details on how to use them, check out the follow-up blog post to the one linked above.
+
+An example use of AFL-Unicorn mode is discussed in the paper Unicorefuzz:
+https://www.usenix.org/conference/woot19/presentation/maier
+
+## 3) Options
+
+As with the QEMU-based instrumentation, the afl-unicorn twist of afl++
+comes with sub-instruction instrumentation similar in purpose to laf-intel.
+
+The option that enables Unicorn CompareCoverage is the same one used for QEMU.
+AFL_COMPCOV_LEVEL=1 instruments only comparisons with immediate
+values. AFL_COMPCOV_LEVEL=2 instruments all
+comparison instructions. Comparison instructions are currently instrumented only
+on the x86 and x86_64 targets.
+
+## 4) Gotchas, feedback, bugs
+
+To make sure that AFL's fork server starts up correctly the Unicorn test
+harness script must emulate at least one instruction before loading the
+data that will be fuzzed from the input file. It doesn't matter what the
+instruction is, nor if it is valid. This is an artifact of how the fork-server
+is started and could likely be fixed with some clever re-arranging of the
+patches applied to Unicorn.
+
+Running the build script builds Unicorn and its python bindings and installs
+them on your system. This installation will supersede any existing Unicorn
+installation with the patched afl-unicorn version.
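The checklist above maps onto Unicorn's C API roughly as follows. This is a
bare-bones sketch, not a drop-in harness: the addresses are made up, the
"binary code" is a single placeholder nop, and error handling is minimal.
The arm_tester.c sample referenced below shows the real thing.

```c
#include <stdio.h>
#include <stdlib.h>
#include <unicorn/unicorn.h>

#define CODE_ADDR 0x00100000ULL  /* where the snippet gets mapped (made up) */
#define DATA_ADDR 0x00300000ULL  /* fixed buffer for the mutated input      */
#define REGION    0x00010000     /* 4 KB-aligned, as uc_mem_map requires    */

int main(int argc, char **argv) {

  /* Placeholder for the real binary code under test. */
  static const unsigned char code[] = { 0x90 /* nop */ };
  static unsigned char       input[REGION];
  uc_engine                 *uc;

  if (argc < 2) return 1;
  if (uc_open(UC_ARCH_X86, UC_MODE_64, &uc) != UC_ERR_OK) return 1;

  /* Add memory map regions and load the binary code into memory. */
  uc_mem_map(uc, CODE_ADDR, REGION, UC_PROT_READ | UC_PROT_EXEC);
  uc_mem_map(uc, DATA_ADDR, REGION, UC_PROT_READ | UC_PROT_WRITE);
  uc_mem_write(uc, CODE_ADDR, code, sizeof(code));

  /* Emulate at least one instruction *before* touching the input file,
     so the fork server is up before any fuzz data is read. */
  uc_emu_start(uc, CODE_ADDR, CODE_ADDR + sizeof(code), 0, 1);

  /* Load the mutated input and check constraints; on a violation just
     exit, and AFL treats the input as uninteresting. */
  FILE *f = fopen(argv[1], "rb");
  if (!f) return 1;
  size_t len = fread(input, 1, sizeof(input), f);
  fclose(f);
  if (len == 0) return 0;
  uc_mem_write(uc, DATA_ADDR, input, len);

  /* Set up registers/state for the test here, then emulate the code of
     interest from beginning to end; turn a detected crash into a real
     signal so AFL can catch it. */
  if (uc_emu_start(uc, CODE_ADDR, CODE_ADDR + sizeof(code), 0, 0) != UC_ERR_OK)
    abort();

  uc_close(uc);
  return 0;

}
```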
+ +Refer to the unicorn_mode/samples/arm_example/arm_tester.c for an example +of how to do this properly! If you don't get this right, AFL will not +load any mutated inputs and your fuzzing will be useless! diff --git a/unicorn_mode/build_unicorn_support.sh b/unicorn_mode/build_unicorn_support.sh index 2c0fe4b1..1575f66c 100755 --- a/unicorn_mode/build_unicorn_support.sh +++ b/unicorn_mode/build_unicorn_support.sh @@ -1,16 +1,20 @@ #!/bin/sh # -# american fuzzy lop - Unicorn-Mode build script -# -------------------------------------- +# american fuzzy lop++ - unicorn mode build script +# ------------------------------------------------ # -# Written by Nathan Voss +# Originally written by Nathan Voss # # Adapted from code by Andrew Griffiths and # Michal Zalewski # -# Adapted for Afl++ by Dominik Maier +# Adapted for AFLplusplus by Dominik Maier +# +# CompareCoverage and NeverZero counters by Andrea Fioraldi +# # # Copyright 2017 Battelle Memorial Institute. All rights reserved. +# Copyright 2019 AFLplusplus Project. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/unicorn_mode/patches/afl-unicorn-common.h b/unicorn_mode/patches/afl-unicorn-common.h index 9a1b2a6c..6798832c 100644 --- a/unicorn_mode/patches/afl-unicorn-common.h +++ b/unicorn_mode/patches/afl-unicorn-common.h @@ -1,17 +1,17 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - unicorn instrumentation + ---------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski + Originally written by Andrew Griffiths and + Michal Zalewski - TCG instrumentation and block chaining support by Andrea Biondo - Adapted for afl-unicorn by Dominik Maier - Idea & design very much by Andrew Griffiths. + CompareCoverage and NeverZero counters by Andrea Fioraldi + - Copyright 2015, 2016 Google Inc. All rights reserved. + Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ to implement AFL-style instrumentation and to take care of the remaining parts of the AFL fork server logic. - The resulting QEMU binary is essentially a standalone instrumentation + The resulting libunicorn binary is essentially a standalone instrumentation tool; for an example of how to leverage it for other purposes, you can have a look at afl-showmap.c. diff --git a/unicorn_mode/patches/afl-unicorn-cpu-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-inl.h index 90937a17..a713e4ca 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-inl.h @@ -1,17 +1,17 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - unicorn instrumentation + ---------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski + Originally written by Andrew Griffiths and + Michal Zalewski - TCG instrumentation and block chaining support by Andrea Biondo - Adapted for afl-unicorn by Dominik Maier - Idea & design very much by Andrew Griffiths. + CompareCoverage and NeverZero counters by Andrea Fioraldi + - Copyright 2015, 2016 Google Inc. 
All rights reserved. + Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ to implement AFL-style instrumentation and to take care of the remaining parts of the AFL fork server logic. - The resulting QEMU binary is essentially a standalone instrumentation + The resulting libunicorn binary is essentially a standalone instrumentation tool; for an example of how to leverage it for other purposes, you can have a look at afl-showmap.c. diff --git a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h index 7e8f47c9..69877c6b 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h @@ -1,17 +1,17 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - unicorn instrumentation + ---------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski + Originally written by Andrew Griffiths and + Michal Zalewski - TCG instrumentation and block chaining support by Andrea Biondo - Adapted for afl-unicorn by Dominik Maier - Idea & design very much by Andrew Griffiths. + CompareCoverage and NeverZero counters by Andrea Fioraldi + - Copyright 2015, 2016 Google Inc. All rights reserved. + Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ to implement AFL-style instrumentation and to take care of the remaining parts of the AFL fork server logic. - The resulting QEMU binary is essentially a standalone instrumentation + The resulting libunicorn binary is essentially a standalone instrumentation tool; for an example of how to leverage it for other purposes, you can have a look at afl-showmap.c. diff --git a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h index d5a29cce..fa4974d6 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h @@ -1,17 +1,17 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - unicorn instrumentation + ---------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski + Originally written by Andrew Griffiths and + Michal Zalewski - TCG instrumentation and block chaining support by Andrea Biondo - Adapted for afl-unicorn by Dominik Maier - Idea & design very much by Andrew Griffiths. + CompareCoverage and NeverZero counters by Andrea Fioraldi + - Copyright 2015, 2016 Google Inc. All rights reserved. + Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ to implement AFL-style instrumentation and to take care of the remaining parts of the AFL fork server logic. 
- The resulting QEMU binary is essentially a standalone instrumentation + The resulting libunicorn binary is essentially a standalone instrumentation tool; for an example of how to leverage it for other purposes, you can have a look at afl-showmap.c. diff --git a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h index 0019bbfa..1f0667ce 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h @@ -1,17 +1,17 @@ /* - american fuzzy lop - high-performance binary-only instrumentation - ----------------------------------------------------------------- + american fuzzy lop++ - unicorn instrumentation + ---------------------------------------------- - Written by Andrew Griffiths and - Michal Zalewski + Originally written by Andrew Griffiths and + Michal Zalewski - TCG instrumentation and block chaining support by Andrea Biondo - Adapted for afl-unicorn by Dominik Maier - Idea & design very much by Andrew Griffiths. + CompareCoverage and NeverZero counters by Andrea Fioraldi + - Copyright 2015, 2016 Google Inc. All rights reserved. + Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ to implement AFL-style instrumentation and to take care of the remaining parts of the AFL fork server logic. - The resulting QEMU binary is essentially a standalone instrumentation + The resulting libunicorn binary is essentially a standalone instrumentation tool; for an example of how to leverage it for other purposes, you can have a look at afl-showmap.c. diff --git a/unicorn_mode/samples/compcov_x64/COMPILE.md b/unicorn_mode/samples/compcov_x64/COMPILE.md index db488d30..35de7ad8 100644 --- a/unicorn_mode/samples/compcov_x64/COMPILE.md +++ b/unicorn_mode/samples/compcov_x64/COMPILE.md @@ -1,5 +1,4 @@ -Compiling compcov_target.c -========================== +# Compiling compcov_target.c compcov_target.c was compiled without optimization, position-independent, and without standard libraries using the following command line: diff --git a/unicorn_mode/samples/compcov_x64/compcov_target.c b/unicorn_mode/samples/compcov_x64/compcov_target.c index 71b4cb0e..eb1205b1 100644 --- a/unicorn_mode/samples/compcov_x64/compcov_target.c +++ b/unicorn_mode/samples/compcov_x64/compcov_target.c @@ -7,7 +7,7 @@ * (0x00300000), so make sure that your Unicorn emulation of this * puts user data there. * - * Written by Nathan Voss + * Written by Andrea Fioraldi */ // Magic address where mutated data will be placed diff --git a/unicorn_mode/samples/simple/COMPILE.md b/unicorn_mode/samples/simple/COMPILE.md index bd4a66c6..f7bf5b50 100644 --- a/unicorn_mode/samples/simple/COMPILE.md +++ b/unicorn_mode/samples/simple/COMPILE.md @@ -1,5 +1,4 @@ -Compiling simple_target.c -========================== +# Compiling simple_target.c You shouldn't need to compile simple_target.c since a MIPS binary version is pre-built and shipped with afl-unicorn. This file documents how the binary @@ -38,4 +37,4 @@ mips-linux-gnu-gcc -o simple_target.elf simple_target.c -fPIC -O0 -nostdlib Note that the output of this is padded with nulls for 16-byte alignment. This is important when emulating it, as NOPs will be added after the return of main() -as necessary. \ No newline at end of file +as necessary. 
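The compcov_x64 sample above demonstrates the kind of target CompareCoverage
is built for: progress gated behind multi-byte equality checks, placed at the
fixed input address the harness is expected to use. A sketch of roughly what
such a target boils down to (made-up constants; the shipped compcov_target.c
is the authoritative sample, built freestanding and unoptimized so the
comparisons survive as real instructions):

```c
/* Compcov-style target sketch, illustration only. */

#define DATA_ADDR 0x00300000UL  /* the harness places mutated input here */

int main(void) {

  const unsigned int *data = (const unsigned int *)DATA_ADDR;

  /* For plain edge coverage each 32-bit check below is a single branch
     with a 2^-32 chance of flipping per input; CompareCoverage rewards
     every matching byte, so the fuzzer can solve the magic values byte
     by byte. */
  if (data[0] == 0x41424344)             /* bytes "DCBA" in memory (LE) */
    if (data[1] == 0x4C4F4C21)
      *(volatile unsigned int *)0 = 1;   /* simulated crash             */

  return 0;

}
```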
From eadd378f6c54a7e021985bca041d9642fff41034 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 30 Aug 2019 11:42:30 +0200 Subject: [PATCH 50/83] update changelog --- docs/ChangeLog | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/ChangeLog b/docs/ChangeLog index 6d56d314..6d4c4792 100644 --- a/docs/ChangeLog +++ b/docs/ChangeLog @@ -26,6 +26,10 @@ Version ++2.53d (dev): afl never did), plus shows tuple content summary information now - fix building on *BSD (thanks to tobias.kortkamp for the patch) - small docu updates + - NeverZero counters for QEMU + - NeverZero counters for Unicorn + - CompareCoverage Unicorn + - Immediates-only instrumentation for CompareCoverage - ... your patch? :) From ca6ac09dcc1452bdab8b704cef736fed4a2f1156 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 30 Aug 2019 12:02:19 +0200 Subject: [PATCH 51/83] format like AFL style --- src/afl-gotcpu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c index 4163ad65..9db86933 100644 --- a/src/afl-gotcpu.c +++ b/src/afl-gotcpu.c @@ -26,7 +26,9 @@ */ #define AFL_MAIN -#define _GNU_SOURCE +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif #include #include From 2eeb07d164cb7874a64a48bd9c1bf4112636ac43 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 30 Aug 2019 12:03:11 +0200 Subject: [PATCH 52/83] format like AFL style (dotfiles) --- .clang-format | 8 ++++---- .custom-format.py | 26 +++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/.clang-format b/.clang-format index f691d684..ef4cb190 100644 --- a/.clang-format +++ b/.clang-format @@ -3,16 +3,16 @@ Language: Cpp # BasedOnStyle: Google AccessModifierOffset: -1 AlignAfterOpenBracket: Align -AlignConsecutiveAssignments: true +AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: true AlignEscapedNewlines: Left AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: false -AllowShortCaseLabelsOnASingleLine: false +AllowShortBlocksOnASingleLine: true +AllowShortCaseLabelsOnASingleLine: true AllowShortFunctionsOnASingleLine: false -AllowShortIfStatementsOnASingleLine: false +AllowShortIfStatementsOnASingleLine: true AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None diff --git a/.custom-format.py b/.custom-format.py index a73d92ab..32b8f7c9 100755 --- a/.custom-format.py +++ b/.custom-format.py @@ -3,6 +3,9 @@ import subprocess import sys import os +import re + +# string_re = re.compile('(\\"(\\\\.|[^"\\\\])*\\")') # future use with open(".clang-format") as f: fmt = f.read() @@ -36,14 +39,17 @@ for line in fmt.split("\n"): if line[0].strip() == "ColumnLimit": COLUMN_LIMIT = int(line[1].strip()) + def custom_format(filename): p = subprocess.Popen([CLANG_FORMAT_BIN, filename], stdout=subprocess.PIPE) src, _ = p.communicate() src = str(src, "utf-8") macro_indent = 0 - + in_define = False + last_line = None out = "" + for line in src.split("\n"): if line.startswith("#"): i = macro_indent @@ -54,6 +60,8 @@ def custom_format(filename): i -= 1 elif line.startswith("#if") and not (line.startswith("#ifndef") and (line.endswith("_H") or line.endswith("H_"))): macro_indent += 1 + elif line.startswith("#define"): + in_define = True r = "#" + (i * " ") + line[1:] if i != 0 and line.endswith("\\"): r = r[:-1] @@ -67,7 +75,23 @@ def custom_format(filename): cmt_start = line.rfind("/*") line = line[:cmt_start] + " " * 
(COLUMN_LIMIT-2 - len(line)) + line[cmt_start:] + define_padding = 0 + if last_line is not None and in_define and last_line.endswith("\\"): + last_line = last_line[:-1] + define_padding = max(0, len(last_line[last_line.rfind("\n")+1:])) + + if last_line is not None and last_line.strip().endswith("{") and line.strip() != "": + line = (" " * define_padding + "\\" if in_define else "") + "\n" + line + elif last_line is not None and last_line.strip().startswith("}") and line.strip() != "": + line = (" " * define_padding + "\\" if in_define else "") + "\n" + line + elif line.strip().startswith("}") and last_line is not None and last_line.strip() != "": + line = (" " * define_padding + "\\" if in_define else "") + "\n" + line + + if not line.endswith("\\"): + in_define = False + out += line + "\n" + last_line = line return (out) From 22454ce60b0253a6de260375c904895cd0efd1bc Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 30 Aug 2019 12:15:56 +0200 Subject: [PATCH 53/83] fix issue with static variables needed by forkserver in afl-fuzz --- src/afl-fuzz.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 1e8c5777..aa29e85a 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -224,7 +224,7 @@ EXP_ST u8 skip_deterministic, /* Skip deterministic stages? */ fast_cal; /* Try to calibrate faster? */ u8 uses_asan; /* Target uses ASAN? */ -static s32 out_fd, /* Persistent fd for out_file */ +s32 out_fd, /* Persistent fd for out_file */ #ifndef HAVE_ARC4RANDOM dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ #endif From bbd9441fc67fd969c111fdc1a9828d95adb71a30 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 30 Aug 2019 12:17:34 +0200 Subject: [PATCH 54/83] code-format in Makefile --- Makefile | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/Makefile b/Makefile index 9699a0ad..afe745e1 100644 --- a/Makefile +++ b/Makefile @@ -156,6 +156,23 @@ afl-gotcpu: src/afl-gotcpu.c $(COMM_HDR) | test_x86 $(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS) +code-format: + ./.custom-format.py -i src/* + ./.custom-format.py -i include/* + ./.custom-format.py -i libdislocator/*.c + ./.custom-format.py -i libtokencap/*.c + ./.custom-format.py -i llvm_mode/*.c + ./.custom-format.py -i llvm_mode/*.h + ./.custom-format.py -i llvm_mode/*.cc + ./.custom-format.py -i qemu_mode/patches/*.h + ./.custom-format.py -i qemu_mode/libcompcov/*.c + ./.custom-format.py -i qemu_mode/libcompcov/*.cc + ./.custom-format.py -i qemu_mode/libcompcov/*.h + ./.custom-format.py -i unicorn_mode/patches/*.h + ./.custom-format.py -i *.h + ./.custom-format.py -i *.c + + ifndef AFL_NO_X86 test_build: afl-gcc afl-as afl-showmap From 0ba49eacc9062622cb1aa55cf7b7e0b7d95aff9f Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 30 Aug 2019 12:20:33 +0200 Subject: [PATCH 55/83] move android-ashmem.h to include/ --- android-ashmem.h => include/android-ashmem.h | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename android-ashmem.h => include/android-ashmem.h (100%) diff --git a/android-ashmem.h b/include/android-ashmem.h similarity index 100% rename from android-ashmem.h rename to include/android-ashmem.h From 113fc168ab02f5241522a74c06bc8fb21fcafc55 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Fri, 30 Aug 2019 13:00:45 +0200 Subject: [PATCH 56/83] split afl-fuzz #1 (globls and python are now separate) --- Makefile | 6 +- include/afl-fuzz.h | 497 +++++++++++++++++++++++++ include/config.h | 6 + src/{ => afl-fuzz}/afl-fuzz.c | 672 
+--------------------------------- 4 files changed, 517 insertions(+), 664 deletions(-) create mode 100644 include/afl-fuzz.h rename src/{ => afl-fuzz}/afl-fuzz.c (92%) diff --git a/Makefile b/Makefile index afe745e1..850a6a80 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,8 @@ CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I include/ \ -DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \ -DBIN_PATH=\"$(BIN_PATH)\" +AFL_FUZZ_FILES = $(wildcard src/afl-fuzz/*.c) + PYTHON_INCLUDE ?= /usr/include/python2.7 ifneq "$(filter Linux GNU%,$(shell uname))" "" @@ -140,8 +142,8 @@ afl-forkserver.o : src/afl-forkserver.c include/forkserver.h afl-sharedmem.o : src/afl-sharedmem.c include/sharedmem.h $(CC) $(CFLAGS) -c src/afl-sharedmem.c -afl-fuzz: src/afl-fuzz.c afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86 - $(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) $(PYFLAGS) +afl-fuzz: include/afl-fuzz.h $(AFL_FUZZ_FILES) afl-common.o afl-sharedmem.o afl-forkserver.o $(COMM_HDR) | test_x86 + $(CC) $(CFLAGS) $(AFL_FUZZ_FILES) afl-common.o afl-sharedmem.o afl-forkserver.o -o $@ $(LDFLAGS) $(PYFLAGS) afl-showmap: src/afl-showmap.c afl-common.o afl-sharedmem.o $(COMM_HDR) | test_x86 $(CC) $(CFLAGS) src/$@.c afl-common.o afl-sharedmem.o -o $@ $(LDFLAGS) diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h new file mode 100644 index 00000000..f243c7ba --- /dev/null +++ b/include/afl-fuzz.h @@ -0,0 +1,497 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#ifndef _AFL_FUZZ_H +#define _AFL_FUZZ_H + +#define AFL_MAIN +#define MESSAGES_TO_STDOUT + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#define _FILE_OFFSET_BITS 64 + +#ifdef __ANDROID__ + #include "android-ashmem.h" +#endif + +#include "config.h" +#include "types.h" +#include "debug.h" +#include "alloc-inl.h" +#include "hash.h" +#include "sharedmem.h" +#include "forkserver.h" +#include "common.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +# include +# define HAVE_ARC4RANDOM 1 +#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ + +/* For systems that have sched_setaffinity; right now just Linux, but one + can hope... */ + +#ifdef __linux__ +# define HAVE_AFFINITY 1 +#endif /* __linux__ */ + + +struct queue_entry { + + u8* fname; /* File name for the test case */ + u32 len; /* Input length */ + + u8 cal_failed, /* Calibration failed? */ + trim_done, /* Trimmed? */ + was_fuzzed, /* historical, but needed for MOpt */ + passed_det, /* Deterministic stages passed? */ + has_new_cov, /* Triggers new coverage? */ + var_behavior, /* Variable behavior? */ + favored, /* Currently favored? */ + fs_redundant; /* Marked as redundant in the fs? 
*/ + + u32 bitmap_size, /* Number of bits set in bitmap */ + fuzz_level, /* Number of fuzzing iterations */ + exec_cksum; /* Checksum of the execution trace */ + + u64 exec_us, /* Execution time (us) */ + handicap, /* Number of queue cycles behind */ + n_fuzz, /* Number of fuzz, does not overflow */ + depth; /* Path depth */ + + u8* trace_mini; /* Trace bytes, if kept */ + u32 tc_ref; /* Trace bytes ref count */ + + struct queue_entry *next, /* Next element, if any */ + *next_100; /* 100 elements ahead */ + +}; + +struct extra_data { + u8* data; /* Dictionary token data */ + u32 len; /* Dictionary token length */ + u32 hit_cnt; /* Use count in the corpus */ +}; + + +/* Fuzzing stages */ + +enum { + /* 00 */ STAGE_FLIP1, + /* 01 */ STAGE_FLIP2, + /* 02 */ STAGE_FLIP4, + /* 03 */ STAGE_FLIP8, + /* 04 */ STAGE_FLIP16, + /* 05 */ STAGE_FLIP32, + /* 06 */ STAGE_ARITH8, + /* 07 */ STAGE_ARITH16, + /* 08 */ STAGE_ARITH32, + /* 09 */ STAGE_INTEREST8, + /* 10 */ STAGE_INTEREST16, + /* 11 */ STAGE_INTEREST32, + /* 12 */ STAGE_EXTRAS_UO, + /* 13 */ STAGE_EXTRAS_UI, + /* 14 */ STAGE_EXTRAS_AO, + /* 15 */ STAGE_HAVOC, + /* 16 */ STAGE_SPLICE, + /* 17 */ STAGE_PYTHON, + /* 18 */ STAGE_CUSTOM_MUTATOR +}; + +/* Stage value types */ + +enum { + /* 00 */ STAGE_VAL_NONE, + /* 01 */ STAGE_VAL_LE, + /* 02 */ STAGE_VAL_BE +}; + +/* Execution status fault codes */ + +enum { + /* 00 */ FAULT_NONE, + /* 01 */ FAULT_TMOUT, + /* 02 */ FAULT_CRASH, + /* 03 */ FAULT_ERROR, + /* 04 */ FAULT_NOINST, + /* 05 */ FAULT_NOBITS +}; + + +/* MOpt: + Lots of globals, but mostly for the status UI and other things where it + really makes no sense to haul them around as function parameters. */ +extern u64 limit_time_puppet, + orig_hit_cnt_puppet, + last_limit_time_start, + tmp_pilot_time, + total_pacemaker_time, + total_puppet_find, + temp_puppet_find, + most_time_key, + most_time, + most_execs_key, + most_execs, + old_hit_count; + +extern s32 SPLICE_CYCLES_puppet, + limit_time_sig, + key_puppet, + key_module; + +extern double w_init, + w_end, + w_now; + +extern s32 g_now; +extern s32 g_max; + +#define operator_num 16 +#define swarm_num 5 +#define period_core 500000 + +extern u64 tmp_core_time; +extern s32 swarm_now; + +extern double x_now[swarm_num][operator_num], + L_best[swarm_num][operator_num], + eff_best[swarm_num][operator_num], + G_best[operator_num], + v_now[swarm_num][operator_num], + probability_now[swarm_num][operator_num], + swarm_fitness[swarm_num]; + +extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */ + stage_finds_puppet_v2[swarm_num][operator_num], + stage_cycles_puppet_v2[swarm_num][operator_num], + stage_cycles_puppet_v3[swarm_num][operator_num], + stage_cycles_puppet[swarm_num][operator_num], + operator_finds_puppet[operator_num], + core_operator_finds_puppet[operator_num], + core_operator_finds_puppet_v2[operator_num], + core_operator_cycles_puppet[operator_num], + core_operator_cycles_puppet_v2[operator_num], + core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ + +#define RAND_C (rand()%1000*0.001) +#define v_max 1 +#define v_min 0.05 +#define limit_time_bound 1.1 +#define SPLICE_CYCLES_puppet_up 25 +#define SPLICE_CYCLES_puppet_low 5 +#define STAGE_RANDOMBYTE 12 +#define STAGE_DELETEBYTE 13 +#define STAGE_Clone75 14 +#define STAGE_OverWrite75 15 +#define period_pilot 50000 + +extern double period_pilot_tmp; +extern s32 key_lv; + +extern u8 *in_dir, /* Input directory with test cases */ + *out_dir, /* Working & output directory */ + *tmp_dir , /* 
Temporary directory for input */ + *sync_dir, /* Synchronization directory */ + *sync_id, /* Fuzzer ID */ + *power_name, /* Power schedule name */ + *use_banner, /* Display banner */ + *in_bitmap, /* Input bitmap */ + *file_extension, /* File extension */ + *orig_cmdline; /* Original command line */ +extern u8 *doc_path, /* Path to documentation dir */ + *target_path, /* Path to target binary */ + *out_file; /* File to fuzz, if any */ + +extern u32 exec_tmout; /* Configurable exec timeout (ms) */ +extern u32 hang_tmout; /* Timeout used for hang det (ms) */ + +extern u64 mem_limit; /* Memory cap for child (MB) */ + +extern u8 cal_cycles, /* Calibration cycles defaults */ + cal_cycles_long, + debug, /* Debug mode */ + python_only; /* Python-only mode */ + +extern u32 stats_update_freq; /* Stats update frequency (execs) */ + +enum { + /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */ + /* 01 */ FAST, /* Exponential schedule */ + /* 02 */ COE, /* Cut-Off Exponential schedule */ + /* 03 */ LIN, /* Linear schedule */ + /* 04 */ QUAD, /* Quadratic schedule */ + /* 05 */ EXPLOIT, /* AFL's exploitation-based const. */ + + POWER_SCHEDULES_NUM +}; + +extern char *power_names[POWER_SCHEDULES_NUM]; + +extern u8 schedule; /* Power schedule (default: EXPLORE)*/ +extern u8 havoc_max_mult; + +extern u8 skip_deterministic, /* Skip deterministic stages? */ + force_deterministic, /* Force deterministic stages? */ + use_splicing, /* Recombine input files? */ + dumb_mode, /* Run in non-instrumented mode? */ + score_changed, /* Scoring for favorites changed? */ + kill_signal, /* Signal that killed the child */ + resuming_fuzz, /* Resuming an older fuzzing job? */ + timeout_given, /* Specific timeout given? */ + not_on_tty, /* stdout is not a tty */ + term_too_small, /* terminal dimensions too small */ + no_forkserver, /* Disable forkserver? */ + crash_mode, /* Crash mode! Yeah! */ + in_place_resume, /* Attempt in-place resume? */ + auto_changed, /* Auto-generated tokens changed? */ + no_cpu_meter_red, /* Feng shui on the status screen */ + no_arith, /* Skip most arithmetic ops */ + shuffle_queue, /* Shuffle input queue? */ + bitmap_changed, /* Time to update bitmap? */ + qemu_mode, /* Running in QEMU mode? */ + unicorn_mode, /* Running in Unicorn mode? */ + skip_requested, /* Skip request, via SIGUSR1 */ + run_over10m, /* Run time over 10 minutes? */ + persistent_mode, /* Running in persistent mode? */ + deferred_mode, /* Deferred forkserver mode? */ + fixed_seed, /* do not reseed */ + fast_cal, /* Try to calibrate faster? */ + uses_asan; /* Target uses ASAN? */ + +extern s32 out_fd, /* Persistent fd for out_file */ +#ifndef HAVE_ARC4RANDOM + dev_urandom_fd, /* Persistent fd for /dev/urandom */ +#endif + dev_null_fd, /* Persistent fd for /dev/null */ + fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ + +extern s32 forksrv_pid, /* PID of the fork server */ + child_pid, /* PID of the fuzzed program */ + out_dir_fd; /* FD of the lock file */ + +extern u8* trace_bits; /* SHM with instrumentation bitmap */ + +extern u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ + virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ + virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ + +extern u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ + +extern volatile u8 stop_soon, /* Ctrl-C pressed? */ + clear_screen, /* Window resized? */ + child_timed_out; /* Traced process timed out? 
*/ + +extern u32 queued_paths, /* Total number of queued testcases */ + queued_variable, /* Testcases with variable behavior */ + queued_at_start, /* Total number of initial inputs */ + queued_discovered, /* Items discovered during this run */ + queued_imported, /* Items imported via -S */ + queued_favored, /* Paths deemed favorable */ + queued_with_cov, /* Paths with new coverage bytes */ + pending_not_fuzzed, /* Queued but not done yet */ + pending_favored, /* Pending favored paths */ + cur_skipped_paths, /* Abandoned inputs in cur cycle */ + cur_depth, /* Current path depth */ + max_depth, /* Max path depth */ + useless_at_start, /* Number of useless starting paths */ + var_byte_count, /* Bitmap bytes with var behavior */ + current_entry, /* Current queue entry ID */ + havoc_div; /* Cycle count divisor for havoc */ + +extern u64 total_crashes, /* Total number of crashes */ + unique_crashes, /* Crashes with unique signatures */ + total_tmouts, /* Total number of timeouts */ + unique_tmouts, /* Timeouts with unique signatures */ + unique_hangs, /* Hangs with unique signatures */ + total_execs, /* Total execve() calls */ + start_time, /* Unix start time (ms) */ + last_path_time, /* Time for most recent path (ms) */ + last_crash_time, /* Time for most recent crash (ms) */ + last_hang_time, /* Time for most recent hang (ms) */ + last_crash_execs, /* Exec counter at last crash */ + queue_cycle, /* Queue round counter */ + cycles_wo_finds, /* Cycles without any new paths */ + trim_execs, /* Execs done to trim input files */ + bytes_trim_in, /* Bytes coming into the trimmer */ + bytes_trim_out, /* Bytes coming outa the trimmer */ + blocks_eff_total, /* Blocks subject to effector maps */ + blocks_eff_select; /* Blocks selected as fuzzable */ + +extern u32 subseq_tmouts; /* Number of timeouts in a row */ + +extern u8 *stage_name, /* Name of the current fuzz stage */ + *stage_short, /* Short stage name */ + *syncing_party; /* Currently syncing with... */ + +extern s32 stage_cur, stage_max; /* Stage progression */ +extern s32 splicing_with; /* Splicing with which test case? */ + +extern u32 master_id, master_max; /* Master instance job splitting */ + +extern u32 syncing_case; /* Syncing with case #... 
*/ + +extern s32 stage_cur_byte, /* Byte offset of current stage op */ + stage_cur_val; /* Value used for stage op */ + +extern u8 stage_val_type; /* Value type (STAGE_VAL_*) */ + +extern u64 stage_finds[32], /* Patterns found per fuzz stage */ + stage_cycles[32]; /* Execs per fuzz stage */ + +#ifndef HAVE_ARC4RANDOM +extern u32 rand_cnt; /* Random number counter */ +#endif + +extern u64 total_cal_us, /* Total calibration time (us) */ + total_cal_cycles; /* Total calibration cycles */ + +extern u64 total_bitmap_size, /* Total bit count for all bitmaps */ + total_bitmap_entries; /* Number of bitmaps counted */ + +extern s32 cpu_core_count; /* CPU core count */ + +#ifdef HAVE_AFFINITY + +extern s32 cpu_aff; /* Selected CPU core */ + +#endif /* HAVE_AFFINITY */ + +extern FILE* plot_file; /* Gnuplot output file */ + + + +extern struct queue_entry *queue, /* Fuzzing queue (linked list) */ + *queue_cur, /* Current offset within the queue */ + *queue_top, /* Top of the list */ + *q_prev100; /* Previous 100 marker */ + +extern struct queue_entry* + top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ + +extern struct extra_data* extras; /* Extra tokens to fuzz with */ +extern u32 extras_cnt; /* Total number of tokens read */ + +extern struct extra_data* a_extras; /* Automatically selected extras */ +extern u32 a_extras_cnt; /* Total number of tokens available */ + +u8* (*post_handler)(u8* buf, u32* len); + +/* hooks for the custom mutator function */ +size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed); +size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data); + +/* Interesting values, as per config.h */ + +extern s8 interesting_8[INTERESTING_8_LEN]; +extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN]; +extern s32 interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN]; + +/* Python stuff */ +#ifdef USE_PYTHON + +#include + +extern PyObject *py_module; + +enum { + /* 00 */ PY_FUNC_INIT, + /* 01 */ PY_FUNC_FUZZ, + /* 02 */ PY_FUNC_INIT_TRIM, + /* 03 */ PY_FUNC_POST_TRIM, + /* 04 */ PY_FUNC_TRIM, + PY_FUNC_COUNT +}; + +extern PyObject *py_functions[PY_FUNC_COUNT]; + +#endif + +/**** Prototypes ****/ + +/* Python stuff */ +#ifdef USE_PYTHON + +int init_py(); +void finalize_py(); +void fuzz_py(char*, size_t, char*, size_t, char**, size_t*); +u32 init_trim_py(char*, size_t); +u32 post_trim_py(char); +void trim_py(char**, size_t*); + +#endif + +/**** Inline routines ****/ + +/* Generate a random number (from 0 to limit - 1). This may + have slight bias. */ + +static inline u32 UR(u32 limit) { +#ifdef HAVE_ARC4RANDOM + if (fixed_seed) { + return random() % limit; + } + + /* The boundary not being necessarily a power of 2, + we need to ensure the result uniformity. 
*/ + return arc4random_uniform(limit); +#else + if (!fixed_seed && unlikely(!rand_cnt--)) { + u32 seed[2]; + + ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom"); + srandom(seed[0]); + rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG); + } + + return random() % limit; +#endif +} + +#endif + diff --git a/include/config.h b/include/config.h index 37a2a794..324435b3 100644 --- a/include/config.h +++ b/include/config.h @@ -234,6 +234,8 @@ 100, /* One-off with common buffer size */ \ 127 /* Overflow signed 8-bit when incremented */ +#define INTERESTING_8_LEN 9 + #define INTERESTING_16 \ -32768, /* Overflow signed 16-bit when decremented */ \ -129, /* Overflow signed 8-bit */ \ @@ -246,6 +248,8 @@ 4096, /* One-off with common buffer size */ \ 32767 /* Overflow signed 16-bit when incremented */ +#define INTERESTING_16_LEN 10 + #define INTERESTING_32 \ -2147483648LL, /* Overflow signed 32-bit when decremented */ \ -100663046, /* Large negative number (endian-agnostic) */ \ @@ -256,6 +260,8 @@ 100663045, /* Large positive number (endian-agnostic) */ \ 2147483647 /* Overflow signed 32-bit when incremented */ +#define INTERESTING_32_LEN 8 + /*********************************************************** * * * Really exotic stuff you probably don't want to touch: * diff --git a/src/afl-fuzz.c b/src/afl-fuzz/afl-fuzz.c similarity index 92% rename from src/afl-fuzz.c rename to src/afl-fuzz/afl-fuzz.c index aa29e85a..b93c17c8 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz/afl-fuzz.c @@ -20,633 +20,7 @@ */ -#define AFL_MAIN -#define MESSAGES_TO_STDOUT - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif -#define _FILE_OFFSET_BITS 64 - -#ifdef __ANDROID__ - #include "android-ashmem.h" -#endif - -#include "config.h" -#include "types.h" -#include "debug.h" -#include "alloc-inl.h" -#include "hash.h" -#include "sharedmem.h" -#include "forkserver.h" -#include "common.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) -# include -# define HAVE_ARC4RANDOM 1 -#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ - -/* For systems that have sched_setaffinity; right now just Linux, but one - can hope... */ - -#ifdef __linux__ -# define HAVE_AFFINITY 1 -#endif /* __linux__ */ - -/* A toggle to export some variables when building as a library. Not very - useful for the general public. */ - -#ifdef AFL_LIB -# define EXP_ST -#else -# define EXP_ST static -#endif /* ^AFL_LIB */ - -/* MOpt: - Lots of globals, but mostly for the status UI and other things where it - really makes no sense to haul them around as function parameters. 
*/ -EXP_ST u64 limit_time_puppet = 0; -u64 orig_hit_cnt_puppet = 0; -u64 last_limit_time_start = 0; -u64 tmp_pilot_time = 0; -u64 total_pacemaker_time = 0; -u64 total_puppet_find = 0; -u64 temp_puppet_find = 0; -u64 most_time_key = 0; -u64 most_time = 0; -u64 most_execs_key = 0; -u64 most_execs = 0; -u64 old_hit_count = 0; -int SPLICE_CYCLES_puppet; -int limit_time_sig = 0; -int key_puppet = 0; -int key_module = 0; -double w_init = 0.9; -double w_end = 0.3; -double w_now; -int g_now = 0; -int g_max = 5000; -#define operator_num 16 -#define swarm_num 5 -#define period_core 500000 -u64 tmp_core_time = 0; -int swarm_now = 0 ; -double x_now[swarm_num][operator_num], - L_best[swarm_num][operator_num], - eff_best[swarm_num][operator_num], - G_best[operator_num], - v_now[swarm_num][operator_num], - probability_now[swarm_num][operator_num], - swarm_fitness[swarm_num]; - - static u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */ - stage_finds_puppet_v2[swarm_num][operator_num], - stage_cycles_puppet_v2[swarm_num][operator_num], - stage_cycles_puppet_v3[swarm_num][operator_num], - stage_cycles_puppet[swarm_num][operator_num], - operator_finds_puppet[operator_num], - core_operator_finds_puppet[operator_num], - core_operator_finds_puppet_v2[operator_num], - core_operator_cycles_puppet[operator_num], - core_operator_cycles_puppet_v2[operator_num], - core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ - -#define RAND_C (rand()%1000*0.001) -#define v_max 1 -#define v_min 0.05 -#define limit_time_bound 1.1 -#define SPLICE_CYCLES_puppet_up 25 -#define SPLICE_CYCLES_puppet_low 5 -#define STAGE_RANDOMBYTE 12 -#define STAGE_DELETEBYTE 13 -#define STAGE_Clone75 14 -#define STAGE_OverWrite75 15 -#define period_pilot 50000 -double period_pilot_tmp = 5000.0; -int key_lv = 0; - -EXP_ST u8 *in_dir, /* Input directory with test cases */ - *out_dir, /* Working & output directory */ - *tmp_dir , /* Temporary directory for input */ - *sync_dir, /* Synchronization directory */ - *sync_id, /* Fuzzer ID */ - *power_name, /* Power schedule name */ - *use_banner, /* Display banner */ - *in_bitmap, /* Input bitmap */ - *file_extension, /* File extension */ - *orig_cmdline; /* Original command line */ - u8 *doc_path, /* Path to documentation dir */ - *target_path, /* Path to target binary */ - *out_file; /* File to fuzz, if any */ - - u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */ -static u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */ - - u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ - -EXP_ST u8 cal_cycles = CAL_CYCLES; /* Calibration cycles defaults */ -EXP_ST u8 cal_cycles_long = CAL_CYCLES_LONG; -EXP_ST u8 debug, /* Debug mode */ - python_only; /* Python-only mode */ - -static u32 stats_update_freq = 1; /* Stats update frequency (execs) */ - -enum { - /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */ - /* 01 */ FAST, /* Exponential schedule */ - /* 02 */ COE, /* Cut-Off Exponential schedule */ - /* 03 */ LIN, /* Linear schedule */ - /* 04 */ QUAD, /* Quadratic schedule */ - /* 05 */ EXPLOIT /* AFL's exploitation-based const. */ -}; - -char *power_names[] = { - "explore", - "fast", - "coe", - "lin", - "quad", - "exploit" -}; - -static u8 schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/ -static u8 havoc_max_mult = HAVOC_MAX_MULT; - -EXP_ST u8 skip_deterministic, /* Skip deterministic stages? */ - force_deterministic, /* Force deterministic stages? 
*/ - use_splicing, /* Recombine input files? */ - dumb_mode, /* Run in non-instrumented mode? */ - score_changed, /* Scoring for favorites changed? */ - kill_signal, /* Signal that killed the child */ - resuming_fuzz, /* Resuming an older fuzzing job? */ - timeout_given, /* Specific timeout given? */ - not_on_tty, /* stdout is not a tty */ - term_too_small, /* terminal dimensions too small */ - no_forkserver, /* Disable forkserver? */ - crash_mode, /* Crash mode! Yeah! */ - in_place_resume, /* Attempt in-place resume? */ - auto_changed, /* Auto-generated tokens changed? */ - no_cpu_meter_red, /* Feng shui on the status screen */ - no_arith, /* Skip most arithmetic ops */ - shuffle_queue, /* Shuffle input queue? */ - bitmap_changed = 1, /* Time to update bitmap? */ - qemu_mode, /* Running in QEMU mode? */ - unicorn_mode, /* Running in Unicorn mode? */ - skip_requested, /* Skip request, via SIGUSR1 */ - run_over10m, /* Run time over 10 minutes? */ - persistent_mode, /* Running in persistent mode? */ - deferred_mode, /* Deferred forkserver mode? */ - fixed_seed, /* do not reseed */ - fast_cal; /* Try to calibrate faster? */ - u8 uses_asan; /* Target uses ASAN? */ - -s32 out_fd, /* Persistent fd for out_file */ -#ifndef HAVE_ARC4RANDOM - dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ -#endif - dev_null_fd = -1, /* Persistent fd for /dev/null */ - fsrv_ctl_fd, /* Fork server control pipe (write) */ - fsrv_st_fd; /* Fork server status pipe (read) */ - - s32 forksrv_pid, /* PID of the fork server */ - child_pid = -1, /* PID of the fuzzed program */ - out_dir_fd = -1; /* FD of the lock file */ - - u8* trace_bits; /* SHM with instrumentation bitmap */ - -EXP_ST u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ - virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ - virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ - -static u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ - -static volatile u8 stop_soon, /* Ctrl-C pressed? */ - clear_screen = 1, /* Window resized? */ - child_timed_out; /* Traced process timed out? 
*/ - -EXP_ST u32 queued_paths, /* Total number of queued testcases */ - queued_variable, /* Testcases with variable behavior */ - queued_at_start, /* Total number of initial inputs */ - queued_discovered, /* Items discovered during this run */ - queued_imported, /* Items imported via -S */ - queued_favored, /* Paths deemed favorable */ - queued_with_cov, /* Paths with new coverage bytes */ - pending_not_fuzzed, /* Queued but not done yet */ - pending_favored, /* Pending favored paths */ - cur_skipped_paths, /* Abandoned inputs in cur cycle */ - cur_depth, /* Current path depth */ - max_depth, /* Max path depth */ - useless_at_start, /* Number of useless starting paths */ - var_byte_count, /* Bitmap bytes with var behavior */ - current_entry, /* Current queue entry ID */ - havoc_div = 1; /* Cycle count divisor for havoc */ - -EXP_ST u64 total_crashes, /* Total number of crashes */ - unique_crashes, /* Crashes with unique signatures */ - total_tmouts, /* Total number of timeouts */ - unique_tmouts, /* Timeouts with unique signatures */ - unique_hangs, /* Hangs with unique signatures */ - total_execs, /* Total execve() calls */ - start_time, /* Unix start time (ms) */ - last_path_time, /* Time for most recent path (ms) */ - last_crash_time, /* Time for most recent crash (ms) */ - last_hang_time, /* Time for most recent hang (ms) */ - last_crash_execs, /* Exec counter at last crash */ - queue_cycle, /* Queue round counter */ - cycles_wo_finds, /* Cycles without any new paths */ - trim_execs, /* Execs done to trim input files */ - bytes_trim_in, /* Bytes coming into the trimmer */ - bytes_trim_out, /* Bytes coming outa the trimmer */ - blocks_eff_total, /* Blocks subject to effector maps */ - blocks_eff_select; /* Blocks selected as fuzzable */ - -static u32 subseq_tmouts; /* Number of timeouts in a row */ - -static u8 *stage_name = "init", /* Name of the current fuzz stage */ - *stage_short, /* Short stage name */ - *syncing_party; /* Currently syncing with... */ - -static s32 stage_cur, stage_max; /* Stage progression */ -static s32 splicing_with = -1; /* Splicing with which test case? */ - -static u32 master_id, master_max; /* Master instance job splitting */ - -static u32 syncing_case; /* Syncing with case #... */ - -static s32 stage_cur_byte, /* Byte offset of current stage op */ - stage_cur_val; /* Value used for stage op */ - -static u8 stage_val_type; /* Value type (STAGE_VAL_*) */ - -static u64 stage_finds[32], /* Patterns found per fuzz stage */ - stage_cycles[32]; /* Execs per fuzz stage */ - -#ifndef HAVE_ARC4RANDOM -static u32 rand_cnt; /* Random number counter */ -#endif - -static u64 total_cal_us, /* Total calibration time (us) */ - total_cal_cycles; /* Total calibration cycles */ - -static u64 total_bitmap_size, /* Total bit count for all bitmaps */ - total_bitmap_entries; /* Number of bitmaps counted */ - -static s32 cpu_core_count; /* CPU core count */ - -#ifdef HAVE_AFFINITY - -static s32 cpu_aff = -1; /* Selected CPU core */ - -#endif /* HAVE_AFFINITY */ - -FILE* plot_file; /* Gnuplot output file */ - -struct queue_entry { - - u8* fname; /* File name for the test case */ - u32 len; /* Input length */ - - u8 cal_failed, /* Calibration failed? */ - trim_done, /* Trimmed? */ - was_fuzzed, /* historical, but needed for MOpt */ - passed_det, /* Deterministic stages passed? */ - has_new_cov, /* Triggers new coverage? */ - var_behavior, /* Variable behavior? */ - favored, /* Currently favored? */ - fs_redundant; /* Marked as redundant in the fs? 
*/ - - u32 bitmap_size, /* Number of bits set in bitmap */ - fuzz_level, /* Number of fuzzing iterations */ - exec_cksum; /* Checksum of the execution trace */ - - u64 exec_us, /* Execution time (us) */ - handicap, /* Number of queue cycles behind */ - n_fuzz, /* Number of fuzz, does not overflow */ - depth; /* Path depth */ - - u8* trace_mini; /* Trace bytes, if kept */ - u32 tc_ref; /* Trace bytes ref count */ - - struct queue_entry *next, /* Next element, if any */ - *next_100; /* 100 elements ahead */ - -}; - -static struct queue_entry *queue, /* Fuzzing queue (linked list) */ - *queue_cur, /* Current offset within the queue */ - *queue_top, /* Top of the list */ - *q_prev100; /* Previous 100 marker */ - -static struct queue_entry* - top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ - -struct extra_data { - u8* data; /* Dictionary token data */ - u32 len; /* Dictionary token length */ - u32 hit_cnt; /* Use count in the corpus */ -}; - -static struct extra_data* extras; /* Extra tokens to fuzz with */ -static u32 extras_cnt; /* Total number of tokens read */ - -static struct extra_data* a_extras; /* Automatically selected extras */ -static u32 a_extras_cnt; /* Total number of tokens available */ - -static u8* (*post_handler)(u8* buf, u32* len); - -/* hooks for the custom mutator function */ -static size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed); -static size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data); - - -/* Interesting values, as per config.h */ - -static s8 interesting_8[] = { INTERESTING_8 }; -static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 }; -static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 }; - -/* Fuzzing stages */ - -enum { - /* 00 */ STAGE_FLIP1, - /* 01 */ STAGE_FLIP2, - /* 02 */ STAGE_FLIP4, - /* 03 */ STAGE_FLIP8, - /* 04 */ STAGE_FLIP16, - /* 05 */ STAGE_FLIP32, - /* 06 */ STAGE_ARITH8, - /* 07 */ STAGE_ARITH16, - /* 08 */ STAGE_ARITH32, - /* 09 */ STAGE_INTEREST8, - /* 10 */ STAGE_INTEREST16, - /* 11 */ STAGE_INTEREST32, - /* 12 */ STAGE_EXTRAS_UO, - /* 13 */ STAGE_EXTRAS_UI, - /* 14 */ STAGE_EXTRAS_AO, - /* 15 */ STAGE_HAVOC, - /* 16 */ STAGE_SPLICE, - /* 17 */ STAGE_PYTHON, - /* 18 */ STAGE_CUSTOM_MUTATOR -}; - -/* Stage value types */ - -enum { - /* 00 */ STAGE_VAL_NONE, - /* 01 */ STAGE_VAL_LE, - /* 02 */ STAGE_VAL_BE -}; - -/* Execution status fault codes */ - -enum { - /* 00 */ FAULT_NONE, - /* 01 */ FAULT_TMOUT, - /* 02 */ FAULT_CRASH, - /* 03 */ FAULT_ERROR, - /* 04 */ FAULT_NOINST, - /* 05 */ FAULT_NOBITS -}; - - -static inline u32 UR(u32 limit); - -/* Python stuff */ -#ifdef USE_PYTHON -#include - -static PyObject *py_module; - -enum { - /* 00 */ PY_FUNC_INIT, - /* 01 */ PY_FUNC_FUZZ, - /* 02 */ PY_FUNC_INIT_TRIM, - /* 03 */ PY_FUNC_POST_TRIM, - /* 04 */ PY_FUNC_TRIM, - PY_FUNC_COUNT -}; - -static PyObject *py_functions[PY_FUNC_COUNT]; - -static int init_py() { - Py_Initialize(); - u8* module_name = getenv("AFL_PYTHON_MODULE"); - - if (module_name) { - PyObject* py_name = PyString_FromString(module_name); - - py_module = PyImport_Import(py_name); - Py_DECREF(py_name); - - if (py_module != NULL) { - u8 py_notrim = 0; - py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init"); - py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz"); - py_functions[PY_FUNC_INIT_TRIM] = PyObject_GetAttrString(py_module, "init_trim"); - py_functions[PY_FUNC_POST_TRIM] = PyObject_GetAttrString(py_module, "post_trim"); - 
py_functions[PY_FUNC_TRIM] = PyObject_GetAttrString(py_module, "trim"); - - for (u8 py_idx = 0; py_idx < PY_FUNC_COUNT; ++py_idx) { - if (!py_functions[py_idx] || !PyCallable_Check(py_functions[py_idx])) { - if (py_idx >= PY_FUNC_INIT_TRIM && py_idx <= PY_FUNC_TRIM) { - // Implementing the trim API is optional for now - if (PyErr_Occurred()) - PyErr_Print(); - py_notrim = 1; - } else { - if (PyErr_Occurred()) - PyErr_Print(); - fprintf(stderr, "Cannot find/call function with index %d in external Python module.\n", py_idx); - return 1; - } - } - - } - - if (py_notrim) { - py_functions[PY_FUNC_INIT_TRIM] = NULL; - py_functions[PY_FUNC_POST_TRIM] = NULL; - py_functions[PY_FUNC_TRIM] = NULL; - WARNF("Python module does not implement trim API, standard trimming will be used."); - } - - PyObject *py_args, *py_value; - - /* Provide the init function a seed for the Python RNG */ - py_args = PyTuple_New(1); - py_value = PyInt_FromLong(UR(0xFFFFFFFF)); - if (!py_value) { - Py_DECREF(py_args); - fprintf(stderr, "Cannot convert argument\n"); - return 1; - } - - PyTuple_SetItem(py_args, 0, py_value); - - py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT], py_args); - - Py_DECREF(py_args); - - if (py_value == NULL) { - PyErr_Print(); - fprintf(stderr,"Call failed\n"); - return 1; - } - } else { - PyErr_Print(); - fprintf(stderr, "Failed to load \"%s\"\n", module_name); - return 1; - } - } - - return 0; -} - -static void finalize_py() { - if (py_module != NULL) { - u32 i; - for (i = 0; i < PY_FUNC_COUNT; ++i) - Py_XDECREF(py_functions[i]); - - Py_DECREF(py_module); - } - - Py_Finalize(); -} - -static void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** ret, size_t* retlen) { - - if (py_module != NULL) { - PyObject *py_args, *py_value; - py_args = PyTuple_New(2); - py_value = PyByteArray_FromStringAndSize(buf, buflen); - if (!py_value) { - Py_DECREF(py_args); - fprintf(stderr, "Cannot convert argument\n"); - return; - } - - PyTuple_SetItem(py_args, 0, py_value); - - py_value = PyByteArray_FromStringAndSize(add_buf, add_buflen); - if (!py_value) { - Py_DECREF(py_args); - fprintf(stderr, "Cannot convert argument\n"); - return; - } - - PyTuple_SetItem(py_args, 1, py_value); - - py_value = PyObject_CallObject(py_functions[PY_FUNC_FUZZ], py_args); - - Py_DECREF(py_args); - - if (py_value != NULL) { - *retlen = PyByteArray_Size(py_value); - *ret = malloc(*retlen); - memcpy(*ret, PyByteArray_AsString(py_value), *retlen); - Py_DECREF(py_value); - } else { - PyErr_Print(); - fprintf(stderr,"Call failed\n"); - return; - } - } -} -static u32 init_trim_py(char* buf, size_t buflen) { - PyObject *py_args, *py_value; - - py_args = PyTuple_New(1); - py_value = PyByteArray_FromStringAndSize(buf, buflen); - if (!py_value) { - Py_DECREF(py_args); - FATAL("Failed to convert arguments"); - } - - PyTuple_SetItem(py_args, 0, py_value); - - py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT_TRIM], py_args); - Py_DECREF(py_args); - - if (py_value != NULL) { - u32 retcnt = PyInt_AsLong(py_value); - Py_DECREF(py_value); - return retcnt; - } else { - PyErr_Print(); - FATAL("Call failed"); - } -} -static u32 post_trim_py(char success) { - PyObject *py_args, *py_value; - - py_args = PyTuple_New(1); - - py_value = PyBool_FromLong(success); - if (!py_value) { - Py_DECREF(py_args); - FATAL("Failed to convert arguments"); - } - - PyTuple_SetItem(py_args, 0, py_value); - - py_value = PyObject_CallObject(py_functions[PY_FUNC_POST_TRIM], py_args); - Py_DECREF(py_args); - - if (py_value != NULL) { - u32 
retcnt = PyInt_AsLong(py_value); - Py_DECREF(py_value); - return retcnt; - } else { - PyErr_Print(); - FATAL("Call failed"); - } -} - -static void trim_py(char** ret, size_t* retlen) { - PyObject *py_args, *py_value; - - py_args = PyTuple_New(0); - py_value = PyObject_CallObject(py_functions[PY_FUNC_TRIM], py_args); - Py_DECREF(py_args); - - if (py_value != NULL) { - *retlen = PyByteArray_Size(py_value); - *ret = malloc(*retlen); - memcpy(*ret, PyByteArray_AsString(py_value), *retlen); - Py_DECREF(py_value); - } else { - PyErr_Print(); - FATAL("Call failed"); - } -} - -#endif /* USE_PYTHON */ - +#include "afl-fuzz.h" int select_algorithm(void) { @@ -699,32 +73,6 @@ static u64 get_cur_time_us(void) { } -/* Generate a random number (from 0 to limit - 1). This may - have slight bias. */ - -static inline u32 UR(u32 limit) { -#ifdef HAVE_ARC4RANDOM - if (fixed_seed) { - return random() % limit; - } - - /* The boundary not being necessarily a power of 2, - we need to ensure the result uniformity. */ - return arc4random_uniform(limit); -#else - if (!fixed_seed && unlikely(!rand_cnt--)) { - u32 seed[2]; - - ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom"); - srandom(seed[0]); - rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG); - } - - return random() % limit; -#endif -} - - /* Shuffle an array of pointers. Might be slightly biased. */ static void shuffle_ptrs(void** ptrs, u32 cnt) { @@ -1166,7 +514,7 @@ static void add_to_queue(u8* fname, u32 len, u8 passed_det) { /* Destroy the entire queue. */ -EXP_ST void destroy_queue(void) { +void destroy_queue(void) { struct queue_entry *q = queue, *n; @@ -1187,7 +535,7 @@ EXP_ST void destroy_queue(void) { -B option, to focus a separate fuzzing session on a particular interesting input without rediscovering all the others. */ -EXP_ST void write_bitmap(void) { +void write_bitmap(void) { u8* fname; s32 fd; @@ -1210,7 +558,7 @@ EXP_ST void write_bitmap(void) { /* Read bitmap from file. This is for the -B option again. */ -EXP_ST void read_bitmap(u8* fname) { +void read_bitmap(u8* fname) { s32 fd = open(fname, O_RDONLY); @@ -1484,7 +832,7 @@ static const u8 count_class_lookup8[256] = { static u16 count_class_lookup16[65536]; -EXP_ST void init_count_class16(void) { +void init_count_class16(void) { u32 b1, b2; @@ -4780,7 +4128,7 @@ abort_trimming: error conditions, returning 1 if it's time to bail out. This is a helper function for fuzz_one(). */ -EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) { +u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) { u8 fault; @@ -10769,7 +10117,7 @@ static void handle_skipreq(int sig) { isn't a shell script - a common and painful mistake. We also check for a valid ELF header and for evidence of AFL instrumentation. */ -EXP_ST void check_binary(u8* fname) { +void check_binary(u8* fname) { u8* env_path = 0; struct stat st; @@ -11069,7 +10417,7 @@ static void usage(u8* argv0) { /* Prepare output directories and fds. */ -EXP_ST void setup_dirs_fds(void) { +void setup_dirs_fds(void) { u8* tmp; s32 fd; @@ -11218,7 +10566,7 @@ static void setup_cmdline_file(char** argv) { /* Setup the output file for fuzzed data, if not using -f. */ -EXP_ST void setup_stdio_file(void) { +void setup_stdio_file(void) { u8* fn; if (file_extension) { @@ -11545,7 +10893,7 @@ static void check_asan_opts(void) { Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you call siginterrupt(), and does other stupid things. 
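The trim hooks removed above follow a small protocol: `init_trim_py()` returns the number of trimming steps, `trim_py()` hands back a candidate buffer, and `post_trim_py()` is told whether the candidate kept the target's behavior and returns the updated step counter. A minimal standalone sketch of that driver loop; the `init_trim`/`trim`/`post_trim` stubs here are hypothetical stand-ins for the Python-backed functions, and the point where afl-fuzz would re-execute the target is reduced to a comment:

```c
/* Toy driver for the trim protocol; the stubs are illustrative stand-ins
   for init_trim_py()/trim_py()/post_trim_py(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned steps_left;

static unsigned init_trim(const char* buf, size_t len) {
  (void)buf; (void)len;
  steps_left = 4;                /* pretend the module wants 4 rounds */
  return steps_left;
}

static void trim(char** out, size_t* out_len) {
  const char* cand = "shorter-candidate";
  *out_len = strlen(cand);
  *out = malloc(*out_len + 1);
  memcpy(*out, cand, *out_len + 1);
}

static unsigned post_trim(char success) {
  (void)success;
  return --steps_left;           /* updated step counter */
}

int main(void) {
  const char* input = "original-test-case";
  unsigned steps = init_trim(input, strlen(input));

  while (steps) {
    char*  cand;
    size_t cand_len;

    trim(&cand, &cand_len);
    /* afl-fuzz would execute the target here and compare its trace. */
    steps = post_trim(1);
    free(cand);
  }

  puts("trimming done");
  return 0;
}
```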
 */

-EXP_ST void setup_signal_handlers(void) {
+void setup_signal_handlers(void) {

   struct sigaction sa;

From 4f3c417753c7ff40023fcbb2958eb6109ebdd575 Mon Sep 17 00:00:00 2001
From: Andrea Fioraldi
Date: Fri, 30 Aug 2019 13:10:04 +0200
Subject: [PATCH 57/83] rename the afl-fuzz folder to afl-fuzz-src due to gitignore

---
 .gitignore | 1 -
 Makefile | 2 +-
 src/{afl-fuzz => afl-fuzz-src}/afl-fuzz.c | 0
 src/afl-fuzz-src/globals.c | 282 ++++++++++++++++++++++
 src/afl-fuzz-src/python.c | 223 +++++++++++++++++
 5 files changed, 506 insertions(+), 2 deletions(-)
 rename src/{afl-fuzz => afl-fuzz-src}/afl-fuzz.c (100%)
 create mode 100644 src/afl-fuzz-src/globals.c
 create mode 100644 src/afl-fuzz-src/python.c

diff --git a/.gitignore b/.gitignore
index 2ee40f62..e4d2346e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
 *.o
 *.so
-.gitignore
 afl-analyze
 afl-as
 afl-clang
diff --git a/Makefile b/Makefile
index 850a6a80..0efdddd1 100644
--- a/Makefile
+++ b/Makefile
@@ -35,7 +35,7 @@ CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I include/ \
 -DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \
 -DBIN_PATH=\"$(BIN_PATH)\"

-AFL_FUZZ_FILES = $(wildcard src/afl-fuzz/*.c)
+AFL_FUZZ_FILES = $(wildcard src/afl-fuzz-src/*.c)

 PYTHON_INCLUDE ?= /usr/include/python2.7

diff --git a/src/afl-fuzz/afl-fuzz.c b/src/afl-fuzz-src/afl-fuzz.c
similarity index 100%
rename from src/afl-fuzz/afl-fuzz.c
rename to src/afl-fuzz-src/afl-fuzz.c
diff --git a/src/afl-fuzz-src/globals.c b/src/afl-fuzz-src/globals.c
new file mode 100644
index 00000000..127d7609
--- /dev/null
+++ b/src/afl-fuzz-src/globals.c
@@ -0,0 +1,282 @@
+/*
+ american fuzzy lop - fuzzer code
+ --------------------------------
+
+ Written and maintained by Michal Zalewski
+
+ Forkserver design by Jann Horn
+
+ Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at:
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ This is the real deal: the program takes an instrumented binary and
+ attempts a variety of basic fuzzing tricks, paying close attention to
+ how they affect the execution path.
+
+ */
+
+#include "afl-fuzz.h"
+
+/* MOpt:
+ Lots of globals, but mostly for the status UI and other things where it
+ really makes no sense to haul them around as function parameters.
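For orientation, the `x_now`/`v_now`/`L_best`/`G_best` arrays declared below are particle swarm state: one particle per swarm, one dimension per mutation operator. The sketch below is the textbook PSO update, shown only to explain what those arrays represent; MOpt's actual update logic lives elsewhere in afl-fuzz and differs in detail (probability normalization, bounds, the w_init-to-w_end inertia schedule), and the SWARMS/OPS sizes are illustrative:

```c
/* Textbook particle swarm step - NOT MOpt's exact code, just a guide to
   the swarm arrays declared in globals.c. */
#include <stdio.h>
#include <stdlib.h>

#define SWARMS 5
#define OPS    16

static double x_now[SWARMS][OPS],   /* current positions (probabilities) */
              v_now[SWARMS][OPS],   /* particle velocities               */
              L_best[SWARMS][OPS],  /* per-swarm best positions          */
              G_best[OPS];          /* global best position              */

static double r01(void) { return rand() / (double)RAND_MAX; }

static void pso_step(int s, double w) {  /* w: inertia weight */

  for (int i = 0; i < OPS; i++) {

    v_now[s][i] = w * v_now[s][i]
                + r01() * (L_best[s][i] - x_now[s][i])   /* local pull  */
                + r01() * (G_best[i]   - x_now[s][i]);   /* global pull */
    x_now[s][i] += v_now[s][i];

  }

}

int main(void) {

  G_best[0] = 1.0;                 /* pretend operator 0 did well globally */
  pso_step(0, 0.9);
  printf("x_now[0][0] = %f\n", x_now[0][0]);
  return 0;

}
```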
*/ +u64 limit_time_puppet, + orig_hit_cnt_puppet, + last_limit_time_start, + tmp_pilot_time, + total_pacemaker_time, + total_puppet_find, + temp_puppet_find, + most_time_key, + most_time, + most_execs_key, + most_execs, + old_hit_count; + +s32 SPLICE_CYCLES_puppet, + limit_time_sig, + key_puppet, + key_module; + +double w_init = 0.9, + w_end = 0.3, + w_now; + +s32 g_now; +s32 g_max = 5000; + +u64 tmp_core_time; +s32 swarm_now; + +double x_now[swarm_num][operator_num], + L_best[swarm_num][operator_num], + eff_best[swarm_num][operator_num], + G_best[operator_num], + v_now[swarm_num][operator_num], + probability_now[swarm_num][operator_num], + swarm_fitness[swarm_num]; + +u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */ + stage_finds_puppet_v2[swarm_num][operator_num], + stage_cycles_puppet_v2[swarm_num][operator_num], + stage_cycles_puppet_v3[swarm_num][operator_num], + stage_cycles_puppet[swarm_num][operator_num], + operator_finds_puppet[operator_num], + core_operator_finds_puppet[operator_num], + core_operator_finds_puppet_v2[operator_num], + core_operator_cycles_puppet[operator_num], + core_operator_cycles_puppet_v2[operator_num], + core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ + +double period_pilot_tmp = 5000.0; +s32 key_lv; + +u8 *in_dir, /* Input directory with test cases */ + *out_dir, /* Working & output directory */ + *tmp_dir , /* Temporary directory for input */ + *sync_dir, /* Synchronization directory */ + *sync_id, /* Fuzzer ID */ + *power_name, /* Power schedule name */ + *use_banner, /* Display banner */ + *in_bitmap, /* Input bitmap */ + *file_extension, /* File extension */ + *orig_cmdline; /* Original command line */ +u8 *doc_path, /* Path to documentation dir */ + *target_path, /* Path to target binary */ + *out_file; /* File to fuzz, if any */ + +u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */ +u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */ + +u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ + +u8 cal_cycles = CAL_CYCLES, /* Calibration cycles defaults */ + cal_cycles_long = CAL_CYCLES_LONG, + debug, /* Debug mode */ + python_only; /* Python-only mode */ + +u32 stats_update_freq = 1; /* Stats update frequency (execs) */ + +char *power_names[POWER_SCHEDULES_NUM] = { + "explore", + "fast", + "coe", + "lin", + "quad", + "exploit" +}; + +u8 schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/ +u8 havoc_max_mult = HAVOC_MAX_MULT; + +u8 skip_deterministic, /* Skip deterministic stages? */ + force_deterministic, /* Force deterministic stages? */ + use_splicing, /* Recombine input files? */ + dumb_mode, /* Run in non-instrumented mode? */ + score_changed, /* Scoring for favorites changed? */ + kill_signal, /* Signal that killed the child */ + resuming_fuzz, /* Resuming an older fuzzing job? */ + timeout_given, /* Specific timeout given? */ + not_on_tty, /* stdout is not a tty */ + term_too_small, /* terminal dimensions too small */ + no_forkserver, /* Disable forkserver? */ + crash_mode, /* Crash mode! Yeah! */ + in_place_resume, /* Attempt in-place resume? */ + auto_changed, /* Auto-generated tokens changed? */ + no_cpu_meter_red, /* Feng shui on the status screen */ + no_arith, /* Skip most arithmetic ops */ + shuffle_queue, /* Shuffle input queue? */ + bitmap_changed = 1, /* Time to update bitmap? */ + qemu_mode, /* Running in QEMU mode? */ + unicorn_mode, /* Running in Unicorn mode? 
*/ + skip_requested, /* Skip request, via SIGUSR1 */ + run_over10m, /* Run time over 10 minutes? */ + persistent_mode, /* Running in persistent mode? */ + deferred_mode, /* Deferred forkserver mode? */ + fixed_seed, /* do not reseed */ + fast_cal, /* Try to calibrate faster? */ + uses_asan; /* Target uses ASAN? */ + +s32 out_fd, /* Persistent fd for out_file */ +#ifndef HAVE_ARC4RANDOM + dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ +#endif + dev_null_fd = -1, /* Persistent fd for /dev/null */ + fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ + + s32 forksrv_pid, /* PID of the fork server */ + child_pid = -1, /* PID of the fuzzed program */ + out_dir_fd = -1; /* FD of the lock file */ + + u8* trace_bits; /* SHM with instrumentation bitmap */ + +u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ + virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ + virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ + +u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ + +volatile u8 stop_soon, /* Ctrl-C pressed? */ + clear_screen = 1, /* Window resized? */ + child_timed_out; /* Traced process timed out? */ + +u32 queued_paths, /* Total number of queued testcases */ + queued_variable, /* Testcases with variable behavior */ + queued_at_start, /* Total number of initial inputs */ + queued_discovered, /* Items discovered during this run */ + queued_imported, /* Items imported via -S */ + queued_favored, /* Paths deemed favorable */ + queued_with_cov, /* Paths with new coverage bytes */ + pending_not_fuzzed, /* Queued but not done yet */ + pending_favored, /* Pending favored paths */ + cur_skipped_paths, /* Abandoned inputs in cur cycle */ + cur_depth, /* Current path depth */ + max_depth, /* Max path depth */ + useless_at_start, /* Number of useless starting paths */ + var_byte_count, /* Bitmap bytes with var behavior */ + current_entry, /* Current queue entry ID */ + havoc_div = 1; /* Cycle count divisor for havoc */ + +u64 total_crashes, /* Total number of crashes */ + unique_crashes, /* Crashes with unique signatures */ + total_tmouts, /* Total number of timeouts */ + unique_tmouts, /* Timeouts with unique signatures */ + unique_hangs, /* Hangs with unique signatures */ + total_execs, /* Total execve() calls */ + start_time, /* Unix start time (ms) */ + last_path_time, /* Time for most recent path (ms) */ + last_crash_time, /* Time for most recent crash (ms) */ + last_hang_time, /* Time for most recent hang (ms) */ + last_crash_execs, /* Exec counter at last crash */ + queue_cycle, /* Queue round counter */ + cycles_wo_finds, /* Cycles without any new paths */ + trim_execs, /* Execs done to trim input files */ + bytes_trim_in, /* Bytes coming into the trimmer */ + bytes_trim_out, /* Bytes coming outa the trimmer */ + blocks_eff_total, /* Blocks subject to effector maps */ + blocks_eff_select; /* Blocks selected as fuzzable */ + +u32 subseq_tmouts; /* Number of timeouts in a row */ + +u8 *stage_name = "init", /* Name of the current fuzz stage */ + *stage_short, /* Short stage name */ + *syncing_party; /* Currently syncing with... */ + +s32 stage_cur, stage_max; /* Stage progression */ +s32 splicing_with = -1; /* Splicing with which test case? */ + +u32 master_id, master_max; /* Master instance job splitting */ + +u32 syncing_case; /* Syncing with case #... 
*/ + +s32 stage_cur_byte, /* Byte offset of current stage op */ + stage_cur_val; /* Value used for stage op */ + +u8 stage_val_type; /* Value type (STAGE_VAL_*) */ + +u64 stage_finds[32], /* Patterns found per fuzz stage */ + stage_cycles[32]; /* Execs per fuzz stage */ + +#ifndef HAVE_ARC4RANDOM +u32 rand_cnt; /* Random number counter */ +#endif + +u64 total_cal_us, /* Total calibration time (us) */ + total_cal_cycles; /* Total calibration cycles */ + +u64 total_bitmap_size, /* Total bit count for all bitmaps */ + total_bitmap_entries; /* Number of bitmaps counted */ + +s32 cpu_core_count; /* CPU core count */ + +#ifdef HAVE_AFFINITY + +s32 cpu_aff = -1; /* Selected CPU core */ + +#endif /* HAVE_AFFINITY */ + +FILE* plot_file; /* Gnuplot output file */ + + + +struct queue_entry *queue, /* Fuzzing queue (linked list) */ + *queue_cur, /* Current offset within the queue */ + *queue_top, /* Top of the list */ + *q_prev100; /* Previous 100 marker */ + +struct queue_entry* + top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ + +struct extra_data* extras; /* Extra tokens to fuzz with */ +u32 extras_cnt; /* Total number of tokens read */ + +struct extra_data* a_extras; /* Automatically selected extras */ +u32 a_extras_cnt; /* Total number of tokens available */ + +u8* (*post_handler)(u8* buf, u32* len); + +/* hooks for the custom mutator function */ +size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed); +size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data); + + +/* Interesting values, as per config.h */ + +s8 interesting_8[] = { INTERESTING_8 }; +s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 }; +s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 }; + +/* Python stuff */ +#ifdef USE_PYTHON + +PyObject *py_module; +PyObject *py_functions[PY_FUNC_COUNT]; + +#endif + diff --git a/src/afl-fuzz-src/python.c b/src/afl-fuzz-src/python.c new file mode 100644 index 00000000..74ffc524 --- /dev/null +++ b/src/afl-fuzz-src/python.c @@ -0,0 +1,223 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. 
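The `custom_mutator` hook declared in globals.c above fully determines the interface a user-supplied mutator must implement. A hedged example of a function with that exact shape; the name `example_mutator` and the loading mechanism are not part of this patch:

```c
/* A function matching the custom_mutator prototype from globals.c:
   copy the input and flip one bit chosen by the seed. */
#include <string.h>

typedef unsigned char u8;

size_t example_mutator(u8* data, size_t size, u8* mutated_out,
                       size_t max_size, unsigned int seed) {

  size_t n = size < max_size ? size : max_size;

  memcpy(mutated_out, data, n);
  if (n) mutated_out[seed % n] ^= 0x80;   /* flip the top bit of one byte */

  return n;                               /* length of the mutated output */

}
```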
+ + */ + +#include "afl-fuzz.h" + +/* Python stuff */ +#ifdef USE_PYTHON + +int init_py() { + Py_Initialize(); + u8* module_name = getenv("AFL_PYTHON_MODULE"); + + if (module_name) { + PyObject* py_name = PyString_FromString(module_name); + + py_module = PyImport_Import(py_name); + Py_DECREF(py_name); + + if (py_module != NULL) { + u8 py_notrim = 0; + py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init"); + py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz"); + py_functions[PY_FUNC_INIT_TRIM] = PyObject_GetAttrString(py_module, "init_trim"); + py_functions[PY_FUNC_POST_TRIM] = PyObject_GetAttrString(py_module, "post_trim"); + py_functions[PY_FUNC_TRIM] = PyObject_GetAttrString(py_module, "trim"); + + for (u8 py_idx = 0; py_idx < PY_FUNC_COUNT; ++py_idx) { + if (!py_functions[py_idx] || !PyCallable_Check(py_functions[py_idx])) { + if (py_idx >= PY_FUNC_INIT_TRIM && py_idx <= PY_FUNC_TRIM) { + // Implementing the trim API is optional for now + if (PyErr_Occurred()) + PyErr_Print(); + py_notrim = 1; + } else { + if (PyErr_Occurred()) + PyErr_Print(); + fprintf(stderr, "Cannot find/call function with index %d in external Python module.\n", py_idx); + return 1; + } + } + + } + + if (py_notrim) { + py_functions[PY_FUNC_INIT_TRIM] = NULL; + py_functions[PY_FUNC_POST_TRIM] = NULL; + py_functions[PY_FUNC_TRIM] = NULL; + WARNF("Python module does not implement trim API, standard trimming will be used."); + } + + PyObject *py_args, *py_value; + + /* Provide the init function a seed for the Python RNG */ + py_args = PyTuple_New(1); + py_value = PyInt_FromLong(UR(0xFFFFFFFF)); + if (!py_value) { + Py_DECREF(py_args); + fprintf(stderr, "Cannot convert argument\n"); + return 1; + } + + PyTuple_SetItem(py_args, 0, py_value); + + py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT], py_args); + + Py_DECREF(py_args); + + if (py_value == NULL) { + PyErr_Print(); + fprintf(stderr,"Call failed\n"); + return 1; + } + } else { + PyErr_Print(); + fprintf(stderr, "Failed to load \"%s\"\n", module_name); + return 1; + } + } + + return 0; +} + +void finalize_py() { + if (py_module != NULL) { + u32 i; + for (i = 0; i < PY_FUNC_COUNT; ++i) + Py_XDECREF(py_functions[i]); + + Py_DECREF(py_module); + } + + Py_Finalize(); +} + +void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** ret, size_t* retlen) { + + if (py_module != NULL) { + PyObject *py_args, *py_value; + py_args = PyTuple_New(2); + py_value = PyByteArray_FromStringAndSize(buf, buflen); + if (!py_value) { + Py_DECREF(py_args); + fprintf(stderr, "Cannot convert argument\n"); + return; + } + + PyTuple_SetItem(py_args, 0, py_value); + + py_value = PyByteArray_FromStringAndSize(add_buf, add_buflen); + if (!py_value) { + Py_DECREF(py_args); + fprintf(stderr, "Cannot convert argument\n"); + return; + } + + PyTuple_SetItem(py_args, 1, py_value); + + py_value = PyObject_CallObject(py_functions[PY_FUNC_FUZZ], py_args); + + Py_DECREF(py_args); + + if (py_value != NULL) { + *retlen = PyByteArray_Size(py_value); + *ret = malloc(*retlen); + memcpy(*ret, PyByteArray_AsString(py_value), *retlen); + Py_DECREF(py_value); + } else { + PyErr_Print(); + fprintf(stderr,"Call failed\n"); + return; + } + } +} + +u32 init_trim_py(char* buf, size_t buflen) { + PyObject *py_args, *py_value; + + py_args = PyTuple_New(1); + py_value = PyByteArray_FromStringAndSize(buf, buflen); + if (!py_value) { + Py_DECREF(py_args); + FATAL("Failed to convert arguments"); + } + + PyTuple_SetItem(py_args, 0, py_value); + + py_value = 
PyObject_CallObject(py_functions[PY_FUNC_INIT_TRIM], py_args); + Py_DECREF(py_args); + + if (py_value != NULL) { + u32 retcnt = PyInt_AsLong(py_value); + Py_DECREF(py_value); + return retcnt; + } else { + PyErr_Print(); + FATAL("Call failed"); + } +} + +u32 post_trim_py(char success) { + PyObject *py_args, *py_value; + + py_args = PyTuple_New(1); + + py_value = PyBool_FromLong(success); + if (!py_value) { + Py_DECREF(py_args); + FATAL("Failed to convert arguments"); + } + + PyTuple_SetItem(py_args, 0, py_value); + + py_value = PyObject_CallObject(py_functions[PY_FUNC_POST_TRIM], py_args); + Py_DECREF(py_args); + + if (py_value != NULL) { + u32 retcnt = PyInt_AsLong(py_value); + Py_DECREF(py_value); + return retcnt; + } else { + PyErr_Print(); + FATAL("Call failed"); + } +} + +void trim_py(char** ret, size_t* retlen) { + PyObject *py_args, *py_value; + + py_args = PyTuple_New(0); + py_value = PyObject_CallObject(py_functions[PY_FUNC_TRIM], py_args); + Py_DECREF(py_args); + + if (py_value != NULL) { + *retlen = PyByteArray_Size(py_value); + *ret = malloc(*retlen); + memcpy(*ret, PyByteArray_AsString(py_value), *retlen); + Py_DECREF(py_value); + } else { + PyErr_Print(); + FATAL("Call failed"); + } +} + +#endif /* USE_PYTHON */ From 500a378fdf8664aea42f557f60c9842bb15f06a0 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Sat, 31 Aug 2019 11:23:48 +0200 Subject: [PATCH 58/83] modernize some readmes --- libdislocator/{README.dislocator => README.md} | 6 +++--- libtokencap/{README.tokencap => README.md} | 12 +++++++----- qemu_mode/libcompcov/{README.compcov => README.md} | 8 ++++---- 3 files changed, 14 insertions(+), 12 deletions(-) rename libdislocator/{README.dislocator => README.md} (95%) rename libtokencap/{README.tokencap => README.md} (90%) rename qemu_mode/libcompcov/{README.compcov => README.md} (84%) diff --git a/libdislocator/README.dislocator b/libdislocator/README.md similarity index 95% rename from libdislocator/README.dislocator rename to libdislocator/README.md index 837e7466..5d5a1464 100644 --- a/libdislocator/README.dislocator +++ b/libdislocator/README.md @@ -1,6 +1,4 @@ -=================================== -libdislocator, an abusive allocator -=================================== +# libdislocator, an abusive allocator (See ../docs/README for the general instruction manual.) @@ -45,7 +43,9 @@ when fuzzing small, self-contained binaries. To use this library, run AFL like so: +``` AFL_PRELOAD=/path/to/libdislocator.so ./afl-fuzz [...other params...] +``` You *have* to specify path, even if it's just ./libdislocator.so or $PWD/libdislocator.so. diff --git a/libtokencap/README.tokencap b/libtokencap/README.md similarity index 90% rename from libtokencap/README.tokencap rename to libtokencap/README.md index 650739f2..baf69da1 100644 --- a/libtokencap/README.tokencap +++ b/libtokencap/README.md @@ -1,10 +1,8 @@ -========================================= -strcmp() / memcmp() token capture library -========================================= +# strcmp() / memcmp() token capture library (See ../docs/README for the general instruction manual.) -This Linux-only companion library allows you to instrument strcmp(), memcmp(), +This Linux-only companion library allows you to instrument `strcmp()`, `memcmp()`, and related functions to automatically extract syntax tokens passed to any of these libcalls. 
The resulting list of tokens may be then given as a starting dictionary to afl-fuzz (the -x option) to improve coverage on subsequent @@ -31,15 +29,18 @@ with -fno-builtin and is linked dynamically. If you wish to automate the first part without mucking with CFLAGS in Makefiles, you can set AFL_NO_BUILTIN=1 when using afl-gcc. This setting specifically adds the following flags: +``` -fno-builtin-strcmp -fno-builtin-strncmp -fno-builtin-strcasecmp -fno-builtin-strcasencmp -fno-builtin-memcmp -fno-builtin-strstr -fno-builtin-strcasestr +``` The next step is simply loading this library via LD_PRELOAD. The optimal usage pattern is to allow afl-fuzz to fuzz normally for a while and build up a corpus, and then fire off the target binary, with libtokencap.so loaded, on every file found by AFL in that earlier run. This demonstrates the basic principle: +``` export AFL_TOKEN_FILE=$PWD/temp_output.txt for i in /queue/id*; do @@ -48,6 +49,7 @@ found by AFL in that earlier run. This demonstrates the basic principle: done sort -u temp_output.txt >afl_dictionary.txt +``` If you don't get any results, the target library is probably not using strcmp() and memcmp() to parse input; or you haven't compiled it with -fno-builtin; or @@ -55,7 +57,7 @@ the whole thing isn't dynamically linked, and LD_PRELOAD is having no effect. PS. The library is Linux-only because there is probably no particularly portable and non-invasive way to distinguish between read-only and read-write memory -mappings. The __tokencap_load_mappings() function is the only thing that would +mappings. The `__tokencap_load_mappings()` function is the only thing that would need to be changed for other OSes. Porting to platforms with /proc//maps (e.g., FreeBSD) should be trivial. diff --git a/qemu_mode/libcompcov/README.compcov b/qemu_mode/libcompcov/README.md similarity index 84% rename from qemu_mode/libcompcov/README.compcov rename to qemu_mode/libcompcov/README.md index 9be13d88..5aaa3dd8 100644 --- a/qemu_mode/libcompcov/README.compcov +++ b/qemu_mode/libcompcov/README.md @@ -1,10 +1,8 @@ -================================================================ -strcmp() / memcmp() CompareCoverage library for AFLplusplus-QEMU -================================================================ +# strcmp() / memcmp() CompareCoverage library for afl++ QEMU Written by Andrea Fioraldi -This Linux-only companion library allows you to instrument strcmp(), memcmp(), +This Linux-only companion library allows you to instrument `strcmp()`, `memcmp()`, and related functions to log the CompareCoverage of these libcalls. Use this with caution. While this can speedup a lot the bypass of hard @@ -22,10 +20,12 @@ library and QEMU but build it with afl-clang-fast and the laf-intel options. To use this library make sure to preload it with AFL_PRELOAD. +``` export AFL_PRELOAD=/path/to/libcompcov.so export AFL_COMPCOV_LEVEL=1 afl-fuzz -Q -i input -o output -- +``` The AFL_COMPCOV_LEVEL tells to QEMU and libcompcov how to log comaprisons. 
Level 1 logs just comparison with immediates / read-only memory and level 2 From 659037eef53efc539a077331e52cd2657114d437 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Sat, 31 Aug 2019 11:31:51 +0200 Subject: [PATCH 59/83] modernize llvm_mode readmes --- .../{README.instrim => README.instrim.md} | 8 ++-- llvm_mode/README.laf-intel | 20 ---------- llvm_mode/README.laf-intel.md | 25 +++++++++++++ llvm_mode/{README.llvm => README.md} | 37 +++++++++---------- .../{README.neverzero => README.neverzero.md} | 8 ++-- .../{README.whitelist => README.whitelist.md} | 20 +++++----- 6 files changed, 61 insertions(+), 57 deletions(-) rename llvm_mode/{README.instrim => README.instrim.md} (86%) delete mode 100644 llvm_mode/README.laf-intel create mode 100644 llvm_mode/README.laf-intel.md rename llvm_mode/{README.llvm => README.md} (93%) rename llvm_mode/{README.neverzero => README.neverzero.md} (92%) rename llvm_mode/{README.whitelist => README.whitelist.md} (88%) diff --git a/llvm_mode/README.instrim b/llvm_mode/README.instrim.md similarity index 86% rename from llvm_mode/README.instrim rename to llvm_mode/README.instrim.md index 956a9856..e5e3614d 100644 --- a/llvm_mode/README.instrim +++ b/llvm_mode/README.instrim.md @@ -1,6 +1,6 @@ # InsTrim -InsTrim: Lightweight Instrumentation for Coverage-guided Fuzzing +InsTrim: Lightweight Instrumentation for Coverage-guided Fuzzing ## Introduction @@ -8,17 +8,15 @@ InsTrim uses CFG and markers to instrument just what is necessary in the binary in llvm_mode. It is about 20-25% faster but as a cost has a lower path discovery. - ## Usage -Set the environment variable AFL_LLVM_INSTRIM=1 +Set the environment variable `AFL_LLVM_INSTRIM=1`. There is also an advanced mode which instruments loops in a way so that afl-fuzz can see which loop path has been selected but not being able to see how often the loop has been rerun. This again is a tradeoff for speed for less path information. -To enable this mode set AFL_LLVM_INSTRIM_LOOPHEAD=1 - +To enable this mode set `AFL_LLVM_INSTRIM_LOOPHEAD=1`. ## Background diff --git a/llvm_mode/README.laf-intel b/llvm_mode/README.laf-intel deleted file mode 100644 index 340216c3..00000000 --- a/llvm_mode/README.laf-intel +++ /dev/null @@ -1,20 +0,0 @@ -Usage -===== - -By default the passes will not run when you compile programs using -afl-clang-fast. Hence, you can use AFL as usual. -To enable the passes you must set environment variables before you -compile the target project. - -The following options exist: - -export AFL_LLVM_LAF_SPLIT_SWITCHES=1 Enables the split-switches pass. - -export AFL_LLVM_LAF_TRANSFORM_COMPARES=1 Enables the transform-compares pass - (strcmp, memcmp, strncmp, strcasecmp, strncasecmp). - -export AFL_LLVM_LAF_SPLIT_COMPARES=1 Enables the split-compares pass. - By default it will split all compares with a bit width <= 64 bits. - You can change this behaviour by setting - export AFL_LLVM_LAF_SPLIT_COMPARES_BITW=. - diff --git a/llvm_mode/README.laf-intel.md b/llvm_mode/README.laf-intel.md new file mode 100644 index 00000000..d51c7e2f --- /dev/null +++ b/llvm_mode/README.laf-intel.md @@ -0,0 +1,25 @@ +# laf-intel instrumentation + +## Usage + +By default the passes will not run when you compile programs using +afl-clang-fast. Hence, you can use AFL as usual. +To enable the passes you must set environment variables before you +compile the target project. + +The following options exist: + +`export AFL_LLVM_LAF_SPLIT_SWITCHES=1` + +Enables the split-switches pass. 
+ +`export AFL_LLVM_LAF_TRANSFORM_COMPARES=1` + +Enables the transform-compares pass (strcmp, memcmp, strncmp, strcasecmp, strncasecmp). + +`export AFL_LLVM_LAF_SPLIT_COMPARES=1` + +Enables the split-compares pass. +By default it will split all compares with a bit width <= 64 bits. +You can change this behaviour by setting `export AFL_LLVM_LAF_SPLIT_COMPARES_BITW=`. + diff --git a/llvm_mode/README.llvm b/llvm_mode/README.md similarity index 93% rename from llvm_mode/README.llvm rename to llvm_mode/README.md index 9bb091ac..c7ef4b45 100644 --- a/llvm_mode/README.llvm +++ b/llvm_mode/README.md @@ -1,12 +1,9 @@ -============================================ -Fast LLVM-based instrumentation for afl-fuzz -============================================ +# Fast LLVM-based instrumentation for afl-fuzz (See ../docs/README for the general instruction manual.) (See ../gcc_plugin/README.gcc for the GCC-based instrumentation.) -1) Introduction ---------------- +## 1) Introduction ! llvm_mode works with llvm versions 3.8.0 up to 9 ! @@ -38,8 +35,7 @@ co-exists with the original code. The idea and much of the implementation comes from Laszlo Szekeres. -2) How to use this ------------------- +## 2) How to use this In order to leverage this mechanism, you need to have clang installed on your system. You should also make sure that the llvm-config tool is in your path @@ -63,8 +59,10 @@ called afl-clang-fast and afl-clang-fast++ in the parent directory. Once this is done, you can instrument third-party code in a way similar to the standard operating mode of AFL, e.g.: +``` CC=/path/to/afl/afl-clang-fast ./configure [...options...] make +``` Be sure to also include CXX set to afl-clang-fast++ for C++ code. @@ -78,7 +76,7 @@ Note: if you want the LLVM helper to be installed on your system for all users, you need to build it before issuing 'make install' in the parent directory. -3) Options +## 3) Options Several options are present to make llvm_mode faster or help it rearrange the code to make afl-fuzz path discovery easier. @@ -101,15 +99,12 @@ is not optimal and was only fixed in llvm 9. You can set this with AFL_LLVM_NOT_ZERO=1 See README.neverzero - -4) Gotchas, feedback, bugs --------------------------- +## 4) Gotchas, feedback, bugs This is an early-stage mechanism, so field reports are welcome. You can send bug reports to . -5) Bonus feature #1: deferred initialization --------------------------------------------- +## 5) Bonus feature #1: deferred initialization AFL tries to optimize performance by executing the targeted binary just once, stopping it just before main(), and then cloning this "master" process to get @@ -145,9 +140,11 @@ a location after: With the location selected, add this code in the appropriate spot: +```c #ifdef __AFL_HAVE_MANUAL_CONTROL __AFL_INIT(); #endif +``` You don't need the #ifdef guards, but including them ensures that the program will keep working normally when compiled with a tool other than afl-clang-fast. @@ -155,8 +152,7 @@ will keep working normally when compiled with a tool other than afl-clang-fast. Finally, recompile the program with afl-clang-fast (afl-gcc or afl-clang will *not* generate a deferred-initialization binary) - and you should be all set! -6) Bonus feature #2: persistent mode ------------------------------------- +## 6) Bonus feature #2: persistent mode Some libraries provide APIs that are stateless, or whose state can be reset in between processing different input files. 
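Circling back to the laf-intel split-compares pass documented above, the transformation can be pictured with this hand-written illustration (not actual pass output): one 32-bit equality becomes a chain of byte-sized checks, so every matching byte contributes its own coverage edge and the fuzzer can approach a magic value incrementally:

```c
/* Hand-written equivalent of splitting x == 0xdeadbeef into byte checks. */
#include <stdio.h>
#include <stdint.h>

static int cmp32_split(uint32_t x, uint32_t magic) {

  if ((uint8_t)(x >> 24) != (uint8_t)(magic >> 24)) return 0;
  if ((uint8_t)(x >> 16) != (uint8_t)(magic >> 16)) return 0;
  if ((uint8_t)(x >>  8) != (uint8_t)(magic >>  8)) return 0;
  return (uint8_t)x == (uint8_t)magic;

}

int main(void) {

  printf("%d\n", cmp32_split(0xdeadbeef, 0xdeadbeef)); /* 1 */
  printf("%d\n", cmp32_split(0xdeadbe00, 0xdeadbeef)); /* 0, three bytes in */
  return 0;

}
```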
When such a reset is performed, a @@ -165,6 +161,7 @@ eliminating the need for repeated fork() calls and the associated OS overhead. The basic structure of the program that does this would be: +```c while (__AFL_LOOP(1000)) { /* Read input data. */ @@ -174,6 +171,7 @@ The basic structure of the program that does this would be: } /* Exit normally */ +``` The numerical value specified within the loop controls the maximum number of iterations before AFL will restart the process from scratch. This minimizes @@ -182,8 +180,8 @@ and going much higher increases the likelihood of hiccups without giving you any real performance benefits. A more detailed template is shown in ../experimental/persistent_demo/. -Similarly to the previous mode, the feature works only with afl-clang-fast; -#ifdef guards can be used to suppress it when using other compilers. +Similarly to the previous mode, the feature works only with afl-clang-fast; #ifdef +guards can be used to suppress it when using other compilers. Note that as with the previous mode, the feature is easy to misuse; if you do not fully reset the critical state, you may end up with false positives or @@ -195,8 +193,7 @@ PS. Because there are task switches still involved, the mode isn't as fast as faster than the normal fork() model, and compared to in-process fuzzing, should be a lot more robust. -8) Bonus feature #3: new 'trace-pc-guard' mode ----------------------------------------------- +## 8) Bonus feature #3: new 'trace-pc-guard' mode Recent versions of LLVM are shipping with a built-in execution tracing feature that provides AFL with the necessary tracing data without the need to @@ -207,7 +204,9 @@ post-process the assembly or install any compiler plugins. See: If you have a sufficiently recent compiler and want to give it a try, build afl-clang-fast this way: +``` AFL_TRACE_PC=1 make clean all +``` Note that this mode is currently about 20% slower than "vanilla" afl-clang-fast, and about 5-10% slower than afl-clang. This is likely because the diff --git a/llvm_mode/README.neverzero b/llvm_mode/README.neverzero.md similarity index 92% rename from llvm_mode/README.neverzero rename to llvm_mode/README.neverzero.md index ef873acb..5fcf7b47 100644 --- a/llvm_mode/README.neverzero +++ b/llvm_mode/README.neverzero.md @@ -1,5 +1,6 @@ -Usage -===== +# NeverZero counters for LLVM instrumentation + +## Usage In larger, complex or reiterative programs the map that collects the edge pairs can easily fill up and wrap. @@ -18,5 +19,6 @@ in version 9 and onwards. If you want to enable this for llvm < 9 then set +``` export AFL_LLVM_NOT_ZERO=1 - +``` diff --git a/llvm_mode/README.whitelist b/llvm_mode/README.whitelist.md similarity index 88% rename from llvm_mode/README.whitelist rename to llvm_mode/README.whitelist.md index ae044749..5aededba 100644 --- a/llvm_mode/README.whitelist +++ b/llvm_mode/README.whitelist.md @@ -1,6 +1,4 @@ -======================================== -Using afl++ with partial instrumentation -======================================== +# Using afl++ with partial instrumentation This file describes how you can selectively instrument only the source files that are interesting to you using the LLVM instrumentation provided by @@ -8,9 +6,7 @@ Using afl++ with partial instrumentation Originally developed by Christian Holler (:decoder) . 
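Returning to README.neverzero.md above: the wrap-avoiding counter update it describes can be demonstrated in isolation. This is a sketch of the idea rather than the LLVM pass itself; the carry produced by an 8-bit overflow is added back in, so a hot edge can never wrap to zero and vanish from the map:

```c
/* NeverZero-style increment: an 8-bit counter wraps to 1, never to 0. */
#include <stdio.h>

static unsigned char bump(unsigned char c) {

  unsigned int x = (unsigned int)c + 1;
  return (unsigned char)(x + (x >> 8));   /* (x >> 8) is 1 only on wrap */

}

int main(void) {

  printf("bump(0)   = %u\n", bump(0));    /* 1   */
  printf("bump(254) = %u\n", bump(254));  /* 255 */
  printf("bump(255) = %u\n", bump(255));  /* 1, not 0 */
  return 0;

}
```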
- -1) Description and purpose --------------------------- +## 1) Description and purpose When building and testing complex programs where only a part of the program is the fuzzing target, it often helps to only instrument the necessary parts of @@ -23,15 +19,13 @@ mode of AFLFuzz that allows you to specify on a source file level which files should be compiled with or without instrumentation. -2) Building the LLVM module ---------------------------- +## 2) Building the LLVM module The new code is part of the existing afl++ LLVM module in the llvm_mode/ subdirectory. There is nothing specifically to do :) -3) How to use the partial instrumentation mode ----------------------------------------------- +## 3) How to use the partial instrumentation mode In order to build with partial instrumentation, you need to build with afl-clang-fast and afl-clang-fast++ respectively. The only required change is @@ -45,21 +39,27 @@ matching when absolute paths are used during compilation). For example if your source tree looks like this: +``` project/ project/feature_a/a1.cpp project/feature_a/a2.cpp project/feature_b/b1.cpp project/feature_b/b2.cpp +``` And you only want to test feature_a, then create a whitelist file containing: +``` feature_a/a1.cpp feature_a/a2.cpp +``` However if the whitelist file contains this, it works as well: +``` a1.cpp a2.cpp +``` but it might lead to files being unwantedly instrumented if the same filename exists somewhere else in the project. From c124576a4dc00e31ad5cad118098f46eaa29cd17 Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Sun, 1 Sep 2019 17:47:14 +0200 Subject: [PATCH 60/83] change text color in FATAL, ABORT and PFATAL macros for the actual message to avoid white text on white background (as is standard in plain X11 xterm). Now the text will be printed in default text color (which should be always readable) --- debug.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/debug.h b/debug.h index a943a573..c0044280 100644 --- a/debug.h +++ b/debug.h @@ -198,7 +198,7 @@ #define FATAL(x...) do { \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \ - cBRI x); \ + cRST x); \ SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", \ __FUNCTION__, __FILE__, __LINE__); \ exit(1); \ @@ -208,7 +208,7 @@ #define ABORT(x...) do { \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \ - cBRI x); \ + cRST x); \ SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", \ __FUNCTION__, __FILE__, __LINE__); \ abort(); \ @@ -219,7 +219,7 @@ #define PFATAL(x...) 
do { \ fflush(stdout); \ SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] SYSTEM ERROR : " \ - cBRI x); \ + cRST x); \ SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", \ __FUNCTION__, __FILE__, __LINE__); \ SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \ From 3b3df4e3cb0ce3e6ea728b68694b579e15cd00f7 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Sun, 1 Sep 2019 20:34:20 +0200 Subject: [PATCH 61/83] afl-fuzz-src bitmap and queue C files --- Makefile | 2 +- include/afl-fuzz.h | 66 ++++ src/afl-fuzz-src/afl-fuzz.c | 691 ------------------------------------ src/afl-fuzz-src/bitmap.c | 410 +++++++++++++++++++++ src/afl-fuzz-src/misc.c | 24 ++ src/afl-fuzz-src/queue.c | 286 +++++++++++++++ 6 files changed, 787 insertions(+), 692 deletions(-) create mode 100644 src/afl-fuzz-src/bitmap.c create mode 100644 src/afl-fuzz-src/misc.c create mode 100644 src/afl-fuzz-src/queue.c diff --git a/Makefile b/Makefile index 0efdddd1..14342129 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,7 @@ SH_PROGS = afl-plot afl-cmin afl-whatsup afl-system-config CFLAGS ?= -O3 -funroll-loops CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I include/ \ -DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \ - -DBIN_PATH=\"$(BIN_PATH)\" + -DBIN_PATH=\"$(BIN_PATH)\" -Wno-unused-function AFL_FUZZ_FILES = $(wildcard src/afl-fuzz-src/*.c) diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index f243c7ba..22a78373 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -466,6 +466,34 @@ void trim_py(char**, size_t*); #endif +/* Queue */ + +void mark_as_det_done(struct queue_entry* q); +void mark_as_variable(struct queue_entry* q); +void mark_as_redundant(struct queue_entry* q, u8 state); +void add_to_queue(u8* fname, u32 len, u8 passed_det); +void destroy_queue(void); +void update_bitmap_score(struct queue_entry* q); +void cull_queue(void); + +/* Bitmap */ + +void write_bitmap(void); +void read_bitmap(u8* fname); +u8 has_new_bits(u8* virgin_map); +u32 count_bits(u8* mem); +u32 count_bytes(u8* mem); +u32 count_non_255_bytes(u8* mem); +#ifdef __x86_64__ +void simplify_trace(u64* mem); +void classify_counts(u64* mem); +#else +void simplify_trace(u32* mem); +void classify_counts(u32* mem); +#endif +void init_count_class16(void); +void minimize_bits(u8* dst, u8* src); + /**** Inline routines ****/ /* Generate a random number (from 0 to limit - 1). This may @@ -493,5 +521,43 @@ static inline u32 UR(u32 limit) { #endif } +/* Find first power of two greater or equal to val (assuming val under + 2^63). 
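The "may have slight bias" remark on UR() above is ordinary modulo bias: when the generator's range is not a multiple of `limit`, the leftover values favor small remainders, which is exactly what `arc4random_uniform()` corrects for. A downscaled demonstration over an 8-bit range, where the imbalance can be counted exhaustively:

```c
/* Modulo bias, downscaled: 256 values split over limit 3 leave one extra. */
#include <stdio.h>

int main(void) {

  int limit = 3, hist[3] = { 0 };

  for (int v = 0; v < 256; v++) hist[v % limit]++;

  for (int r = 0; r < limit; r++)
    printf("%d: %d/256\n", r, hist[r]);   /* 86, 85, 85 */

  return 0;

}
```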
*/ + +static u64 next_p2(u64 val) { + + u64 ret = 1; + while (val > ret) ret <<= 1; + return ret; + +} + +/* Get unix time in milliseconds */ + +static u64 get_cur_time(void) { + + struct timeval tv; + struct timezone tz; + + gettimeofday(&tv, &tz); + + return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000); + +} + + +/* Get unix time in microseconds */ + +static u64 get_cur_time_us(void) { + + struct timeval tv; + struct timezone tz; + + gettimeofday(&tv, &tz); + + return (tv.tv_sec * 1000000ULL) + tv.tv_usec; + +} + #endif diff --git a/src/afl-fuzz-src/afl-fuzz.c b/src/afl-fuzz-src/afl-fuzz.c index b93c17c8..dcb97387 100644 --- a/src/afl-fuzz-src/afl-fuzz.c +++ b/src/afl-fuzz-src/afl-fuzz.c @@ -45,34 +45,6 @@ int select_algorithm(void) { } -/* Get unix time in milliseconds */ - -static u64 get_cur_time(void) { - - struct timeval tv; - struct timezone tz; - - gettimeofday(&tv, &tz); - - return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000); - -} - - -/* Get unix time in microseconds */ - -static u64 get_cur_time_us(void) { - - struct timeval tv; - struct timezone tz; - - gettimeofday(&tv, &tz); - - return (tv.tv_sec * 1000000ULL) + tv.tv_usec; - -} - - /* Shuffle an array of pointers. Might be slightly biased. */ static void shuffle_ptrs(void** ptrs, u32 cnt) { @@ -393,669 +365,6 @@ static u8* DTD(u64 cur_ms, u64 event_ms) { } -/* Mark deterministic checks as done for a particular queue entry. We use the - .state file to avoid repeating deterministic fuzzing when resuming aborted - scans. */ - -static void mark_as_det_done(struct queue_entry* q) { - - u8* fn = strrchr(q->fname, '/'); - s32 fd; - - fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1); - - fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (fd < 0) PFATAL("Unable to create '%s'", fn); - close(fd); - - ck_free(fn); - - q->passed_det = 1; - -} - - -/* Mark as variable. Create symlinks if possible to make it easier to examine - the files. */ - -static void mark_as_variable(struct queue_entry* q) { - - u8 *fn = strrchr(q->fname, '/') + 1, *ldest; - - ldest = alloc_printf("../../%s", fn); - fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn); - - if (symlink(ldest, fn)) { - - s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (fd < 0) PFATAL("Unable to create '%s'", fn); - close(fd); - - } - - ck_free(ldest); - ck_free(fn); - - q->var_behavior = 1; - -} - - -/* Mark / unmark as redundant (edge-only). This is not used for restoring state, - but may be useful for post-processing datasets. */ - -static void mark_as_redundant(struct queue_entry* q, u8 state) { - - u8* fn; - - if (state == q->fs_redundant) return; - - q->fs_redundant = state; - - fn = strrchr(q->fname, '/'); - fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1); - - if (state) { - - s32 fd; - - fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (fd < 0) PFATAL("Unable to create '%s'", fn); - close(fd); - - } else { - - if (unlink(fn)) PFATAL("Unable to remove '%s'", fn); - - } - - ck_free(fn); - -} - - -/* Append new test case to the queue. 
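A quick standalone check of next_p2() from the header above; the function body is restated verbatim so the sample compiles on its own:

```c
/* next_p2() restated from afl-fuzz.h, plus a few probe values. */
#include <stdio.h>

typedef unsigned long long u64;

static u64 next_p2(u64 val) {

  u64 ret = 1;
  while (val > ret) ret <<= 1;
  return ret;

}

int main(void) {

  u64 in[] = { 0, 1, 2, 3, 5, 1000 };

  for (int i = 0; i < 6; i++)
    printf("next_p2(%llu) = %llu\n", in[i], next_p2(in[i]));
  /* 1, 1, 2, 4, 8, 1024 */

  return 0;

}
```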
*/ - -static void add_to_queue(u8* fname, u32 len, u8 passed_det) { - - struct queue_entry* q = ck_alloc(sizeof(struct queue_entry)); - - q->fname = fname; - q->len = len; - q->depth = cur_depth + 1; - q->passed_det = passed_det; - q->n_fuzz = 1; - - if (q->depth > max_depth) max_depth = q->depth; - - if (queue_top) { - - queue_top->next = q; - queue_top = q; - - } else q_prev100 = queue = queue_top = q; - - ++queued_paths; - ++pending_not_fuzzed; - - cycles_wo_finds = 0; - - if (!(queued_paths % 100)) { - - q_prev100->next_100 = q; - q_prev100 = q; - - } - - last_path_time = get_cur_time(); - -} - - -/* Destroy the entire queue. */ - -void destroy_queue(void) { - - struct queue_entry *q = queue, *n; - - while (q) { - - n = q->next; - ck_free(q->fname); - ck_free(q->trace_mini); - ck_free(q); - q = n; - - } - -} - - -/* Write bitmap to file. The bitmap is useful mostly for the secret - -B option, to focus a separate fuzzing session on a particular - interesting input without rediscovering all the others. */ - -void write_bitmap(void) { - - u8* fname; - s32 fd; - - if (!bitmap_changed) return; - bitmap_changed = 0; - - fname = alloc_printf("%s/fuzz_bitmap", out_dir); - fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600); - - if (fd < 0) PFATAL("Unable to open '%s'", fname); - - ck_write(fd, virgin_bits, MAP_SIZE, fname); - - close(fd); - ck_free(fname); - -} - - -/* Read bitmap from file. This is for the -B option again. */ - -void read_bitmap(u8* fname) { - - s32 fd = open(fname, O_RDONLY); - - if (fd < 0) PFATAL("Unable to open '%s'", fname); - - ck_read(fd, virgin_bits, MAP_SIZE, fname); - - close(fd); - -} - - -/* Check if the current execution path brings anything new to the table. - Update virgin bits to reflect the finds. Returns 1 if the only change is - the hit-count for a particular tuple; 2 if there are new tuples seen. - Updates the map, so subsequent calls will always return 0. - - This function is called after every exec() on a fairly large buffer, so - it needs to be fast. We do this in 32-bit and 64-bit flavors. */ - -static inline u8 has_new_bits(u8* virgin_map) { - -#ifdef __x86_64__ - - u64* current = (u64*)trace_bits; - u64* virgin = (u64*)virgin_map; - - u32 i = (MAP_SIZE >> 3); - -#else - - u32* current = (u32*)trace_bits; - u32* virgin = (u32*)virgin_map; - - u32 i = (MAP_SIZE >> 2); - -#endif /* ^__x86_64__ */ - - u8 ret = 0; - - while (i--) { - - /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap - that have not been already cleared from the virgin map - since this will - almost always be the case. */ - - if (unlikely(*current) && unlikely(*current & *virgin)) { - - if (likely(ret < 2)) { - - u8* cur = (u8*)current; - u8* vir = (u8*)virgin; - - /* Looks like we have not found any new bytes yet; see if any non-zero - bytes in current[] are pristine in virgin[]. 
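The return-value convention of has_new_bits() - deleted here and re-added unchanged in bitmap.c below - is easiest to see on a downscaled map. This sketch collapses the word-at-a-time loop into a per-byte check but keeps the semantics: 2 for a brand-new tuple, 1 for only a new hit count, 0 for nothing new:

```c
/* Downscaled has_new_bits(): a 4-byte map instead of MAP_SIZE. */
#include <stdio.h>

typedef unsigned char u8;

static u8 toy_has_new_bits(u8* current, u8* virgin, int n) {

  u8 ret = 0;

  for (int i = 0; i < n; i++) {

    if (current[i] && (current[i] & virgin[i])) {

      if (virgin[i] == 0xff) ret = 2;   /* tuple never seen at all */
      else if (ret < 2) ret = 1;        /* only a new hit count    */
      virgin[i] &= ~current[i];

    }

  }

  return ret;

}

int main(void) {

  u8 virgin[4] = { 0xff, 0xff, 0xff, 0xff };
  u8 t1[4] = { 1, 0, 0, 0 };   /* classified trace: bucket 1 */
  u8 t2[4] = { 2, 0, 0, 0 };   /* same tuple, bucket 2       */

  printf("%d\n", toy_has_new_bits(t1, virgin, 4));  /* 2 */
  printf("%d\n", toy_has_new_bits(t2, virgin, 4));  /* 1 */
  printf("%d\n", toy_has_new_bits(t2, virgin, 4));  /* 0 */
  return 0;

}
```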
*/ - -#ifdef __x86_64__ - - if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || - (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) || - (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) || - (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2; - else ret = 1; - -#else - - if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || - (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2; - else ret = 1; - -#endif /* ^__x86_64__ */ - - } - - *virgin &= ~*current; - - } - - ++current; - ++virgin; - - } - - if (ret && virgin_map == virgin_bits) bitmap_changed = 1; - - return ret; - -} - - -/* Count the number of bits set in the provided bitmap. Used for the status - screen several times every second, does not have to be fast. */ - -static u32 count_bits(u8* mem) { - - u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); - u32 ret = 0; - - while (i--) { - - u32 v = *(ptr++); - - /* This gets called on the inverse, virgin bitmap; optimize for sparse - data. */ - - if (v == 0xffffffff) { - ret += 32; - continue; - } - - v -= ((v >> 1) & 0x55555555); - v = (v & 0x33333333) + ((v >> 2) & 0x33333333); - ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24; - - } - - return ret; - -} - - -#define FF(_b) (0xff << ((_b) << 3)) - -/* Count the number of bytes set in the bitmap. Called fairly sporadically, - mostly to update the status screen or calibrate and examine confirmed - new paths. */ - -static u32 count_bytes(u8* mem) { - - u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); - u32 ret = 0; - - while (i--) { - - u32 v = *(ptr++); - - if (!v) continue; - if (v & FF(0)) ++ret; - if (v & FF(1)) ++ret; - if (v & FF(2)) ++ret; - if (v & FF(3)) ++ret; - - } - - return ret; - -} - - -/* Count the number of non-255 bytes set in the bitmap. Used strictly for the - status screen, several calls per second or so. */ - -static u32 count_non_255_bytes(u8* mem) { - - u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); - u32 ret = 0; - - while (i--) { - - u32 v = *(ptr++); - - /* This is called on the virgin bitmap, so optimize for the most likely - case. */ - - if (v == 0xffffffff) continue; - if ((v & FF(0)) != FF(0)) ++ret; - if ((v & FF(1)) != FF(1)) ++ret; - if ((v & FF(2)) != FF(2)) ++ret; - if ((v & FF(3)) != FF(3)) ++ret; - - } - - return ret; - -} - - -/* Destructively simplify trace by eliminating hit count information - and replacing it with 0x80 or 0x01 depending on whether the tuple - is hit or not. Called on every new crash or timeout, should be - reasonably fast. */ - -static const u8 simplify_lookup[256] = { - - [0] = 1, - [1 ... 255] = 128 - -}; - -#ifdef __x86_64__ - -static void simplify_trace(u64* mem) { - - u32 i = MAP_SIZE >> 3; - - while (i--) { - - /* Optimize for sparse bitmaps. */ - - if (unlikely(*mem)) { - - u8* mem8 = (u8*)mem; - - mem8[0] = simplify_lookup[mem8[0]]; - mem8[1] = simplify_lookup[mem8[1]]; - mem8[2] = simplify_lookup[mem8[2]]; - mem8[3] = simplify_lookup[mem8[3]]; - mem8[4] = simplify_lookup[mem8[4]]; - mem8[5] = simplify_lookup[mem8[5]]; - mem8[6] = simplify_lookup[mem8[6]]; - mem8[7] = simplify_lookup[mem8[7]]; - - } else *mem = 0x0101010101010101ULL; - - ++mem; - - } - -} - -#else - -static void simplify_trace(u32* mem) { - - u32 i = MAP_SIZE >> 2; - - while (i--) { - - /* Optimize for sparse bitmaps. 
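The three-line bit dance in count_bits() above is a classic SWAR population count. A standalone program checking it against a naive bit loop:

```c
/* SWAR popcount as used in count_bits(), verified against a naive loop. */
#include <stdio.h>

typedef unsigned int u32;

static u32 swar_popcount(u32 v) {

  v -= (v >> 1) & 0x55555555;                     /* 2-bit partial sums */
  v = (v & 0x33333333) + ((v >> 2) & 0x33333333); /* 4-bit partial sums */
  return (((v + (v >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;

}

static u32 naive_popcount(u32 v) {

  u32 n = 0;
  while (v) { n += v & 1; v >>= 1; }
  return n;

}

int main(void) {

  u32 samples[] = { 0, 1, 0xff, 0xdeadbeef, 0xffffffff };

  for (int i = 0; i < 5; i++)
    printf("%08x -> %u (naive %u)\n", samples[i],
           swar_popcount(samples[i]), naive_popcount(samples[i]));
  return 0;

}
```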
*/ - - if (unlikely(*mem)) { - - u8* mem8 = (u8*)mem; - - mem8[0] = simplify_lookup[mem8[0]]; - mem8[1] = simplify_lookup[mem8[1]]; - mem8[2] = simplify_lookup[mem8[2]]; - mem8[3] = simplify_lookup[mem8[3]]; - - } else *mem = 0x01010101; - - ++mem; - } - -} - -#endif /* ^__x86_64__ */ - - -/* Destructively classify execution counts in a trace. This is used as a - preprocessing step for any newly acquired traces. Called on every exec, - must be fast. */ - -static const u8 count_class_lookup8[256] = { - - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 - -}; - -static u16 count_class_lookup16[65536]; - - -void init_count_class16(void) { - - u32 b1, b2; - - for (b1 = 0; b1 < 256; b1++) - for (b2 = 0; b2 < 256; b2++) - count_class_lookup16[(b1 << 8) + b2] = - (count_class_lookup8[b1] << 8) | - count_class_lookup8[b2]; - -} - - -#ifdef __x86_64__ - -static inline void classify_counts(u64* mem) { - - u32 i = MAP_SIZE >> 3; - - while (i--) { - - /* Optimize for sparse bitmaps. */ - - if (unlikely(*mem)) { - - u16* mem16 = (u16*)mem; - - mem16[0] = count_class_lookup16[mem16[0]]; - mem16[1] = count_class_lookup16[mem16[1]]; - mem16[2] = count_class_lookup16[mem16[2]]; - mem16[3] = count_class_lookup16[mem16[3]]; - - } - - ++mem; - - } - -} - -#else - -static inline void classify_counts(u32* mem) { - - u32 i = MAP_SIZE >> 2; - - while (i--) { - - /* Optimize for sparse bitmaps. */ - - if (unlikely(*mem)) { - - u16* mem16 = (u16*)mem; - - mem16[0] = count_class_lookup16[mem16[0]]; - mem16[1] = count_class_lookup16[mem16[1]]; - - } - - ++mem; - - } - -} - -#endif /* ^__x86_64__ */ - - -/* Compact trace bytes into a smaller bitmap. We effectively just drop the - count information here. This is called only sporadically, for some - new paths. */ - -static void minimize_bits(u8* dst, u8* src) { - - u32 i = 0; - - while (i < MAP_SIZE) { - - if (*(src++)) dst[i >> 3] |= 1 << (i & 7); - ++i; - - } - -} - - - -/* Find first power of two greater or equal to val (assuming val under - 2^63). */ - -static u64 next_p2(u64 val) { - - u64 ret = 1; - while (val > ret) ret <<= 1; - return ret; - -} - - -/* When we bump into a new path, we call this to see if the path appears - more "favorable" than any of the existing ones. The purpose of the - "favorables" is to have a minimal set of paths that trigger all the bits - seen in the bitmap so far, and focus on fuzzing them at the expense of - the rest. - - The first step of the process is to maintain a list of top_rated[] entries - for every byte in the bitmap. We win that slot if there is no previous - contender, or if the contender has a more favorable speed x size factor. */ - - -static void update_bitmap_score(struct queue_entry* q) { - - u32 i; - u64 fav_factor = q->exec_us * q->len; - u64 fuzz_p2 = next_p2 (q->n_fuzz); - - /* For every byte set in trace_bits[], see if there is a previous winner, - and how it compares to us. */ - - for (i = 0; i < MAP_SIZE; ++i) - - if (trace_bits[i]) { - - if (top_rated[i]) { - - /* Faster-executing or smaller test cases are favored. */ - u64 top_rated_fuzz_p2 = next_p2 (top_rated[i]->n_fuzz); - u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len; - - if (fuzz_p2 > top_rated_fuzz_p2) { - continue; - } else if (fuzz_p2 == top_rated_fuzz_p2) { - if (fav_factor > top_rated_fav_factor) - continue; - } - - if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue; - - /* Looks like we're going to win. 
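The count_class_lookup8[] table above quantizes raw hit counts so that, say, 20 vs. 25 loop iterations classify identically while 3 vs. 4 do not. A standalone restatement of its bucket boundaries:

```c
/* Bucket boundaries of count_class_lookup8, written out as comparisons. */
#include <stdio.h>

typedef unsigned char u8;

static u8 bucket(u8 hits) {

  if (hits == 0)   return 0;
  if (hits == 1)   return 1;
  if (hits == 2)   return 2;
  if (hits == 3)   return 4;
  if (hits <= 7)   return 8;
  if (hits <= 15)  return 16;
  if (hits <= 31)  return 32;
  if (hits <= 127) return 64;
  return 128;

}

int main(void) {

  u8 probes[] = { 0, 1, 2, 3, 4, 9, 20, 100, 200 };

  for (int i = 0; i < 9; i++)
    printf("%3u hits -> bucket %3u\n", probes[i], bucket(probes[i]));
  return 0;

}
```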
Decrease ref count for the - previous winner, discard its trace_bits[] if necessary. */ - - if (!--top_rated[i]->tc_ref) { - ck_free(top_rated[i]->trace_mini); - top_rated[i]->trace_mini = 0; - } - - } - - /* Insert ourselves as the new winner. */ - - top_rated[i] = q; - ++q->tc_ref; - - if (!q->trace_mini) { - q->trace_mini = ck_alloc(MAP_SIZE >> 3); - minimize_bits(q->trace_mini, trace_bits); - } - - score_changed = 1; - - } - -} - - -/* The second part of the mechanism discussed above is a routine that - goes over top_rated[] entries, and then sequentially grabs winners for - previously-unseen bytes (temp_v) and marks them as favored, at least - until the next run. The favored entries are given more air time during - all fuzzing steps. */ - -static void cull_queue(void) { - - struct queue_entry* q; - static u8 temp_v[MAP_SIZE >> 3]; - u32 i; - - if (dumb_mode || !score_changed) return; - - score_changed = 0; - - memset(temp_v, 255, MAP_SIZE >> 3); - - queued_favored = 0; - pending_favored = 0; - - q = queue; - - while (q) { - q->favored = 0; - q = q->next; - } - - /* Let's see if anything in the bitmap isn't captured in temp_v. - If yes, and if it has a top_rated[] contender, let's use it. */ - - for (i = 0; i < MAP_SIZE; ++i) - if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) { - - u32 j = MAP_SIZE >> 3; - - /* Remove all bits belonging to the current entry from temp_v. */ - - while (j--) - if (top_rated[i]->trace_mini[j]) - temp_v[j] &= ~top_rated[i]->trace_mini[j]; - - top_rated[i]->favored = 1; - ++queued_favored; - - if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored; - - } - - q = queue; - - while (q) { - mark_as_redundant(q, !q->favored); - q = q->next; - } - -} - - /* Load postprocessor, if available. */ static void setup_post(void) { diff --git a/src/afl-fuzz-src/bitmap.c b/src/afl-fuzz-src/bitmap.c new file mode 100644 index 00000000..6cd9852f --- /dev/null +++ b/src/afl-fuzz-src/bitmap.c @@ -0,0 +1,410 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#include "afl-fuzz.h" + +/* Write bitmap to file. The bitmap is useful mostly for the secret + -B option, to focus a separate fuzzing session on a particular + interesting input without rediscovering all the others. */ + +void write_bitmap(void) { + + u8* fname; + s32 fd; + + if (!bitmap_changed) return; + bitmap_changed = 0; + + fname = alloc_printf("%s/fuzz_bitmap", out_dir); + fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600); + + if (fd < 0) PFATAL("Unable to open '%s'", fname); + + ck_write(fd, virgin_bits, MAP_SIZE, fname); + + close(fd); + ck_free(fname); + +} + + +/* Read bitmap from file. This is for the -B option again. */ + +void read_bitmap(u8* fname) { + + s32 fd = open(fname, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", fname); + + ck_read(fd, virgin_bits, MAP_SIZE, fname); + + close(fd); + +} + + +/* Check if the current execution path brings anything new to the table. 
+ Update virgin bits to reflect the finds. Returns 1 if the only change is + the hit-count for a particular tuple; 2 if there are new tuples seen. + Updates the map, so subsequent calls will always return 0. + + This function is called after every exec() on a fairly large buffer, so + it needs to be fast. We do this in 32-bit and 64-bit flavors. */ + +u8 has_new_bits(u8* virgin_map) { + +#ifdef __x86_64__ + + u64* current = (u64*)trace_bits; + u64* virgin = (u64*)virgin_map; + + u32 i = (MAP_SIZE >> 3); + +#else + + u32* current = (u32*)trace_bits; + u32* virgin = (u32*)virgin_map; + + u32 i = (MAP_SIZE >> 2); + +#endif /* ^__x86_64__ */ + + u8 ret = 0; + + while (i--) { + + /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap + that have not been already cleared from the virgin map - since this will + almost always be the case. */ + + if (unlikely(*current) && unlikely(*current & *virgin)) { + + if (likely(ret < 2)) { + + u8* cur = (u8*)current; + u8* vir = (u8*)virgin; + + /* Looks like we have not found any new bytes yet; see if any non-zero + bytes in current[] are pristine in virgin[]. */ + +#ifdef __x86_64__ + + if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || + (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) || + (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) || + (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2; + else ret = 1; + +#else + + if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || + (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2; + else ret = 1; + +#endif /* ^__x86_64__ */ + + } + + *virgin &= ~*current; + + } + + ++current; + ++virgin; + + } + + if (ret && virgin_map == virgin_bits) bitmap_changed = 1; + + return ret; + +} + + +/* Count the number of bits set in the provided bitmap. Used for the status + screen several times every second, does not have to be fast. */ + +u32 count_bits(u8* mem) { + + u32* ptr = (u32*)mem; + u32 i = (MAP_SIZE >> 2); + u32 ret = 0; + + while (i--) { + + u32 v = *(ptr++); + + /* This gets called on the inverse, virgin bitmap; optimize for sparse + data. */ + + if (v == 0xffffffff) { + ret += 32; + continue; + } + + v -= ((v >> 1) & 0x55555555); + v = (v & 0x33333333) + ((v >> 2) & 0x33333333); + ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24; + + } + + return ret; + +} + + +#define FF(_b) (0xff << ((_b) << 3)) + +/* Count the number of bytes set in the bitmap. Called fairly sporadically, + mostly to update the status screen or calibrate and examine confirmed + new paths. */ + +u32 count_bytes(u8* mem) { + + u32* ptr = (u32*)mem; + u32 i = (MAP_SIZE >> 2); + u32 ret = 0; + + while (i--) { + + u32 v = *(ptr++); + + if (!v) continue; + if (v & FF(0)) ++ret; + if (v & FF(1)) ++ret; + if (v & FF(2)) ++ret; + if (v & FF(3)) ++ret; + + } + + return ret; + +} + + +/* Count the number of non-255 bytes set in the bitmap. Used strictly for the + status screen, several calls per second or so. */ + +u32 count_non_255_bytes(u8* mem) { + + u32* ptr = (u32*)mem; + u32 i = (MAP_SIZE >> 2); + u32 ret = 0; + + while (i--) { + + u32 v = *(ptr++); + + /* This is called on the virgin bitmap, so optimize for the most likely + case. 
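+
+       (Editor's note, not part of the original patch: FF(_b) expands to
+       0xff shifted into byte _b, e.g. FF(1) == 0x0000ff00, so the four
+       tests below ask "is this byte still virgin, i.e. 0xff?" for a whole
+       32-bit word per load instead of one byte at a time.)
+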
*/ + + if (v == 0xffffffff) continue; + if ((v & FF(0)) != FF(0)) ++ret; + if ((v & FF(1)) != FF(1)) ++ret; + if ((v & FF(2)) != FF(2)) ++ret; + if ((v & FF(3)) != FF(3)) ++ret; + + } + + return ret; + +} + + +/* Destructively simplify trace by eliminating hit count information + and replacing it with 0x80 or 0x01 depending on whether the tuple + is hit or not. Called on every new crash or timeout, should be + reasonably fast. */ + +const u8 simplify_lookup[256] = { + + [0] = 1, + [1 ... 255] = 128 + +}; + +#ifdef __x86_64__ + +void simplify_trace(u64* mem) { + + u32 i = MAP_SIZE >> 3; + + while (i--) { + + /* Optimize for sparse bitmaps. */ + + if (unlikely(*mem)) { + + u8* mem8 = (u8*)mem; + + mem8[0] = simplify_lookup[mem8[0]]; + mem8[1] = simplify_lookup[mem8[1]]; + mem8[2] = simplify_lookup[mem8[2]]; + mem8[3] = simplify_lookup[mem8[3]]; + mem8[4] = simplify_lookup[mem8[4]]; + mem8[5] = simplify_lookup[mem8[5]]; + mem8[6] = simplify_lookup[mem8[6]]; + mem8[7] = simplify_lookup[mem8[7]]; + + } else *mem = 0x0101010101010101ULL; + + ++mem; + + } + +} + +#else + +void simplify_trace(u32* mem) { + + u32 i = MAP_SIZE >> 2; + + while (i--) { + + /* Optimize for sparse bitmaps. */ + + if (unlikely(*mem)) { + + u8* mem8 = (u8*)mem; + + mem8[0] = simplify_lookup[mem8[0]]; + mem8[1] = simplify_lookup[mem8[1]]; + mem8[2] = simplify_lookup[mem8[2]]; + mem8[3] = simplify_lookup[mem8[3]]; + + } else *mem = 0x01010101; + + ++mem; + } + +} + +#endif /* ^__x86_64__ */ + + +/* Destructively classify execution counts in a trace. This is used as a + preprocessing step for any newly acquired traces. Called on every exec, + must be fast. */ + +static const u8 count_class_lookup8[256] = { + + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 + +}; + +static u16 count_class_lookup16[65536]; + + +void init_count_class16(void) { + + u32 b1, b2; + + for (b1 = 0; b1 < 256; b1++) + for (b2 = 0; b2 < 256; b2++) + count_class_lookup16[(b1 << 8) + b2] = + (count_class_lookup8[b1] << 8) | + count_class_lookup8[b2]; + +} + + +#ifdef __x86_64__ + +void classify_counts(u64* mem) { + + u32 i = MAP_SIZE >> 3; + + while (i--) { + + /* Optimize for sparse bitmaps. */ + + if (unlikely(*mem)) { + + u16* mem16 = (u16*)mem; + + mem16[0] = count_class_lookup16[mem16[0]]; + mem16[1] = count_class_lookup16[mem16[1]]; + mem16[2] = count_class_lookup16[mem16[2]]; + mem16[3] = count_class_lookup16[mem16[3]]; + + } + + ++mem; + + } + +} + +#else + +void classify_counts(u32* mem) { + + u32 i = MAP_SIZE >> 2; + + while (i--) { + + /* Optimize for sparse bitmaps. */ + + if (unlikely(*mem)) { + + u16* mem16 = (u16*)mem; + + mem16[0] = count_class_lookup16[mem16[0]]; + mem16[1] = count_class_lookup16[mem16[1]]; + + } + + ++mem; + + } + +} + +#endif /* ^__x86_64__ */ + + +/* Compact trace bytes into a smaller bitmap. We effectively just drop the + count information here. This is called only sporadically, for some + new paths. */ + +void minimize_bits(u8* dst, u8* src) { + + u32 i = 0; + + while (i < MAP_SIZE) { + + if (*(src++)) dst[i >> 3] |= 1 << (i & 7); + ++i; + + } + +} + diff --git a/src/afl-fuzz-src/misc.c b/src/afl-fuzz-src/misc.c new file mode 100644 index 00000000..58e57c8f --- /dev/null +++ b/src/afl-fuzz-src/misc.c @@ -0,0 +1,24 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. 
All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#include "afl-fuzz.h" + diff --git a/src/afl-fuzz-src/queue.c b/src/afl-fuzz-src/queue.c new file mode 100644 index 00000000..ed352bcb --- /dev/null +++ b/src/afl-fuzz-src/queue.c @@ -0,0 +1,286 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#include "afl-fuzz.h" + +/* Mark deterministic checks as done for a particular queue entry. We use the + .state file to avoid repeating deterministic fuzzing when resuming aborted + scans. */ + +void mark_as_det_done(struct queue_entry* q) { + + u8* fn = strrchr(q->fname, '/'); + s32 fd; + + fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1); + + fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (fd < 0) PFATAL("Unable to create '%s'", fn); + close(fd); + + ck_free(fn); + + q->passed_det = 1; + +} + + +/* Mark as variable. Create symlinks if possible to make it easier to examine + the files. */ + +void mark_as_variable(struct queue_entry* q) { + + u8 *fn = strrchr(q->fname, '/') + 1, *ldest; + + ldest = alloc_printf("../../%s", fn); + fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn); + + if (symlink(ldest, fn)) { + + s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (fd < 0) PFATAL("Unable to create '%s'", fn); + close(fd); + + } + + ck_free(ldest); + ck_free(fn); + + q->var_behavior = 1; + +} + + +/* Mark / unmark as redundant (edge-only). This is not used for restoring state, + but may be useful for post-processing datasets. */ + +void mark_as_redundant(struct queue_entry* q, u8 state) { + + u8* fn; + + if (state == q->fs_redundant) return; + + q->fs_redundant = state; + + fn = strrchr(q->fname, '/'); + fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1); + + if (state) { + + s32 fd; + + fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (fd < 0) PFATAL("Unable to create '%s'", fn); + close(fd); + + } else { + + if (unlink(fn)) PFATAL("Unable to remove '%s'", fn); + + } + + ck_free(fn); + +} + + +/* Append new test case to the queue. 
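+
+   Entries are chained through ->next; every 100th entry is additionally
+   linked through ->next_100 so that long queues can be stepped over
+   quickly. A minimal traversal sketch (editor's illustration, not part
+   of the original patch):
+
+     struct queue_entry* q = queue;
+
+     while (q) {
+       // inspect q->fname, q->len, q->favored here
+       q = q->next;
+     }
+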
*/ + +void add_to_queue(u8* fname, u32 len, u8 passed_det) { + + struct queue_entry* q = ck_alloc(sizeof(struct queue_entry)); + + q->fname = fname; + q->len = len; + q->depth = cur_depth + 1; + q->passed_det = passed_det; + q->n_fuzz = 1; + + if (q->depth > max_depth) max_depth = q->depth; + + if (queue_top) { + + queue_top->next = q; + queue_top = q; + + } else q_prev100 = queue = queue_top = q; + + ++queued_paths; + ++pending_not_fuzzed; + + cycles_wo_finds = 0; + + if (!(queued_paths % 100)) { + + q_prev100->next_100 = q; + q_prev100 = q; + + } + + last_path_time = get_cur_time(); + +} + + +/* Destroy the entire queue. */ + +void destroy_queue(void) { + + struct queue_entry *q = queue, *n; + + while (q) { + + n = q->next; + ck_free(q->fname); + ck_free(q->trace_mini); + ck_free(q); + q = n; + + } + +} + + +/* When we bump into a new path, we call this to see if the path appears + more "favorable" than any of the existing ones. The purpose of the + "favorables" is to have a minimal set of paths that trigger all the bits + seen in the bitmap so far, and focus on fuzzing them at the expense of + the rest. + + The first step of the process is to maintain a list of top_rated[] entries + for every byte in the bitmap. We win that slot if there is no previous + contender, or if the contender has a more favorable speed x size factor. */ + + +void update_bitmap_score(struct queue_entry* q) { + + u32 i; + u64 fav_factor = q->exec_us * q->len; + u64 fuzz_p2 = next_p2 (q->n_fuzz); + + /* For every byte set in trace_bits[], see if there is a previous winner, + and how it compares to us. */ + + for (i = 0; i < MAP_SIZE; ++i) + + if (trace_bits[i]) { + + if (top_rated[i]) { + + /* Faster-executing or smaller test cases are favored. */ + u64 top_rated_fuzz_p2 = next_p2 (top_rated[i]->n_fuzz); + u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len; + + if (fuzz_p2 > top_rated_fuzz_p2) { + continue; + } else if (fuzz_p2 == top_rated_fuzz_p2) { + if (fav_factor > top_rated_fav_factor) + continue; + } + + if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue; + + /* Looks like we're going to win. Decrease ref count for the + previous winner, discard its trace_bits[] if necessary. */ + + if (!--top_rated[i]->tc_ref) { + ck_free(top_rated[i]->trace_mini); + top_rated[i]->trace_mini = 0; + } + + } + + /* Insert ourselves as the new winner. */ + + top_rated[i] = q; + ++q->tc_ref; + + if (!q->trace_mini) { + q->trace_mini = ck_alloc(MAP_SIZE >> 3); + minimize_bits(q->trace_mini, trace_bits); + } + + score_changed = 1; + + } + +} + + +/* The second part of the mechanism discussed above is a routine that + goes over top_rated[] entries, and then sequentially grabs winners for + previously-unseen bytes (temp_v) and marks them as favored, at least + until the next run. The favored entries are given more air time during + all fuzzing steps. */ + +void cull_queue(void) { + + struct queue_entry* q; + static u8 temp_v[MAP_SIZE >> 3]; + u32 i; + + if (dumb_mode || !score_changed) return; + + score_changed = 0; + + memset(temp_v, 255, MAP_SIZE >> 3); + + queued_favored = 0; + pending_favored = 0; + + q = queue; + + while (q) { + q->favored = 0; + q = q->next; + } + + /* Let's see if anything in the bitmap isn't captured in temp_v. + If yes, and if it has a top_rated[] contender, let's use it. */ + + for (i = 0; i < MAP_SIZE; ++i) + if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) { + + u32 j = MAP_SIZE >> 3; + + /* Remove all bits belonging to the current entry from temp_v. 
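+
+         (Editor's note, not part of the original patch: this is a greedy
+         set cover. temp_v starts all-ones, and each selected winner
+         clears the bits it covers via
+
+           temp_v[j] &= ~top_rated[i]->trace_mini[j];
+
+         so later entries whose bits are already covered are never marked
+         favored.)
+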
*/ + + while (j--) + if (top_rated[i]->trace_mini[j]) + temp_v[j] &= ~top_rated[i]->trace_mini[j]; + + top_rated[i]->favored = 1; + ++queued_favored; + + if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored; + + } + + q = queue; + + while (q) { + mark_as_redundant(q, !q->favored); + q = q->next; + } + +} + From af5fd8c819617f8232e9a78e6d41fa65ea2d7235 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Mon, 2 Sep 2019 00:15:12 +0200 Subject: [PATCH 62/83] split afl-fuzz: extras --- include/afl-fuzz.h | 46 ++- src/afl-fuzz-src/afl-fuzz.c | 615 ------------------------------------ src/afl-fuzz-src/extras.c | 484 ++++++++++++++++++++++++++++ src/afl-fuzz-src/misc.c | 152 +++++++++ 4 files changed, 667 insertions(+), 630 deletions(-) create mode 100644 src/afl-fuzz-src/extras.c diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 22a78373..c50a21a7 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -468,31 +468,47 @@ void trim_py(char**, size_t*); /* Queue */ -void mark_as_det_done(struct queue_entry* q); -void mark_as_variable(struct queue_entry* q); -void mark_as_redundant(struct queue_entry* q, u8 state); -void add_to_queue(u8* fname, u32 len, u8 passed_det); +void mark_as_det_done(struct queue_entry*); +void mark_as_variable(struct queue_entry*); +void mark_as_redundant(struct queue_entry*, u8); +void add_to_queue(u8*, u32, u8); void destroy_queue(void); -void update_bitmap_score(struct queue_entry* q); +void update_bitmap_score(struct queue_entry*); void cull_queue(void); /* Bitmap */ void write_bitmap(void); -void read_bitmap(u8* fname); -u8 has_new_bits(u8* virgin_map); -u32 count_bits(u8* mem); -u32 count_bytes(u8* mem); -u32 count_non_255_bytes(u8* mem); +void read_bitmap(u8*); +u8 has_new_bits(u8*); +u32 count_bits(u8*); +u32 count_bytes(u8*); +u32 count_non_255_bytes(u8*); #ifdef __x86_64__ -void simplify_trace(u64* mem); -void classify_counts(u64* mem); +void simplify_trace(u64*); +void classify_counts(u64*); #else -void simplify_trace(u32* mem); -void classify_counts(u32* mem); +void simplify_trace(u32*); +void classify_counts(u32*); #endif void init_count_class16(void); -void minimize_bits(u8* dst, u8* src); +void minimize_bits(u8*, u8*); + +/* Misc */ + +u8* DI(u64); +u8* DF(double); +u8* DMS(u64); +u8* DTD(u64, u64); + +/* Extras */ + +void load_extras_file(u8*, u32*, u32*, u32); +void load_extras(u8*); +void maybe_add_auto(u8*, u32); +void save_auto(void); +void load_auto(void); +void destroy_extras(void); /**** Inline routines ****/ diff --git a/src/afl-fuzz-src/afl-fuzz.c b/src/afl-fuzz-src/afl-fuzz.c index dcb97387..7cc05a39 100644 --- a/src/afl-fuzz-src/afl-fuzz.c +++ b/src/afl-fuzz-src/afl-fuzz.c @@ -211,159 +211,6 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { #endif /* !IGNORE_FINDS */ -/* Describe integer. Uses 12 cyclic static buffers for return values. The value - returned should be five characters or less for all the integers we reasonably - expect to see. 
*/ - -static u8* DI(u64 val) { - - static u8 tmp[12][16]; - static u8 cur; - - cur = (cur + 1) % 12; - -#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \ - if (val < (_divisor) * (_limit_mult)) { \ - sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \ - return tmp[cur]; \ - } \ - } while (0) - - /* 0-9999 */ - CHK_FORMAT(1, 10000, "%llu", u64); - - /* 10.0k - 99.9k */ - CHK_FORMAT(1000, 99.95, "%0.01fk", double); - - /* 100k - 999k */ - CHK_FORMAT(1000, 1000, "%lluk", u64); - - /* 1.00M - 9.99M */ - CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double); - - /* 10.0M - 99.9M */ - CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double); - - /* 100M - 999M */ - CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64); - - /* 1.00G - 9.99G */ - CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double); - - /* 10.0G - 99.9G */ - CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double); - - /* 100G - 999G */ - CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64); - - /* 1.00T - 9.99G */ - CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double); - - /* 10.0T - 99.9T */ - CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double); - - /* 100T+ */ - strcpy(tmp[cur], "infty"); - return tmp[cur]; - -} - - -/* Describe float. Similar to the above, except with a single - static buffer. */ - -static u8* DF(double val) { - - static u8 tmp[16]; - - if (val < 99.995) { - sprintf(tmp, "%0.02f", val); - return tmp; - } - - if (val < 999.95) { - sprintf(tmp, "%0.01f", val); - return tmp; - } - - return DI((u64)val); - -} - - -/* Describe integer as memory size. */ - -static u8* DMS(u64 val) { - - static u8 tmp[12][16]; - static u8 cur; - - cur = (cur + 1) % 12; - - /* 0-9999 */ - CHK_FORMAT(1, 10000, "%llu B", u64); - - /* 10.0k - 99.9k */ - CHK_FORMAT(1024, 99.95, "%0.01f kB", double); - - /* 100k - 999k */ - CHK_FORMAT(1024, 1000, "%llu kB", u64); - - /* 1.00M - 9.99M */ - CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double); - - /* 10.0M - 99.9M */ - CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double); - - /* 100M - 999M */ - CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64); - - /* 1.00G - 9.99G */ - CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double); - - /* 10.0G - 99.9G */ - CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double); - - /* 100G - 999G */ - CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64); - - /* 1.00T - 9.99G */ - CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double); - - /* 10.0T - 99.9T */ - CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double); - -#undef CHK_FORMAT - - /* 100T+ */ - strcpy(tmp[cur], "infty"); - return tmp[cur]; - -} - - -/* Describe time delta. Returns one static buffer, 34 chars of less. */ - -static u8* DTD(u64 cur_ms, u64 event_ms) { - - static u8 tmp[64]; - u64 delta; - s32 t_d, t_h, t_m, t_s; - - if (!event_ms) return "none seen yet"; - - delta = cur_ms - event_ms; - - t_d = delta / 1000 / 60 / 60 / 24; - t_h = (delta / 1000 / 60 / 60) % 24; - t_m = (delta / 1000 / 60) % 60; - t_s = (delta / 1000) % 60; - - sprintf(tmp, "%s days, %d hrs, %d min, %d sec", DI(t_d), t_h, t_m, t_s); - return tmp; - -} - /* Load postprocessor, if available. */ @@ -516,468 +363,6 @@ static void read_testcases(void) { } -/* Helper function for load_extras. 
*/ - -static int compare_extras_len(const void* p1, const void* p2) { - struct extra_data *e1 = (struct extra_data*)p1, - *e2 = (struct extra_data*)p2; - - return e1->len - e2->len; -} - -static int compare_extras_use_d(const void* p1, const void* p2) { - struct extra_data *e1 = (struct extra_data*)p1, - *e2 = (struct extra_data*)p2; - - return e2->hit_cnt - e1->hit_cnt; -} - - -/* Read extras from a file, sort by size. */ - -static void load_extras_file(u8* fname, u32* min_len, u32* max_len, - u32 dict_level) { - - FILE* f; - u8 buf[MAX_LINE]; - u8 *lptr; - u32 cur_line = 0; - - f = fopen(fname, "r"); - - if (!f) PFATAL("Unable to open '%s'", fname); - - while ((lptr = fgets(buf, MAX_LINE, f))) { - - u8 *rptr, *wptr; - u32 klen = 0; - - ++cur_line; - - /* Trim on left and right. */ - - while (isspace(*lptr)) ++lptr; - - rptr = lptr + strlen(lptr) - 1; - while (rptr >= lptr && isspace(*rptr)) --rptr; - ++rptr; - *rptr = 0; - - /* Skip empty lines and comments. */ - - if (!*lptr || *lptr == '#') continue; - - /* All other lines must end with '"', which we can consume. */ - - --rptr; - - if (rptr < lptr || *rptr != '"') - FATAL("Malformed name=\"value\" pair in line %u.", cur_line); - - *rptr = 0; - - /* Skip alphanumerics and dashes (label). */ - - while (isalnum(*lptr) || *lptr == '_') ++lptr; - - /* If @number follows, parse that. */ - - if (*lptr == '@') { - - ++lptr; - if (atoi(lptr) > dict_level) continue; - while (isdigit(*lptr)) ++lptr; - - } - - /* Skip whitespace and = signs. */ - - while (isspace(*lptr) || *lptr == '=') ++lptr; - - /* Consume opening '"'. */ - - if (*lptr != '"') - FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line); - - ++lptr; - - if (!*lptr) FATAL("Empty keyword in line %u.", cur_line); - - /* Okay, let's allocate memory and copy data between "...", handling - \xNN escaping, \\, and \". */ - - extras = ck_realloc_block(extras, (extras_cnt + 1) * - sizeof(struct extra_data)); - - wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr); - - while (*lptr) { - - char* hexdigits = "0123456789abcdef"; - - switch (*lptr) { - - case 1 ... 31: - case 128 ... 255: - FATAL("Non-printable characters in line %u.", cur_line); - - case '\\': - - ++lptr; - - if (*lptr == '\\' || *lptr == '"') { - *(wptr++) = *(lptr++); - klen++; - break; - } - - if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2])) - FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line); - - *(wptr++) = - ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) | - (strchr(hexdigits, tolower(lptr[2])) - hexdigits); - - lptr += 3; - ++klen; - - break; - - default: - - *(wptr++) = *(lptr++); - ++klen; - - } - - } - - extras[extras_cnt].len = klen; - - if (extras[extras_cnt].len > MAX_DICT_FILE) - FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, - DMS(klen), DMS(MAX_DICT_FILE)); - - if (*min_len > klen) *min_len = klen; - if (*max_len < klen) *max_len = klen; - - ++extras_cnt; - - } - - fclose(f); - -} - - -/* Read extras from the extras directory and sort them by size. */ - -static void load_extras(u8* dir) { - - DIR* d; - struct dirent* de; - u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0; - u8* x; - - /* If the name ends with @, extract level and continue. 
*/ - - if ((x = strchr(dir, '@'))) { - - *x = 0; - dict_level = atoi(x + 1); - - } - - ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level); - - d = opendir(dir); - - if (!d) { - - if (errno == ENOTDIR) { - load_extras_file(dir, &min_len, &max_len, dict_level); - goto check_and_sort; - } - - PFATAL("Unable to open '%s'", dir); - - } - - if (x) FATAL("Dictionary levels not supported for directories."); - - while ((de = readdir(d))) { - - struct stat st; - u8* fn = alloc_printf("%s/%s", dir, de->d_name); - s32 fd; - - if (lstat(fn, &st) || access(fn, R_OK)) - PFATAL("Unable to access '%s'", fn); - - /* This also takes care of . and .. */ - if (!S_ISREG(st.st_mode) || !st.st_size) { - - ck_free(fn); - continue; - - } - - if (st.st_size > MAX_DICT_FILE) - FATAL("Extra '%s' is too big (%s, limit is %s)", fn, - DMS(st.st_size), DMS(MAX_DICT_FILE)); - - if (min_len > st.st_size) min_len = st.st_size; - if (max_len < st.st_size) max_len = st.st_size; - - extras = ck_realloc_block(extras, (extras_cnt + 1) * - sizeof(struct extra_data)); - - extras[extras_cnt].data = ck_alloc(st.st_size); - extras[extras_cnt].len = st.st_size; - - fd = open(fn, O_RDONLY); - - if (fd < 0) PFATAL("Unable to open '%s'", fn); - - ck_read(fd, extras[extras_cnt].data, st.st_size, fn); - - close(fd); - ck_free(fn); - - ++extras_cnt; - - } - - closedir(d); - -check_and_sort: - - if (!extras_cnt) FATAL("No usable files in '%s'", dir); - - qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len); - - OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, - DMS(min_len), DMS(max_len)); - - if (max_len > 32) - WARNF("Some tokens are relatively large (%s) - consider trimming.", - DMS(max_len)); - - if (extras_cnt > MAX_DET_EXTRAS) - WARNF("More than %d tokens - will use them probabilistically.", - MAX_DET_EXTRAS); - -} - - - - -/* Helper function for maybe_add_auto() */ - -static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) { - - while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1; - return 0; - -} - - -/* Maybe add automatic extra. */ - -static void maybe_add_auto(u8* mem, u32 len) { - - u32 i; - - /* Allow users to specify that they don't want auto dictionaries. */ - - if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return; - - /* Skip runs of identical bytes. */ - - for (i = 1; i < len; ++i) - if (mem[0] ^ mem[i]) break; - - if (i == len) return; - - /* Reject builtin interesting values. */ - - if (len == 2) { - - i = sizeof(interesting_16) >> 1; - - while (i--) - if (*((u16*)mem) == interesting_16[i] || - *((u16*)mem) == SWAP16(interesting_16[i])) return; - - } - - if (len == 4) { - - i = sizeof(interesting_32) >> 2; - - while (i--) - if (*((u32*)mem) == interesting_32[i] || - *((u32*)mem) == SWAP32(interesting_32[i])) return; - - } - - /* Reject anything that matches existing extras. Do a case-insensitive - match. We optimize by exploiting the fact that extras[] are sorted - by size. */ - - for (i = 0; i < extras_cnt; ++i) - if (extras[i].len >= len) break; - - for (; i < extras_cnt && extras[i].len == len; ++i) - if (!memcmp_nocase(extras[i].data, mem, len)) return; - - /* Last but not least, check a_extras[] for matches. There are no - guarantees of a particular sort order. */ - - auto_changed = 1; - - for (i = 0; i < a_extras_cnt; ++i) { - - if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) { - - a_extras[i].hit_cnt++; - goto sort_a_extras; - - } - - } - - /* At this point, looks like we're dealing with a new entry. 
So, let's - append it if we have room. Otherwise, let's randomly evict some other - entry from the bottom half of the list. */ - - if (a_extras_cnt < MAX_AUTO_EXTRAS) { - - a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) * - sizeof(struct extra_data)); - - a_extras[a_extras_cnt].data = ck_memdup(mem, len); - a_extras[a_extras_cnt].len = len; - ++a_extras_cnt; - - } else { - - i = MAX_AUTO_EXTRAS / 2 + - UR((MAX_AUTO_EXTRAS + 1) / 2); - - ck_free(a_extras[i].data); - - a_extras[i].data = ck_memdup(mem, len); - a_extras[i].len = len; - a_extras[i].hit_cnt = 0; - - } - -sort_a_extras: - - /* First, sort all auto extras by use count, descending order. */ - - qsort(a_extras, a_extras_cnt, sizeof(struct extra_data), - compare_extras_use_d); - - /* Then, sort the top USE_AUTO_EXTRAS entries by size. */ - - qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), - sizeof(struct extra_data), compare_extras_len); - -} - - -/* Save automatically generated extras. */ - -static void save_auto(void) { - - u32 i; - - if (!auto_changed) return; - auto_changed = 0; - - for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); ++i) { - - u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i); - s32 fd; - - fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); - - if (fd < 0) PFATAL("Unable to create '%s'", fn); - - ck_write(fd, a_extras[i].data, a_extras[i].len, fn); - - close(fd); - ck_free(fn); - - } - -} - - -/* Load automatically generated extras. */ - -static void load_auto(void) { - - u32 i; - - for (i = 0; i < USE_AUTO_EXTRAS; ++i) { - - u8 tmp[MAX_AUTO_EXTRA + 1]; - u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i); - s32 fd, len; - - fd = open(fn, O_RDONLY, 0600); - - if (fd < 0) { - - if (errno != ENOENT) PFATAL("Unable to open '%s'", fn); - ck_free(fn); - break; - - } - - /* We read one byte more to cheaply detect tokens that are too - long (and skip them). */ - - len = read(fd, tmp, MAX_AUTO_EXTRA + 1); - - if (len < 0) PFATAL("Unable to read from '%s'", fn); - - if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA) - maybe_add_auto(tmp, len); - - close(fd); - ck_free(fn); - - } - - if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i); - else OKF("No auto-generated dictionary tokens to reuse."); - -} - - -/* Destroy extras. */ - -static void destroy_extras(void) { - - u32 i; - - for (i = 0; i < extras_cnt; ++i) - ck_free(extras[i].data); - - ck_free(extras); - - for (i = 0; i < a_extras_cnt; ++i) - ck_free(a_extras[i].data); - - ck_free(a_extras); - -} - - /* Execute target application, monitoring for timeouts. Return status information. The called program will update trace_bits[]. */ diff --git a/src/afl-fuzz-src/extras.c b/src/afl-fuzz-src/extras.c new file mode 100644 index 00000000..1f52181d --- /dev/null +++ b/src/afl-fuzz-src/extras.c @@ -0,0 +1,484 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#include "afl-fuzz.h" + + +/* Helper function for load_extras. 
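+
+   Editor's note (not part of the original patch): both are qsort()
+   comparators over struct extra_data, used as in
+
+     qsort(extras, extras_cnt, sizeof(struct extra_data),
+           compare_extras_len);
+
+   compare_extras_len orders tokens by ascending length, while
+   compare_extras_use_d orders them by descending hit count.
+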
*/ + +static int compare_extras_len(const void* p1, const void* p2) { + struct extra_data *e1 = (struct extra_data*)p1, + *e2 = (struct extra_data*)p2; + + return e1->len - e2->len; +} + +static int compare_extras_use_d(const void* p1, const void* p2) { + struct extra_data *e1 = (struct extra_data*)p1, + *e2 = (struct extra_data*)p2; + + return e2->hit_cnt - e1->hit_cnt; +} + + +/* Read extras from a file, sort by size. */ + +void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { + + FILE* f; + u8 buf[MAX_LINE]; + u8 *lptr; + u32 cur_line = 0; + + f = fopen(fname, "r"); + + if (!f) PFATAL("Unable to open '%s'", fname); + + while ((lptr = fgets(buf, MAX_LINE, f))) { + + u8 *rptr, *wptr; + u32 klen = 0; + + ++cur_line; + + /* Trim on left and right. */ + + while (isspace(*lptr)) ++lptr; + + rptr = lptr + strlen(lptr) - 1; + while (rptr >= lptr && isspace(*rptr)) --rptr; + ++rptr; + *rptr = 0; + + /* Skip empty lines and comments. */ + + if (!*lptr || *lptr == '#') continue; + + /* All other lines must end with '"', which we can consume. */ + + --rptr; + + if (rptr < lptr || *rptr != '"') + FATAL("Malformed name=\"value\" pair in line %u.", cur_line); + + *rptr = 0; + + /* Skip alphanumerics and dashes (label). */ + + while (isalnum(*lptr) || *lptr == '_') ++lptr; + + /* If @number follows, parse that. */ + + if (*lptr == '@') { + + ++lptr; + if (atoi(lptr) > dict_level) continue; + while (isdigit(*lptr)) ++lptr; + + } + + /* Skip whitespace and = signs. */ + + while (isspace(*lptr) || *lptr == '=') ++lptr; + + /* Consume opening '"'. */ + + if (*lptr != '"') + FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line); + + ++lptr; + + if (!*lptr) FATAL("Empty keyword in line %u.", cur_line); + + /* Okay, let's allocate memory and copy data between "...", handling + \xNN escaping, \\, and \". */ + + extras = ck_realloc_block(extras, (extras_cnt + 1) * + sizeof(struct extra_data)); + + wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr); + + while (*lptr) { + + char* hexdigits = "0123456789abcdef"; + + switch (*lptr) { + + case 1 ... 31: + case 128 ... 255: + FATAL("Non-printable characters in line %u.", cur_line); + + case '\\': + + ++lptr; + + if (*lptr == '\\' || *lptr == '"') { + *(wptr++) = *(lptr++); + klen++; + break; + } + + if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2])) + FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line); + + *(wptr++) = + ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) | + (strchr(hexdigits, tolower(lptr[2])) - hexdigits); + + lptr += 3; + ++klen; + + break; + + default: + + *(wptr++) = *(lptr++); + ++klen; + + } + + } + + extras[extras_cnt].len = klen; + + if (extras[extras_cnt].len > MAX_DICT_FILE) + FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, + DMS(klen), DMS(MAX_DICT_FILE)); + + if (*min_len > klen) *min_len = klen; + if (*max_len < klen) *max_len = klen; + + ++extras_cnt; + + } + + fclose(f); + +} + + +/* Read extras from the extras directory and sort them by size. */ + +void load_extras(u8* dir) { + + DIR* d; + struct dirent* de; + u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0; + u8* x; + + /* If the name ends with @, extract level and continue. 
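+
+     (Editor's note, not part of the original patch: this implements the
+     "-x path@N" dictionary syntax, e.g.
+
+       afl-fuzz -x keywords.dict@2 -i in -o out -- ./target @@
+
+     where entries annotated with a level above 2 are skipped; levels are
+     only honored for dictionary files, not for directories.)
+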
*/ + + if ((x = strchr(dir, '@'))) { + + *x = 0; + dict_level = atoi(x + 1); + + } + + ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level); + + d = opendir(dir); + + if (!d) { + + if (errno == ENOTDIR) { + load_extras_file(dir, &min_len, &max_len, dict_level); + goto check_and_sort; + } + + PFATAL("Unable to open '%s'", dir); + + } + + if (x) FATAL("Dictionary levels not supported for directories."); + + while ((de = readdir(d))) { + + struct stat st; + u8* fn = alloc_printf("%s/%s", dir, de->d_name); + s32 fd; + + if (lstat(fn, &st) || access(fn, R_OK)) + PFATAL("Unable to access '%s'", fn); + + /* This also takes care of . and .. */ + if (!S_ISREG(st.st_mode) || !st.st_size) { + + ck_free(fn); + continue; + + } + + if (st.st_size > MAX_DICT_FILE) + FATAL("Extra '%s' is too big (%s, limit is %s)", fn, + DMS(st.st_size), DMS(MAX_DICT_FILE)); + + if (min_len > st.st_size) min_len = st.st_size; + if (max_len < st.st_size) max_len = st.st_size; + + extras = ck_realloc_block(extras, (extras_cnt + 1) * + sizeof(struct extra_data)); + + extras[extras_cnt].data = ck_alloc(st.st_size); + extras[extras_cnt].len = st.st_size; + + fd = open(fn, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", fn); + + ck_read(fd, extras[extras_cnt].data, st.st_size, fn); + + close(fd); + ck_free(fn); + + ++extras_cnt; + + } + + closedir(d); + +check_and_sort: + + if (!extras_cnt) FATAL("No usable files in '%s'", dir); + + qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len); + + OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, + DMS(min_len), DMS(max_len)); + + if (max_len > 32) + WARNF("Some tokens are relatively large (%s) - consider trimming.", + DMS(max_len)); + + if (extras_cnt > MAX_DET_EXTRAS) + WARNF("More than %d tokens - will use them probabilistically.", + MAX_DET_EXTRAS); + +} + + + +/* Helper function for maybe_add_auto() */ + +static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) { + + while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1; + return 0; + +} + + +/* Maybe add automatic extra. */ + +void maybe_add_auto(u8* mem, u32 len) { + + u32 i; + + /* Allow users to specify that they don't want auto dictionaries. */ + + if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return; + + /* Skip runs of identical bytes. */ + + for (i = 1; i < len; ++i) + if (mem[0] ^ mem[i]) break; + + if (i == len) return; + + /* Reject builtin interesting values. */ + + if (len == 2) { + + i = sizeof(interesting_16) >> 1; + + while (i--) + if (*((u16*)mem) == interesting_16[i] || + *((u16*)mem) == SWAP16(interesting_16[i])) return; + + } + + if (len == 4) { + + i = sizeof(interesting_32) >> 2; + + while (i--) + if (*((u32*)mem) == interesting_32[i] || + *((u32*)mem) == SWAP32(interesting_32[i])) return; + + } + + /* Reject anything that matches existing extras. Do a case-insensitive + match. We optimize by exploiting the fact that extras[] are sorted + by size. */ + + for (i = 0; i < extras_cnt; ++i) + if (extras[i].len >= len) break; + + for (; i < extras_cnt && extras[i].len == len; ++i) + if (!memcmp_nocase(extras[i].data, mem, len)) return; + + /* Last but not least, check a_extras[] for matches. There are no + guarantees of a particular sort order. */ + + auto_changed = 1; + + for (i = 0; i < a_extras_cnt; ++i) { + + if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) { + + a_extras[i].hit_cnt++; + goto sort_a_extras; + + } + + } + + /* At this point, looks like we're dealing with a new entry. 
So, let's + append it if we have room. Otherwise, let's randomly evict some other + entry from the bottom half of the list. */ + + if (a_extras_cnt < MAX_AUTO_EXTRAS) { + + a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) * + sizeof(struct extra_data)); + + a_extras[a_extras_cnt].data = ck_memdup(mem, len); + a_extras[a_extras_cnt].len = len; + ++a_extras_cnt; + + } else { + + i = MAX_AUTO_EXTRAS / 2 + + UR((MAX_AUTO_EXTRAS + 1) / 2); + + ck_free(a_extras[i].data); + + a_extras[i].data = ck_memdup(mem, len); + a_extras[i].len = len; + a_extras[i].hit_cnt = 0; + + } + +sort_a_extras: + + /* First, sort all auto extras by use count, descending order. */ + + qsort(a_extras, a_extras_cnt, sizeof(struct extra_data), + compare_extras_use_d); + + /* Then, sort the top USE_AUTO_EXTRAS entries by size. */ + + qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), + sizeof(struct extra_data), compare_extras_len); + +} + + +/* Save automatically generated extras. */ + +void save_auto(void) { + + u32 i; + + if (!auto_changed) return; + auto_changed = 0; + + for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); ++i) { + + u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i); + s32 fd; + + fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); + + if (fd < 0) PFATAL("Unable to create '%s'", fn); + + ck_write(fd, a_extras[i].data, a_extras[i].len, fn); + + close(fd); + ck_free(fn); + + } + +} + + +/* Load automatically generated extras. */ + +void load_auto(void) { + + u32 i; + + for (i = 0; i < USE_AUTO_EXTRAS; ++i) { + + u8 tmp[MAX_AUTO_EXTRA + 1]; + u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i); + s32 fd, len; + + fd = open(fn, O_RDONLY, 0600); + + if (fd < 0) { + + if (errno != ENOENT) PFATAL("Unable to open '%s'", fn); + ck_free(fn); + break; + + } + + /* We read one byte more to cheaply detect tokens that are too + long (and skip them). */ + + len = read(fd, tmp, MAX_AUTO_EXTRA + 1); + + if (len < 0) PFATAL("Unable to read from '%s'", fn); + + if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA) + maybe_add_auto(tmp, len); + + close(fd); + ck_free(fn); + + } + + if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i); + else OKF("No auto-generated dictionary tokens to reuse."); + +} + + +/* Destroy extras. */ + +void destroy_extras(void) { + + u32 i; + + for (i = 0; i < extras_cnt; ++i) + ck_free(extras[i].data); + + ck_free(extras); + + for (i = 0; i < a_extras_cnt; ++i) + ck_free(a_extras[i].data); + + ck_free(a_extras); + +} + diff --git a/src/afl-fuzz-src/misc.c b/src/afl-fuzz-src/misc.c index 58e57c8f..69ff2f6b 100644 --- a/src/afl-fuzz-src/misc.c +++ b/src/afl-fuzz-src/misc.c @@ -22,3 +22,155 @@ #include "afl-fuzz.h" +/* Describe integer. Uses 12 cyclic static buffers for return values. The value + returned should be five characters or less for all the integers we reasonably + expect to see. 
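+
+   Editor's sketch of expected output (illustration only, not part of the
+   original patch; SAYF is from debug.h):
+
+     SAYF("%s\n", DI(2));          // "2"
+     SAYF("%s\n", DI(12345));      // "12.3k"
+     SAYF("%s\n", DI(123456789));  // "123M"
+
+   Because twelve buffers rotate, up to 12 DI() results can appear in a
+   single printf-style call before a buffer is reused.
+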
*/
+
+u8* DI(u64 val) {
+
+  static u8 tmp[12][16];
+  static u8 cur;
+
+  cur = (cur + 1) % 12;
+
+#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
+    if (val < (_divisor) * (_limit_mult)) { \
+      sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
+      return tmp[cur]; \
+    } \
+  } while (0)
+
+  /* 0-9999 */
+  CHK_FORMAT(1, 10000, "%llu", u64);
+
+  /* 10.0k - 99.9k */
+  CHK_FORMAT(1000, 99.95, "%0.01fk", double);
+
+  /* 100k - 999k */
+  CHK_FORMAT(1000, 1000, "%lluk", u64);
+
+  /* 1.00M - 9.99M */
+  CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double);
+
+  /* 10.0M - 99.9M */
+  CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double);
+
+  /* 100M - 999M */
+  CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64);
+
+  /* 1.00G - 9.99G */
+  CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double);
+
+  /* 10.0G - 99.9G */
+  CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double);
+
+  /* 100G - 999G */
+  CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64);
+
+  /* 1.00T - 9.99T */
+  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double);
+
+  /* 10.0T - 99.9T */
+  CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double);
+
+  /* 100T+ */
+  strcpy(tmp[cur], "infty");
+  return tmp[cur];
+
+}
+
+
+/* Describe float. Similar to the above, except with a single
+   static buffer. */
+
+u8* DF(double val) {
+
+  static u8 tmp[16];
+
+  if (val < 99.995) {
+    sprintf(tmp, "%0.02f", val);
+    return tmp;
+  }
+
+  if (val < 999.95) {
+    sprintf(tmp, "%0.01f", val);
+    return tmp;
+  }
+
+  return DI((u64)val);
+
+}
+
+
+/* Describe integer as memory size. */
+
+u8* DMS(u64 val) {
+
+  static u8 tmp[12][16];
+  static u8 cur;
+
+  cur = (cur + 1) % 12;
+
+  /* 0-9999 */
+  CHK_FORMAT(1, 10000, "%llu B", u64);
+
+  /* 10.0k - 99.9k */
+  CHK_FORMAT(1024, 99.95, "%0.01f kB", double);
+
+  /* 100k - 999k */
+  CHK_FORMAT(1024, 1000, "%llu kB", u64);
+
+  /* 1.00M - 9.99M */
+  CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double);
+
+  /* 10.0M - 99.9M */
+  CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double);
+
+  /* 100M - 999M */
+  CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64);
+
+  /* 1.00G - 9.99G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double);
+
+  /* 10.0G - 99.9G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double);
+
+  /* 100G - 999G */
+  CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64);
+
+  /* 1.00T - 9.99T */
+  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double);
+
+  /* 10.0T - 99.9T */
+  CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double);
+
+#undef CHK_FORMAT
+
+  /* 100T+ */
+  strcpy(tmp[cur], "infty");
+  return tmp[cur];
+
+}
+
+
+/* Describe time delta. Returns one static buffer, 34 chars or less.
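+
+   Editor's sketch (not part of the original patch; assumes the
+   get_cur_time() helper and the start_time global seen elsewhere in this
+   refactoring):
+
+     u64 now = get_cur_time();
+     SAYF("run time: %s\n", DTD(now, start_time));
+     // e.g. "run time: 0 days, 2 hrs, 13 min, 5 sec"
+
+   A zero event_ms yields the string "none seen yet" instead of a delta.
+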
*/ + +u8* DTD(u64 cur_ms, u64 event_ms) { + + static u8 tmp[64]; + u64 delta; + s32 t_d, t_h, t_m, t_s; + + if (!event_ms) return "none seen yet"; + + delta = cur_ms - event_ms; + + t_d = delta / 1000 / 60 / 60 / 24; + t_h = (delta / 1000 / 60 / 60) % 24; + t_m = (delta / 1000 / 60) % 60; + t_s = (delta / 1000) % 60; + + sprintf(tmp, "%s days, %d hrs, %d min, %d sec", DI(t_d), t_h, t_m, t_s); + return tmp; + +} From e76ad2980faff269a9ac523cb71237462985b3e6 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 2 Sep 2019 09:41:52 +0200 Subject: [PATCH 63/83] added force-ui env --- config.h | 363 ------------------------------------ docs/ChangeLog | 8 + docs/env_variables.txt | 3 + include/config.h | 5 + src/afl-fuzz-src/afl-fuzz.c | 5 + 5 files changed, 21 insertions(+), 363 deletions(-) delete mode 100644 config.h diff --git a/config.h b/config.h deleted file mode 100644 index 29c33d46..00000000 --- a/config.h +++ /dev/null @@ -1,363 +0,0 @@ -/* - american fuzzy lop plus plus - vaguely configurable bits - ---------------------------------------------- - - Written and maintained by Michal Zalewski - - Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at: - - http://www.apache.org/licenses/LICENSE-2.0 - - */ - -#ifndef _HAVE_CONFIG_H -#define _HAVE_CONFIG_H - -#include "types.h" - -/* Version string: */ - -#define VERSION "++2.53d" // c = release, d = volatile github dev - -/****************************************************** - * * - * Settings that may be of interest to power users: * - * * - ******************************************************/ - -/* Comment out to disable terminal colors (note that this makes afl-analyze - a lot less nice): */ - -#define USE_COLOR - -/* Comment out to disable fancy ANSI boxes and use poor man's 7-bit UI: */ - -#define FANCY_BOXES - -/* Default timeout for fuzzed code (milliseconds). This is the upper bound, - also used for detecting hangs; the actual value is auto-scaled: */ - -#define EXEC_TIMEOUT 1000 - -/* Timeout rounding factor when auto-scaling (milliseconds): */ - -#define EXEC_TM_ROUND 20 - -/* Default memory limit for child process (MB): */ - -#ifndef __x86_64__ -# define MEM_LIMIT 25 -#else -# define MEM_LIMIT 50 -#endif /* ^!__x86_64__ */ - -/* Default memory limit when running in QEMU mode (MB): */ - -#define MEM_LIMIT_QEMU 200 - -/* Default memory limit when running in Unicorn mode (MB): */ - -#define MEM_LIMIT_UNICORN 200 - -/* Number of calibration cycles per every new test case (and for test - cases that show variable behavior): */ - -#define CAL_CYCLES 8 -#define CAL_CYCLES_LONG 40 - -/* Number of subsequent timeouts before abandoning an input file: */ - -#define TMOUT_LIMIT 250 - -/* Maximum number of unique hangs or crashes to record: */ - -#define KEEP_UNIQUE_HANG 500 -#define KEEP_UNIQUE_CRASH 5000 - -/* Baseline number of random tweaks during a single 'havoc' stage: */ - -#define HAVOC_CYCLES 256 -#define HAVOC_CYCLES_INIT 1024 - -/* Maximum multiplier for the above (should be a power of two, beware - of 32-bit int overflows): */ - -#define HAVOC_MAX_MULT 16 -#define HAVOC_MAX_MULT_MOPT 32 - -/* Absolute minimum number of havoc cycles (after all adjustments): */ - -#define HAVOC_MIN 16 - -/* Power Schedule Divisor */ -#define POWER_BETA 1 -#define MAX_FACTOR (POWER_BETA * 32) - -/* Maximum stacking for havoc-stage tweaks. 
The actual value is calculated - like this: - - n = random between 1 and HAVOC_STACK_POW2 - stacking = 2^n - - In other words, the default (n = 7) produces 2, 4, 8, 16, 32, 64, or - 128 stacked tweaks: */ - -#define HAVOC_STACK_POW2 7 - -/* Caps on block sizes for cloning and deletion operations. Each of these - ranges has a 33% probability of getting picked, except for the first - two cycles where smaller blocks are favored: */ - -#define HAVOC_BLK_SMALL 32 -#define HAVOC_BLK_MEDIUM 128 -#define HAVOC_BLK_LARGE 1500 - -/* Extra-large blocks, selected very rarely (<5% of the time): */ - -#define HAVOC_BLK_XL 32768 - -/* Probabilities of skipping non-favored entries in the queue, expressed as - percentages: */ - -#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */ -#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */ -#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */ - -/* Splicing cycle count: */ - -#define SPLICE_CYCLES 15 - -/* Nominal per-splice havoc cycle length: */ - -#define SPLICE_HAVOC 32 - -/* Maximum offset for integer addition / subtraction stages: */ - -#define ARITH_MAX 35 - -/* Limits for the test case trimmer. The absolute minimum chunk size; and - the starting and ending divisors for chopping up the input file: */ - -#define TRIM_MIN_BYTES 4 -#define TRIM_START_STEPS 16 -#define TRIM_END_STEPS 1024 - -/* Maximum size of input file, in bytes (keep under 100MB): */ - -#define MAX_FILE (1 * 1024 * 1024) - -/* The same, for the test case minimizer: */ - -#define TMIN_MAX_FILE (10 * 1024 * 1024) - -/* Block normalization steps for afl-tmin: */ - -#define TMIN_SET_MIN_SIZE 4 -#define TMIN_SET_STEPS 128 - -/* Maximum dictionary token size (-x), in bytes: */ - -#define MAX_DICT_FILE 128 - -/* Length limits for auto-detected dictionary tokens: */ - -#define MIN_AUTO_EXTRA 3 -#define MAX_AUTO_EXTRA 32 - -/* Maximum number of user-specified dictionary tokens to use in deterministic - steps; past this point, the "extras/user" step will be still carried out, - but with proportionally lower odds: */ - -#define MAX_DET_EXTRAS 200 - -/* Maximum number of auto-extracted dictionary tokens to actually use in fuzzing - (first value), and to keep in memory as candidates. The latter should be much - higher than the former. */ - -#define USE_AUTO_EXTRAS 50 -#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10) - -/* Scaling factor for the effector map used to skip some of the more - expensive deterministic steps. The actual divisor is set to - 2^EFF_MAP_SCALE2 bytes: */ - -#define EFF_MAP_SCALE2 3 - -/* Minimum input file length at which the effector logic kicks in: */ - -#define EFF_MIN_LEN 128 - -/* Maximum effector density past which everything is just fuzzed - unconditionally (%): */ - -#define EFF_MAX_PERC 90 - -/* UI refresh frequency (Hz): */ - -#define UI_TARGET_HZ 5 - -/* Fuzzer stats file and plot update intervals (sec): */ - -#define STATS_UPDATE_SEC 60 -#define PLOT_UPDATE_SEC 5 - -/* Smoothing divisor for CPU load and exec speed stats (1 - no smoothing). */ - -#define AVG_SMOOTHING 16 - -/* Sync interval (every n havoc cycles): */ - -#define SYNC_INTERVAL 5 - -/* Output directory reuse grace period (minutes): */ - -#define OUTPUT_GRACE 25 - -/* Uncomment to use simple file names (id_NNNNNN): */ - -// #define SIMPLE_FILES - -/* List of interesting values to use in fuzzing. 
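-
-   Editor's note (not part of the original file): these macros are meant
-   to be consumed as array initializers, as in afl-fuzz:
-
-     static s8  interesting_8[]  = { INTERESTING_8 };
-     static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 };
-     static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 };
-
-   so each wider table also inherits all narrower values.
-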
*/ - -#define INTERESTING_8 \ - -128, /* Overflow signed 8-bit when decremented */ \ - -1, /* */ \ - 0, /* */ \ - 1, /* */ \ - 16, /* One-off with common buffer size */ \ - 32, /* One-off with common buffer size */ \ - 64, /* One-off with common buffer size */ \ - 100, /* One-off with common buffer size */ \ - 127 /* Overflow signed 8-bit when incremented */ - -#define INTERESTING_16 \ - -32768, /* Overflow signed 16-bit when decremented */ \ - -129, /* Overflow signed 8-bit */ \ - 128, /* Overflow signed 8-bit */ \ - 255, /* Overflow unsig 8-bit when incremented */ \ - 256, /* Overflow unsig 8-bit */ \ - 512, /* One-off with common buffer size */ \ - 1000, /* One-off with common buffer size */ \ - 1024, /* One-off with common buffer size */ \ - 4096, /* One-off with common buffer size */ \ - 32767 /* Overflow signed 16-bit when incremented */ - -#define INTERESTING_32 \ - -2147483648LL, /* Overflow signed 32-bit when decremented */ \ - -100663046, /* Large negative number (endian-agnostic) */ \ - -32769, /* Overflow signed 16-bit */ \ - 32768, /* Overflow signed 16-bit */ \ - 65535, /* Overflow unsig 16-bit when incremented */ \ - 65536, /* Overflow unsig 16 bit */ \ - 100663045, /* Large positive number (endian-agnostic) */ \ - 2147483647 /* Overflow signed 32-bit when incremented */ - -/*********************************************************** - * * - * Really exotic stuff you probably don't want to touch: * - * * - ***********************************************************/ - -/* Call count interval between reseeding the libc PRNG from /dev/urandom: */ - -#define RESEED_RNG 10000 - -/* Maximum line length passed from GCC to 'as' and used for parsing - configuration files: */ - -#define MAX_LINE 8192 - -/* Environment variable used to pass SHM ID to the called program. */ - -#define SHM_ENV_VAR "__AFL_SHM_ID" - -/* Other less interesting, internal-only variables. */ - -#define CLANG_ENV_VAR "__AFL_CLANG_MODE" -#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK" -#define PERSIST_ENV_VAR "__AFL_PERSISTENT" -#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV" - -/* In-code signatures for deferred and persistent mode. */ - -#define PERSIST_SIG "##SIG_AFL_PERSISTENT##" -#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##" - -/* Distinctive bitmap signature used to indicate failed execution: */ - -#define EXEC_FAIL_SIG 0xfee1dead - -/* Distinctive exit code used to indicate MSAN trip condition: */ - -#define MSAN_ERROR 86 - -/* Designated file descriptors for forkserver commands (the application will - use FORKSRV_FD and FORKSRV_FD + 1): */ - -#define FORKSRV_FD 198 - -/* Fork server init timeout multiplier: we'll wait the user-selected - timeout plus this much for the fork server to spin up. */ - -#define FORK_WAIT_MULT 10 - -/* Calibration timeout adjustments, to be a bit more generous when resuming - fuzzing sessions or trying to calibrate already-added internal finds. - The first value is a percentage, the other is in milliseconds: */ - -#define CAL_TMOUT_PERC 125 -#define CAL_TMOUT_ADD 50 - -/* Number of chances to calibrate a case before giving up: */ - -#define CAL_CHANCES 3 - -/* Map size for the traced binary (2^MAP_SIZE_POW2). Must be greater than - 2; you probably want to keep it under 18 or so for performance reasons - (adjusting AFL_INST_RATIO when compiling is probably a better way to solve - problems with complex programs). You need to recompile the target binary - after changing this - otherwise, SEGVs may ensue. 
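-
-   Editor's note (not part of the original file): with the default below,
-
-     MAP_SIZE = 1 << 16 = 65536 bytes (64 kB),
-
-   i.e. one saturating hit counter per byte of shared memory.
-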
*/
-
-#define MAP_SIZE_POW2       16
-#define MAP_SIZE            (1 << MAP_SIZE_POW2)
-
-/* Maximum allocator request size (keep well under INT_MAX): */
-
-#define MAX_ALLOC           0x40000000
-
-/* A made-up hashing seed: */
-
-#define HASH_CONST          0xa5b35705
-
-/* Constants for afl-gotcpu to control busy loop timing: */
-
-#define CTEST_TARGET_MS     5000
-#define CTEST_CORE_TRG_MS   1000
-#define CTEST_BUSY_CYCLES   (10 * 1000 * 1000)
-
-/* Enable NeverZero counters in QEMU mode */
-
-#define AFL_QEMU_NOT_ZERO
-
-/* Uncomment this to use inferior block-coverage-based instrumentation. Note
-   that you need to recompile the target binary for this to have any effect: */
-
-// #define COVERAGE_ONLY
-
-/* Uncomment this to ignore hit counts and output just one bit per tuple.
-   As with the previous setting, you will need to recompile the target
-   binary: */
-
-// #define SKIP_COUNTS
-
-/* Uncomment this to use instrumentation data to record newly discovered paths,
-   but do not use them as seeds for fuzzing. This is useful for conveniently
-   measuring coverage that could be attained by a "dumb" fuzzing algorithm: */
-
-// #define IGNORE_FINDS
-
-#endif /* ! _HAVE_CONFIG_H */
diff --git a/docs/ChangeLog b/docs/ChangeLog
index 782320d6..dd5b597c 100644
--- a/docs/ChangeLog
+++ b/docs/ChangeLog
@@ -17,6 +17,14 @@ sending a mail to .
 Version ++2.53d (dev):
 ----------------------
+  - big code refactoring:
+    * all includes are now in include/
+    * all afl sources are now in src/ - see src/README
+    * afl-fuzz was split up into various individual files so that its
+      functionality can be included in other programs (e.g. forkserver,
+      memory map, etc.) and for better readability.
+    * new code indentation everywhere
+  - added AFL_FORCE_UI to show the UI even if the terminal is not detected
   - llvm 9 is now supported (still needs testing)
   - Android is now supported (thank to JoeyJiao!) - still need to modify the Makefile though
   - fix building qemu on some Ubuntus (thanks to floyd!)
diff --git a/docs/env_variables.txt b/docs/env_variables.txt
index 821463ae..cea3597b 100644
--- a/docs/env_variables.txt
+++ b/docs/env_variables.txt
@@ -223,6 +223,9 @@ checks or alter some of the more exotic semantics of the tool:
     some basic stats. This behavior is also automatically triggered when the
     output from afl-fuzz is redirected to a file or to a pipe.
+  - Setting AFL_FORCE_UI will force painting the UI on the screen even if
+    no valid terminal is detected (useful for virtual consoles).
+
   - If you are Jakub, you may need AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES.
     Others need not apply.
diff --git a/include/config.h b/include/config.h
index 324435b3..17836151 100644
--- a/include/config.h
+++ b/include/config.h
@@ -345,6 +345,11 @@
 #define CTEST_CORE_TRG_MS   1000
 #define CTEST_BUSY_CYCLES   (10 * 1000 * 1000)
+/* Enable NeverZero counters in QEMU mode */
+
+#define AFL_QEMU_NOT_ZERO
+
+
 /* Uncomment this to use inferior block-coverage-based instrumentation.
Note that you need to recompile the target binary for this to have any effect: */ diff --git a/src/afl-fuzz-src/afl-fuzz.c b/src/afl-fuzz-src/afl-fuzz.c index 7cc05a39..0e12f493 100644 --- a/src/afl-fuzz-src/afl-fuzz.c +++ b/src/afl-fuzz-src/afl-fuzz.c @@ -10104,6 +10104,9 @@ int main(int argc, char** argv) { } + if (getenv("AFL_NO_UI") && getenv("AFL_FORCE_UI")) + FATAL("AFL_NO_UI and AFL_FORCE_UI are mutually exclusive"); + if (strchr(argv[optind], '/') == NULL) WARNF(cLRD "Target binary called without a prefixed path, make sure you are fuzzing the right binary: " cRST "%s", argv[optind]); OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" Eissfeldt and Andrea Fioraldi"); @@ -10151,6 +10154,8 @@ int main(int argc, char** argv) { fix_up_banner(argv[optind]); check_if_tty(); + if (getenv("AFL_FORCE_UI")) + not_on_tty = 0; if (getenv("AFL_CAL_FAST")) { /* Use less calibration cycles, for slow applications */ From 6cb07a91310494ff95bfd832c6926994592194c3 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 2 Sep 2019 09:43:05 +0200 Subject: [PATCH 64/83] previous merge lost the symlink, restoring --- config.h | 1 + 1 file changed, 1 insertion(+) create mode 120000 config.h diff --git a/config.h b/config.h new file mode 120000 index 00000000..046ab52a --- /dev/null +++ b/config.h @@ -0,0 +1 @@ +include/config.h \ No newline at end of file From 39c4bb7a49d22c66b3cb613fb13329ec7a6dc16d Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 2 Sep 2019 10:29:54 +0200 Subject: [PATCH 65/83] added peak_rss_mb and slowest_exec_ms in fuzzer_stats report --- docs/ChangeLog | 3 +-- include/afl-fuzz.h | 1 + src/afl-fuzz-src/afl-fuzz.c | 29 ++++++++++++++++++++++++++++- src/afl-fuzz-src/globals.c | 1 + 4 files changed, 31 insertions(+), 3 deletions(-) diff --git a/docs/ChangeLog b/docs/ChangeLog index dd5b597c..1cd95650 100644 --- a/docs/ChangeLog +++ b/docs/ChangeLog @@ -29,18 +29,17 @@ Version ++2.53d (dev): - Android is now supported (thanks to JoeyJiao!) - still need to modify the Makefile though - fix building qemu on some Ubuntus (thanks to floyd!) - custom mutator by a loaded library is now supported (thanks to kyakdan!) + - added peak_rss_mb and slowest_exec_ms to the fuzzer_stats report - more support for *BSD (thanks to devnexen!) - fix building on *BSD (thanks to tobias.kortkamp for the patch) - fix for a few features to support map sizes other than 2^16 - afl-showmap: new option -r now shows the real values in the buckets (stock afl never did), plus shows tuple content summary information now - - the forkserver is now in its own C file to be easily integrable - small docu updates - NeverZero counters for QEMU - NeverZero counters for Unicorn - CompareCoverage Unicorn - Immediates-only instrumentation for CompareCoverage - - ... your patch?
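For the new peak_rss_mb field, the diff below prints usage.ru_maxrss; the getrusage() call that fills the struct is not in the visible hunks. A self-contained sketch of how such a value can be obtained, using afl's u64 typedef - note that Linux reports ru_maxrss in kilobytes while macOS reports bytes, so the unit handling here is an assumption:

  // Sketch: peak resident set size of all reaped children, in MB.
  #include <sys/resource.h>

  static u64 sketch_peak_rss_mb(void) {

    struct rusage ru;

    if (getrusage(RUSAGE_CHILDREN, &ru)) return 0;

    return ((u64)ru.ru_maxrss) >> 10;  // KiB -> MiB on Linux

  }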
:) -------------------------- diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index c50a21a7..7b380dce 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -350,6 +350,7 @@ extern u64 total_crashes, /* Total number of crashes */ unique_tmouts, /* Timeouts with unique signatures */ unique_hangs, /* Hangs with unique signatures */ total_execs, /* Total execve() calls */ + slowest_exec_ms, /* Slowest non-hanging test case (ms) */ start_time, /* Unix start time (ms) */ last_path_time, /* Time for most recent path (ms) */ last_crash_time, /* Time for most recent crash (ms) */ diff --git a/src/afl-fuzz-src/afl-fuzz.c b/src/afl-fuzz-src/afl-fuzz.c index 0e12f493..dc21de17 100644 --- a/src/afl-fuzz-src/afl-fuzz.c +++ b/src/afl-fuzz-src/afl-fuzz.c @@ -370,6 +370,7 @@ static u8 run_target(char** argv, u32 timeout) { static struct itimerval it; static u32 prev_timed_out = 0; + static u64 exec_ms = 0; int status = 0; u32 tb4; @@ -519,6 +520,10 @@ static u8 run_target(char** argv, u32 timeout) { } if (!WIFSTOPPED(status)) child_pid = 0; + + getitimer(ITIMER_REAL, &it); + exec_ms = (u64) timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000); + if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms; it.it_value.tv_sec = 0; it.it_value.tv_usec = 0; @@ -1491,6 +1496,7 @@ static void find_timeout(void) { static void write_stats_file(double bitmap_cvg, double stability, double eps) { static double last_bcvg, last_stab, last_eps; + static struct rusage usage; u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); s32 fd; @@ -1543,6 +1549,8 @@ static void write_stats_file(double bitmap_cvg, double stability, double eps) { "last_hang : %llu\n" "execs_since_crash : %llu\n" "exec_timeout : %u\n" + "slowest_exec_ms : %llu\n" + "peak_rss_mb : %lu\n" "afl_banner : %s\n" "afl_version : " VERSION "\n" "target_mode : %s%s%s%s%s%s%s%s\n" @@ -1554,7 +1562,7 @@ static void write_stats_file(double bitmap_cvg, double stability, double eps) { queued_variable, stability, bitmap_cvg, unique_crashes, unique_hangs, last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000, total_execs - last_crash_execs, - exec_tmout, use_banner, + exec_tmout, slowest_exec_ms, (unsigned long int)usage.ru_maxrss, use_banner, unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "", no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "", persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "", @@ -10347,6 +10355,25 @@ int main(int argc, char** argv) { if (queue_cur) show_stats(); + /* + * ATTENTION - the following 10 lines were copied from a PR to Google's afl + * repository - and slightly fixed. + * These lines have nothing to do with the purpose of the original PR though. + * It looks like when an exit condition was met (AFL_BENCH_JUST_ONE, + * AFL_EXIT_WHEN_DONE or AFL_BENCH_UNTIL_CRASH), the child and forkserver + * were not killed. + */ + /* If we stopped programmatically, we kill the forkserver and the current + runner; if we stopped manually, this is done by the signal handler. */ + if (stop_soon == 2) { + if (child_pid > 0) kill(child_pid, SIGKILL); + if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL); + /* Now that we've killed the forkserver, we wait for it to be able to get rusage stats.
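(Worked example for the exec_ms computation above: with -t 1000, a getitimer() reading of 240 ms left on the SIGALRM timer means the run took 1000 - 240 = 760 ms, and slowest_exec_ms is raised to 760 if that beats the previous record.)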
*/ + if (waitpid(forksrv_pid, NULL, 0) <= 0) { + WARNF("waitpid() failed"); + } + } + write_bitmap(); write_stats_file(0, 0, 0); save_auto(); diff --git a/src/afl-fuzz-src/globals.c b/src/afl-fuzz-src/globals.c index 127d7609..e28c3099 100644 --- a/src/afl-fuzz-src/globals.c +++ b/src/afl-fuzz-src/globals.c @@ -189,6 +189,7 @@ u64 total_crashes, /* Total number of crashes */ unique_tmouts, /* Timeouts with unique signatures */ unique_hangs, /* Hangs with unique signatures */ total_execs, /* Total execve() calls */ + slowest_exec_ms, /* Slowest non-hanging test case (ms) */ start_time, /* Unix start time (ms) */ last_path_time, /* Time for most recent path (ms) */ last_crash_time, /* Time for most recent crash (ms) */ From 1652831f1de2fcf13184162503bb764bd610914c Mon Sep 17 00:00:00 2001 From: van Hauser Date: Mon, 2 Sep 2019 17:40:23 +0200 Subject: [PATCH 66/83] afl-fuzz-src/* -> src/afl-fuzz* rename --- Makefile | 2 +- src/{afl-fuzz-src/bitmap.c => afl-fuzz-bitmap.c} | 0 src/{afl-fuzz-src/extras.c => afl-fuzz-extras.c} | 0 src/{afl-fuzz-src/globals.c => afl-fuzz-globals.c} | 0 src/{afl-fuzz-src/misc.c => afl-fuzz-misc.c} | 0 src/{afl-fuzz-src/python.c => afl-fuzz-python.c} | 0 src/{afl-fuzz-src/queue.c => afl-fuzz-queue.c} | 0 src/{afl-fuzz-src => }/afl-fuzz.c | 0 8 files changed, 1 insertion(+), 1 deletion(-) rename src/{afl-fuzz-src/bitmap.c => afl-fuzz-bitmap.c} (100%) rename src/{afl-fuzz-src/extras.c => afl-fuzz-extras.c} (100%) rename src/{afl-fuzz-src/globals.c => afl-fuzz-globals.c} (100%) rename src/{afl-fuzz-src/misc.c => afl-fuzz-misc.c} (100%) rename src/{afl-fuzz-src/python.c => afl-fuzz-python.c} (100%) rename src/{afl-fuzz-src/queue.c => afl-fuzz-queue.c} (100%) rename src/{afl-fuzz-src => }/afl-fuzz.c (100%) diff --git a/Makefile b/Makefile index 14342129..6eb6f871 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,7 @@ CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I include/ \ -DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \ -DBIN_PATH=\"$(BIN_PATH)\" -Wno-unused-function -AFL_FUZZ_FILES = $(wildcard src/afl-fuzz-src/*.c) +AFL_FUZZ_FILES = $(wildcard src/afl-fuzz*.c) PYTHON_INCLUDE ?= /usr/include/python2.7 diff --git a/src/afl-fuzz-src/bitmap.c b/src/afl-fuzz-bitmap.c similarity index 100% rename from src/afl-fuzz-src/bitmap.c rename to src/afl-fuzz-bitmap.c diff --git a/src/afl-fuzz-src/extras.c b/src/afl-fuzz-extras.c similarity index 100% rename from src/afl-fuzz-src/extras.c rename to src/afl-fuzz-extras.c diff --git a/src/afl-fuzz-src/globals.c b/src/afl-fuzz-globals.c similarity index 100% rename from src/afl-fuzz-src/globals.c rename to src/afl-fuzz-globals.c diff --git a/src/afl-fuzz-src/misc.c b/src/afl-fuzz-misc.c similarity index 100% rename from src/afl-fuzz-src/misc.c rename to src/afl-fuzz-misc.c diff --git a/src/afl-fuzz-src/python.c b/src/afl-fuzz-python.c similarity index 100% rename from src/afl-fuzz-src/python.c rename to src/afl-fuzz-python.c diff --git a/src/afl-fuzz-src/queue.c b/src/afl-fuzz-queue.c similarity index 100% rename from src/afl-fuzz-src/queue.c rename to src/afl-fuzz-queue.c diff --git a/src/afl-fuzz-src/afl-fuzz.c b/src/afl-fuzz.c similarity index 100% rename from src/afl-fuzz-src/afl-fuzz.c rename to src/afl-fuzz.c From e9d968e060f59df634409d2bbe58c279cf6eca00 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Mon, 2 Sep 2019 18:41:27 +0200 Subject: [PATCH 67/83] afl-fuzz.c completely split up --- include/afl-fuzz.h | 70 +- src/afl-fuzz-bitmap.c | 293 ++ src/afl-fuzz-init.c | 1943 +++++++++ src/afl-fuzz-one.c |
5719 ++++++++++++++++++++++++ src/afl-fuzz-python.c | 106 + src/afl-fuzz-queue.c | 136 + src/afl-fuzz-run.c | 775 ++++ src/afl-fuzz-stats.c | 754 ++++ src/afl-fuzz.c | 9655 +---------------------------------------- 9 files changed, 9795 insertions(+), 9656 deletions(-) create mode 100644 src/afl-fuzz-init.c create mode 100644 src/afl-fuzz-one.c create mode 100644 src/afl-fuzz-run.c create mode 100644 src/afl-fuzz-stats.c diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 7b380dce..ca22ef75 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -80,6 +80,11 @@ # define HAVE_AFFINITY 1 #endif /* __linux__ */ +#ifndef SIMPLE_FILES +# define CASE_PREFIX "id:" +#else +# define CASE_PREFIX "id_" +#endif /* ^!SIMPLE_FILES */ struct queue_entry { @@ -455,16 +460,15 @@ extern PyObject *py_functions[PY_FUNC_COUNT]; /**** Prototypes ****/ -/* Python stuff */ +/* Python */ #ifdef USE_PYTHON - int init_py(); void finalize_py(); void fuzz_py(char*, size_t, char*, size_t, char**, size_t*); u32 init_trim_py(char*, size_t); u32 post_trim_py(char); void trim_py(char**, size_t*); - +u8 trim_case_python(char**, struct queue_entry*, u8*); #endif /* Queue */ @@ -476,6 +480,7 @@ void add_to_queue(u8*, u32, u8); void destroy_queue(void); void update_bitmap_score(struct queue_entry*); void cull_queue(void); +u32 calculate_score(struct queue_entry*); /* Bitmap */ @@ -494,6 +499,10 @@ void classify_counts(u32*); #endif void init_count_class16(void); void minimize_bits(u8*, u8*); +#ifndef SIMPLE_FILES +u8* describe_op(u8); +#endif +u8 save_if_interesting(char**, void*, u32, u8); /* Misc */ @@ -511,6 +520,61 @@ void save_auto(void); void load_auto(void); void destroy_extras(void); +/* Stats */ + +void write_stats_file(double, double, double); +void maybe_update_plot_file(double, double); +void show_stats(void); +void show_init_stats(void); + +/* Run */ + +u8 run_target(char**, u32); +void write_to_testcase(void*, u32); +void write_with_gap(void*, u32, u32, u32); +u8 calibrate_case(char**, struct queue_entry*, u8*, u32, u8); +void sync_fuzzers(char**); +u8 trim_case(char**, struct queue_entry*, u8*); +u8 common_fuzz_stuff(char**, u8*, u32); + +/* Fuzz one */ + +u8 fuzz_one_original(char**); +static u8 pilot_fuzzing(char**); +u8 core_fuzzing(char**); +void pso_updating(void); +u8 fuzz_one(char**); + +/* Init */ + +#ifdef HAVE_AFFINITY +void bind_to_free_cpu(void); +#endif +void setup_post(void); +void setup_custom_mutator(void); +void read_testcases(void); +void perform_dry_run(char**); +void pivot_inputs(void); +u32 find_start_position(void); +void find_timeout(void); +double get_runnable_processes(void); +void nuke_resume_dir(void); +void maybe_delete_out_dir(void); +void setup_dirs_fds(void); +void setup_cmdline_file(char**); +void setup_stdio_file(void); +void check_crash_handling(void); +void check_cpu_governor(void); +void get_core_count(void); +void fix_up_sync(void); +void check_asan_opts(void); +void check_binary(u8*); +void fix_up_banner(u8*); +void check_if_tty(void); +void setup_signal_handlers(void); +char** get_qemu_argv(u8*, char**, int); +void save_cmdline(u32, char**); + /**** Inline routines ****/ /* Generate a random number (from 0 to limit - 1). This may diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 6cd9852f..1a77dc13 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -408,3 +408,296 @@ void minimize_bits(u8* dst, u8* src) { } + +#ifndef SIMPLE_FILES + +/* Construct a file name for a new test case, capturing the operation + that led to its discovery. 
Uses a static buffer. */ + +u8* describe_op(u8 hnb) { + + static u8 ret[256]; + + if (syncing_party) { + + sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case); + + } else { + + sprintf(ret, "src:%06u", current_entry); + + sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time); + + if (splicing_with >= 0) + sprintf(ret + strlen(ret), "+%06d", splicing_with); + + sprintf(ret + strlen(ret), ",op:%s", stage_short); + + if (stage_cur_byte >= 0) { + + sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte); + + if (stage_val_type != STAGE_VAL_NONE) + sprintf(ret + strlen(ret), ",val:%s%+d", + (stage_val_type == STAGE_VAL_BE) ? "be:" : "", + stage_cur_val); + + } else sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val); + + } + + if (hnb == 2) strcat(ret, ",+cov"); + + return ret; + +} + +#endif /* !SIMPLE_FILES */ + + +/* Write a message accompanying the crash directory :-) */ + +static void write_crash_readme(void) { + + u8* fn = alloc_printf("%s/crashes/README.txt", out_dir); + s32 fd; + FILE* f; + + fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); + ck_free(fn); + + /* Do not die on errors here - that would be impolite. */ + + if (fd < 0) return; + + f = fdopen(fd, "w"); + + if (!f) { + close(fd); + return; + } + + fprintf(f, "Command line used to find this crash:\n\n" + + "%s\n\n" + + "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n" + "memory limit. The limit used for this fuzzing session was %s.\n\n" + + "Need a tool to minimize test cases before investigating the crashes or sending\n" + "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n" + + "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n" + "an mail at once the issues are fixed\n\n" + + " https://github.com/vanhauser-thc/AFLplusplus\n\n", + + orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */ + + fclose(f); + +} + + +/* Check if the result of an execve() during routine fuzzing is interesting, + save or queue the input test case for further analysis if so. Returns 1 if + entry is saved, 0 otherwise. */ + +u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { + + if (len == 0) return 0; + + u8 *fn = ""; + u8 hnb; + s32 fd; + u8 keeping = 0, res; + + /* Update path frequency. */ + u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + + struct queue_entry* q = queue; + while (q) { + if (q->exec_cksum == cksum) + q->n_fuzz = q->n_fuzz + 1; + + q = q->next; + + } + + if (fault == crash_mode) { + + /* Keep only if there are new bits in the map, add to queue for + future fuzzing, etc. */ + + if (!(hnb = has_new_bits(virgin_bits))) { + if (crash_mode) ++total_crashes; + return 0; + } + +#ifndef SIMPLE_FILES + + fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths, + describe_op(hnb)); + +#else + + fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths); + +#endif /* ^!SIMPLE_FILES */ + + add_to_queue(fn, len, 0); + + if (hnb == 2) { + queue_top->has_new_cov = 1; + ++queued_with_cov; + } + + queue_top->exec_cksum = cksum; + + /* Try to calibrate inline; this also calls update_bitmap_score() when + successful. 
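With made-up numbers, the names produced here look like "src:000005,time:181002,op:havoc,rep:2" for a havoc-stage find, "src:000005,time:181002,op:arith8,pos:42,val:+7" for a deterministic stage, or "sync:fuzzer02,src:000031" for an imported case, with ",+cov" appended when the find brings new coverage; save_if_interesting() below prepends "id:%06u," to build the final queue file name.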
*/ + + res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0); + + if (res == FAULT_ERROR) + FATAL("Unable to execute target application"); + + fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (fd < 0) PFATAL("Unable to create '%s'", fn); + ck_write(fd, mem, len, fn); + close(fd); + + keeping = 1; + + } + + switch (fault) { + + case FAULT_TMOUT: + + /* Timeouts are not very interesting, but we're still obliged to keep + a handful of samples. We use the presence of new bits in the + hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we + just keep everything. */ + + ++total_tmouts; + + if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping; + + if (!dumb_mode) { + +#ifdef __x86_64__ + simplify_trace((u64*)trace_bits); +#else + simplify_trace((u32*)trace_bits); +#endif /* ^__x86_64__ */ + + if (!has_new_bits(virgin_tmout)) return keeping; + + } + + ++unique_tmouts; + + /* Before saving, we make sure that it's a genuine hang by re-running + the target with a more generous timeout (unless the default timeout + is already generous). */ + + if (exec_tmout < hang_tmout) { + + u8 new_fault; + write_to_testcase(mem, len); + new_fault = run_target(argv, hang_tmout); + + /* A corner case that one user reported bumping into: increasing the + timeout actually uncovers a crash. Make sure we don't discard it if + so. */ + + if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash; + + if (stop_soon || new_fault != FAULT_TMOUT) return keeping; + + } + +#ifndef SIMPLE_FILES + + fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, + unique_hangs, describe_op(0)); + +#else + + fn = alloc_printf("%s/hangs/id_%06llu", out_dir, + unique_hangs); + +#endif /* ^!SIMPLE_FILES */ + + ++unique_hangs; + + last_hang_time = get_cur_time(); + + break; + + case FAULT_CRASH: + +keep_as_crash: + + /* This is handled in a manner roughly similar to timeouts, + except for slightly different limits and no need to re-run test + cases. */ + + ++total_crashes; + + if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping; + + if (!dumb_mode) { + +#ifdef __x86_64__ + simplify_trace((u64*)trace_bits); +#else + simplify_trace((u32*)trace_bits); +#endif /* ^__x86_64__ */ + + if (!has_new_bits(virgin_crash)) return keeping; + + } + + if (!unique_crashes) write_crash_readme(); + +#ifndef SIMPLE_FILES + + fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir, + unique_crashes, kill_signal, describe_op(0)); + +#else + + fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes, + kill_signal); + +#endif /* ^!SIMPLE_FILES */ + + ++unique_crashes; + + last_crash_time = get_cur_time(); + last_crash_execs = total_execs; + + break; + + case FAULT_ERROR: FATAL("Unable to execute target application"); + + default: return keeping; + + } + + /* If we're here, we apparently want to save the crash or hang + test case, too. */ + + fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (fd < 0) PFATAL("Unable to create '%s'", fn); + ck_write(fd, mem, len, fn); + close(fd); + + ck_free(fn); + + return keeping; + +} + diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c new file mode 100644 index 00000000..f66db74c --- /dev/null +++ b/src/afl-fuzz-init.c @@ -0,0 +1,1943 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. 
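save_if_interesting() relies on simplify_trace() to dedupe hangs and crashes by coverage shape alone. In stock afl that call boils down to collapsing every counter to one of two marker bytes - a sketch, assuming the same semantics here:

  // Per-byte effect of simplify_trace(): hit (0x80) or not hit (0x01),
  // so has_new_bits() ignores hit counts when deduping hangs/crashes.
  static void sketch_simplify(u8* bytes, u32 map_size) {

    u32 i;

    for (i = 0; i < map_size; ++i)
      bytes[i] = bytes[i] ? 0x80 : 0x01;

  }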
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#include "afl-fuzz.h" + + +#ifdef HAVE_AFFINITY + +/* Build a list of processes bound to specific cores. Returns -1 if nothing + can be found. Assumes an upper bound of 4k CPUs. */ + +void bind_to_free_cpu(void) { + + DIR* d; + struct dirent* de; + cpu_set_t c; + + u8 cpu_used[4096] = { 0 }; + u32 i; + + if (cpu_core_count < 2) return; + + if (getenv("AFL_NO_AFFINITY")) { + + WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set)."); + return; + + } + + d = opendir("/proc"); + + if (!d) { + + WARNF("Unable to access /proc - can't scan for free CPU cores."); + return; + + } + + ACTF("Checking CPU core loadout..."); + + /* Introduce some jitter, in case multiple AFL tasks are doing the same + thing at the same time... */ + + usleep(R(1000) * 250); + + /* Scan all /proc//status entries, checking for Cpus_allowed_list. + Flag all processes bound to a specific CPU using cpu_used[]. This will + fail for some exotic binding setups, but is likely good enough in almost + all real-world use cases. */ + + while ((de = readdir(d))) { + + u8* fn; + FILE* f; + u8 tmp[MAX_LINE]; + u8 has_vmsize = 0; + + if (!isdigit(de->d_name[0])) continue; + + fn = alloc_printf("/proc/%s/status", de->d_name); + + if (!(f = fopen(fn, "r"))) { + ck_free(fn); + continue; + } + + while (fgets(tmp, MAX_LINE, f)) { + + u32 hval; + + /* Processes without VmSize are probably kernel tasks. */ + + if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1; + + if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && + !strchr(tmp, '-') && !strchr(tmp, ',') && + sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) && + has_vmsize) { + + cpu_used[hval] = 1; + break; + + } + + } + + ck_free(fn); + fclose(f); + + } + + closedir(d); + + for (i = 0; i < cpu_core_count; ++i) if (!cpu_used[i]) break; + + if (i == cpu_core_count) { + + SAYF("\n" cLRD "[-] " cRST + "Uh-oh, looks like all %d CPU cores on your system are allocated to\n" + " other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n" + " another fuzzer on this machine is probably a bad plan, but if you are\n" + " absolutely sure, you can set AFL_NO_AFFINITY and try again.\n", + cpu_core_count); + + FATAL("No more free CPU cores"); + + } + + OKF("Found a free CPU core, binding to #%u.", i); + + cpu_aff = i; + + CPU_ZERO(&c); + CPU_SET(i, &c); + + if (sched_setaffinity(0, sizeof(c), &c)) + PFATAL("sched_setaffinity failed"); + +} + +#endif /* HAVE_AFFINITY */ + +/* Load postprocessor, if available. */ + +void setup_post(void) { + + void* dh; + u8* fn = getenv("AFL_POST_LIBRARY"); + u32 tlen = 6; + + if (!fn) return; + + ACTF("Loading postprocessor from '%s'...", fn); + + dh = dlopen(fn, RTLD_NOW); + if (!dh) FATAL("%s", dlerror()); + + post_handler = dlsym(dh, "afl_postprocess"); + if (!post_handler) FATAL("Symbol 'afl_postprocess' not found."); + + /* Do a quick test. 
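For example, in the core scan above a process whose /proc/<pid>/status contains a VmSize: line and "Cpus_allowed_list:\t3" is counted as pinned to core 3, while ranged or comma-separated lists such as "0-7" or "0,4" fail the strchr() checks and are treated as unbound.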
It's better to segfault now than later =) */ + + post_handler("hello", &tlen); + + OKF("Postprocessor installed successfully."); + +} + +void setup_custom_mutator(void) { + void* dh; + u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY"); + + if (!fn) return; + + ACTF("Loading custom mutator library from '%s'...", fn); + + dh = dlopen(fn, RTLD_NOW); + if (!dh) FATAL("%s", dlerror()); + + custom_mutator = dlsym(dh, "afl_custom_mutator"); + if (!custom_mutator) FATAL("Symbol 'afl_custom_mutator' not found."); + + pre_save_handler = dlsym(dh, "afl_pre_save_handler"); +// if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found."); + + OKF("Custom mutator installed successfully."); +} + + +/* Shuffle an array of pointers. Might be slightly biased. */ + +static void shuffle_ptrs(void** ptrs, u32 cnt) { + + u32 i; + + for (i = 0; i < cnt - 2; ++i) { + + u32 j = i + UR(cnt - i); + void *s = ptrs[i]; + ptrs[i] = ptrs[j]; + ptrs[j] = s; + + } + +} + +/* Read all testcases from the input directory, then queue them for testing. + Called at startup. */ + +void read_testcases(void) { + + struct dirent **nl; + s32 nl_cnt; + u32 i; + u8* fn1; + + /* Auto-detect non-in-place resumption attempts. */ + + fn1 = alloc_printf("%s/queue", in_dir); + if (!access(fn1, F_OK)) in_dir = fn1; else ck_free(fn1); + + ACTF("Scanning '%s'...", in_dir); + + /* We use scandir() + alphasort() rather than readdir() because otherwise, + the ordering of test cases would vary somewhat randomly and would be + difficult to control. */ + + nl_cnt = scandir(in_dir, &nl, NULL, alphasort); + + if (nl_cnt < 0) { + + if (errno == ENOENT || errno == ENOTDIR) + + SAYF("\n" cLRD "[-] " cRST + "The input directory does not seem to be valid - try again. The fuzzer needs\n" + " one or more test case to start with - ideally, a small file under 1 kB\n" + " or so. The cases must be stored as regular files directly in the input\n" + " directory.\n"); + + PFATAL("Unable to open '%s'", in_dir); + + } + + if (shuffle_queue && nl_cnt > 1) { + + ACTF("Shuffling queue..."); + shuffle_ptrs((void**)nl, nl_cnt); + + } + + for (i = 0; i < nl_cnt; ++i) { + + struct stat st; + + u8* fn2 = alloc_printf("%s/%s", in_dir, nl[i]->d_name); + u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name); + + u8 passed_det = 0; + + free(nl[i]); /* not tracked */ + + if (lstat(fn2, &st) || access(fn2, R_OK)) + PFATAL("Unable to access '%s'", fn2); + + /* This also takes care of . and .. */ + + if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn2, "/README.txt")) { + + ck_free(fn2); + ck_free(dfn); + continue; + + } + + if (st.st_size > MAX_FILE) + FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, + DMS(st.st_size), DMS(MAX_FILE)); + + /* Check for metadata that indicates that deterministic fuzzing + is complete for this entry. We don't want to repeat deterministic + fuzzing when resuming aborted scans, because it would be pointless + and probably very time-consuming. */ + + if (!access(dfn, F_OK)) passed_det = 1; + ck_free(dfn); + + add_to_queue(fn2, st.st_size, passed_det); + + } + + free(nl); /* not tracked */ + + if (!queued_paths) { + + SAYF("\n" cLRD "[-] " cRST + "Looks like there are no valid test cases in the input directory! The fuzzer\n" + " needs one or more test case to start with - ideally, a small file under\n" + " 1 kB or so. 
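shuffle_ptrs() above stops one swap short of a full pass, hence its "slightly biased" caveat. For reference, a textbook Fisher-Yates shuffle built on the same UR() helper would be:

  // Unbiased variant: walk down from the end, swapping each slot with
  // a uniformly chosen one at or below it.
  static void shuffle_ptrs_unbiased(void** ptrs, u32 cnt) {

    u32 i;

    for (i = cnt - 1; i > 0; --i) {

      u32 j = UR(i + 1);  // uniform in [0, i]
      void* s = ptrs[i];
      ptrs[i] = ptrs[j];
      ptrs[j] = s;

    }

  }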
The cases must be stored as regular files directly in the\n" + " input directory.\n"); + + FATAL("No usable test cases in '%s'", in_dir); + + } + + last_path_time = 0; + queued_at_start = queued_paths; + +} + + +/* Examine map coverage. Called once, for first test case. */ + +static void check_map_coverage(void) { + + u32 i; + + if (count_bytes(trace_bits) < 100) return; + + for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i) + if (trace_bits[i]) return; + + WARNF("Recompile binary with newer version of afl to improve coverage!"); + +} + + +/* Perform dry run of all test cases to confirm that the app is working as + expected. This is done only for the initial inputs, and only once. */ + +void perform_dry_run(char** argv) { + + struct queue_entry* q = queue; + u32 cal_failures = 0; + u8* skip_crashes = getenv("AFL_SKIP_CRASHES"); + + while (q) { + + u8* use_mem; + u8 res; + s32 fd; + + u8* fn = strrchr(q->fname, '/') + 1; + + ACTF("Attempting dry run with '%s'...", fn); + + fd = open(q->fname, O_RDONLY); + if (fd < 0) PFATAL("Unable to open '%s'", q->fname); + + use_mem = ck_alloc_nozero(q->len); + + if (read(fd, use_mem, q->len) != q->len) + FATAL("Short read from '%s'", q->fname); + + close(fd); + + res = calibrate_case(argv, q, use_mem, 0, 1); + ck_free(use_mem); + + if (stop_soon) return; + + if (res == crash_mode || res == FAULT_NOBITS) + SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST, + q->len, q->bitmap_size, q->exec_us); + + switch (res) { + + case FAULT_NONE: + + if (q == queue) check_map_coverage(); + + if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn); + + break; + + case FAULT_TMOUT: + + if (timeout_given) { + + /* The -t nn+ syntax in the command line sets timeout_given to '2' and + instructs afl-fuzz to tolerate but skip queue entries that time + out. */ + + if (timeout_given > 1) { + WARNF("Test case results in a timeout (skipping)"); + q->cal_failed = CAL_CHANCES; + ++cal_failures; + break; + } + + SAYF("\n" cLRD "[-] " cRST + "The program took more than %u ms to process one of the initial test cases.\n" + " Usually, the right thing to do is to relax the -t option - or to delete it\n" + " altogether and allow the fuzzer to auto-calibrate. That said, if you know\n" + " what you are doing and want to simply skip the unruly test cases, append\n" + " '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout, + exec_tmout); + + FATAL("Test case '%s' results in a timeout", fn); + + } else { + + SAYF("\n" cLRD "[-] " cRST + "The program took more than %u ms to process one of the initial test cases.\n" + " This is bad news; raising the limit with the -t option is possible, but\n" + " will probably make the fuzzing process extremely slow.\n\n" + + " If this test case is just a fluke, the other option is to just avoid it\n" + " altogether, and find one that is less of a CPU hog.\n", exec_tmout); + + FATAL("Test case '%s' results in a timeout", fn); + + } + + case FAULT_CRASH: + + if (crash_mode) break; + + if (skip_crashes) { + WARNF("Test case results in a crash (skipping)"); + q->cal_failed = CAL_CHANCES; + ++cal_failures; + break; + } + + if (mem_limit) { + + SAYF("\n" cLRD "[-] " cRST + "Oops, the program crashed with one of the test cases provided. There are\n" + " several possible explanations:\n\n" + + " - The test case causes known crashes under normal working conditions. If\n" + " so, please remove it. 
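Usage example for the timeout handling above: "-t 200+" sets exec_tmout to 200 ms with timeout_given = 2, so a seed that times out in the dry run is merely skipped (its cal_failed is set to CAL_CHANCES and counted in cal_failures), whereas a plain "-t 200" aborts the whole session on the same seed.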
The fuzzer should be seeded with interesting\n" + " inputs - but not ones that cause an outright crash.\n\n" + + " - The current memory limit (%s) is too low for this program, causing\n" + " it to die due to OOM when parsing valid files. To fix this, try\n" + " bumping it up with the -m setting in the command line. If in doubt,\n" + " try something along the lines of:\n\n" + + MSG_ULIMIT_USAGE " /path/to/binary [...] for troubleshooting tips.\n", + DMS(mem_limit << 20), mem_limit - 1, doc_path); + + } else { + + SAYF("\n" cLRD "[-] " cRST + "Oops, the program crashed with one of the test cases provided. There are\n" + " several possible explanations:\n\n" + + " - The test case causes known crashes under normal working conditions. If\n" + " so, please remove it. The fuzzer should be seeded with interesting\n" + " inputs - but not ones that cause an outright crash.\n\n" + + MSG_FORK_ON_APPLE + + " - Least likely, there is a horrible bug in the fuzzer. If other options\n" + " fail, poke for troubleshooting tips.\n"); + + } +#undef MSG_ULIMIT_USAGE +#undef MSG_FORK_ON_APPLE + + FATAL("Test case '%s' results in a crash", fn); + + case FAULT_ERROR: + + FATAL("Unable to execute target application ('%s')", argv[0]); + + case FAULT_NOINST: + + FATAL("No instrumentation detected"); + + case FAULT_NOBITS: + + ++useless_at_start; + + if (!in_bitmap && !shuffle_queue) + WARNF("No new instrumentation output, test case may be useless."); + + break; + + } + + if (q->var_behavior) WARNF("Instrumentation output varies across runs."); + + q = q->next; + + } + + if (cal_failures) { + + if (cal_failures == queued_paths) + FATAL("All test cases time out%s, giving up!", + skip_crashes ? " or crash" : ""); + + WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures, + ((double)cal_failures) * 100 / queued_paths, + skip_crashes ? " or crashes" : ""); + + if (cal_failures * 5 > queued_paths) + WARNF(cLRD "High percentage of rejected test cases, check settings!"); + + } + + OKF("All test cases processed."); + +} + + +/* Helper function: link() if possible, copy otherwise. */ + +static void link_or_copy(u8* old_path, u8* new_path) { + + s32 i = link(old_path, new_path); + s32 sfd, dfd; + u8* tmp; + + if (!i) return; + + sfd = open(old_path, O_RDONLY); + if (sfd < 0) PFATAL("Unable to open '%s'", old_path); + + dfd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (dfd < 0) PFATAL("Unable to create '%s'", new_path); + + tmp = ck_alloc(64 * 1024); + + while ((i = read(sfd, tmp, 64 * 1024)) > 0) + ck_write(dfd, tmp, i, new_path); + + if (i < 0) PFATAL("read() failed"); + + ck_free(tmp); + close(sfd); + close(dfd); + +} + + +/* Create hard links for input test cases in the output directory, choosing + good names and pivoting accordingly. */ + +void pivot_inputs(void) { + + struct queue_entry* q = queue; + u32 id = 0; + + ACTF("Creating hard links for all input files..."); + + while (q) { + + u8 *nfn, *rsl = strrchr(q->fname, '/'); + u32 orig_id; + + if (!rsl) rsl = q->fname; else ++rsl; + + /* If the original file name conforms to the syntax and the recorded + ID matches the one we'd assign, just use the original file name. + This is valuable for resuming fuzzing runs. */ + + if (!strncmp(rsl, CASE_PREFIX, 3) && + sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) { + + u8* src_str; + u32 src_id; + + resuming_fuzz = 1; + nfn = alloc_printf("%s/queue/%s", out_dir, rsl); + + /* Since we're at it, let's also try to find parent and figure out the + appropriate depth for this entry. 
*/ + + src_str = strchr(rsl + 3, ':'); + + if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) { + + struct queue_entry* s = queue; + while (src_id-- && s) s = s->next; + if (s) q->depth = s->depth + 1; + + if (max_depth < q->depth) max_depth = q->depth; + + } + + } else { + + /* No dice - invent a new name, capturing the original one as a + substring. */ + +#ifndef SIMPLE_FILES + + u8* use_name = strstr(rsl, ",orig:"); + + if (use_name) use_name += 6; else use_name = rsl; + nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name); + +#else + + nfn = alloc_printf("%s/queue/id_%06u", out_dir, id); + +#endif /* ^!SIMPLE_FILES */ + + } + + /* Pivot to the new queue entry. */ + + link_or_copy(q->fname, nfn); + ck_free(q->fname); + q->fname = nfn; + + /* Make sure that the passed_det value carries over, too. */ + + if (q->passed_det) mark_as_det_done(q); + + q = q->next; + ++id; + + } + + if (in_place_resume) nuke_resume_dir(); + +} + + +/* When resuming, try to find the queue position to start from. This makes sense + only when resuming, and when we can find the original fuzzer_stats. */ + +u32 find_start_position(void) { + + static u8 tmp[4096]; /* Ought to be enough for anybody. */ + + u8 *fn, *off; + s32 fd, i; + u32 ret; + + if (!resuming_fuzz) return 0; + + if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); + else fn = alloc_printf("%s/../fuzzer_stats", in_dir); + + fd = open(fn, O_RDONLY); + ck_free(fn); + + if (fd < 0) return 0; + + i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ + close(fd); + + off = strstr(tmp, "cur_path : "); + if (!off) return 0; + + ret = atoi(off + 20); + if (ret >= queued_paths) ret = 0; + return ret; + +} + + +/* The same, but for timeouts. The idea is that when resuming sessions without + -t given, we don't want to keep auto-scaling the timeout over and over + again to prevent it from growing due to random flukes. */ + +void find_timeout(void) { + + static u8 tmp[4096]; /* Ought to be enough for anybody. */ + + u8 *fn, *off; + s32 fd, i; + u32 ret; + + if (!resuming_fuzz) return; + + if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); + else fn = alloc_printf("%s/../fuzzer_stats", in_dir); + + fd = open(fn, O_RDONLY); + ck_free(fn); + + if (fd < 0) return; + + i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ + close(fd); + + off = strstr(tmp, "exec_timeout : "); + if (!off) return; + + ret = atoi(off + 17); + if (ret <= 4) return; + + exec_tmout = ret; + timeout_given = 3; + +} + + + +/* A helper function for maybe_delete_out_dir(), deleting all prefixed + files in a directory. */ + +static u8 delete_files(u8* path, u8* prefix) { + + DIR* d; + struct dirent* d_ent; + + d = opendir(path); + + if (!d) return 0; + + while ((d_ent = readdir(d))) { + + if (d_ent->d_name[0] != '.' && (!prefix || + !strncmp(d_ent->d_name, prefix, strlen(prefix)))) { + + u8* fname = alloc_printf("%s/%s", path, d_ent->d_name); + if (unlink(fname)) PFATAL("Unable to delete '%s'", fname); + ck_free(fname); + + } + + } + + closedir(d); + + return !!rmdir(path); + +} + + +/* Get the number of runnable processes, with some simple smoothing. */ + +double get_runnable_processes(void) { + + static double res; + +#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) + + /* I don't see any portable sysctl or so that would quickly give us the + number of runnable processes; the 1-minute load average can be a + semi-decent approximation, though. 
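The magic offsets in the two fuzzer_stats parsers above are simply the lengths of the literal key prefixes being matched: atoi(off + 20) and atoi(off + 17) start right after the "cur_path ... : " and "exec_timeout ... : " prefixes (the padding spaces inside the strstr() patterns appear collapsed in this rendering of the diff), so a stats line such as "cur_path : 1337" resumes at entry 1337 unless that index is >= queued_paths.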
*/ + + if (getloadavg(&res, 1) != 1) return 0; + +#else + + /* On Linux, /proc/stat is probably the best way; load averages are + computed in funny ways and sometimes don't reflect extremely short-lived + processes well. */ + + FILE* f = fopen("/proc/stat", "r"); + u8 tmp[1024]; + u32 val = 0; + + if (!f) return 0; + + while (fgets(tmp, sizeof(tmp), f)) { + + if (!strncmp(tmp, "procs_running ", 14) || + !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14); + + } + + fclose(f); + + if (!res) { + + res = val; + + } else { + + res = res * (1.0 - 1.0 / AVG_SMOOTHING) + + ((double)val) * (1.0 / AVG_SMOOTHING); + + } + +#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ + + return res; + +} + + +/* Delete the temporary directory used for in-place session resume. */ + +void nuke_resume_dir(void) { + + u8* fn; + + fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir); + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir); + if (delete_files(fn, "auto_")) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir); + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir); + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/_resume/.state", out_dir); + if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/_resume", out_dir); + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + return; + +dir_cleanup_failed: + + FATAL("_resume directory cleanup failed"); + +} + + +/* Delete fuzzer output directory if we recognize it as ours, if the fuzzer + is not currently running, and if the last run time isn't too great. */ + +void maybe_delete_out_dir(void) { + + FILE* f; + u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir); + + /* See if the output directory is locked. If yes, bail out. If not, + create a lock that will persist for the lifetime of the process + (this requires leaving the descriptor open).*/ + + out_dir_fd = open(out_dir, O_RDONLY); + if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir); + +#ifndef __sun + + if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) { + + SAYF("\n" cLRD "[-] " cRST + "Looks like the job output directory is being actively used by another\n" + " instance of afl-fuzz. You will need to choose a different %s\n" + " or stop the other process first.\n", + sync_id ? "fuzzer ID" : "output location"); + + FATAL("Directory '%s' is in use", out_dir); + + } + +#endif /* !__sun */ + + f = fopen(fn, "r"); + + if (f) { + + u64 start_time2, last_update; + + if (fscanf(f, "start_time : %llu\n" + "last_update : %llu\n", &start_time2, &last_update) != 2) + FATAL("Malformed data in '%s'", fn); + + fclose(f); + + /* Let's see how much work is at stake. */ + + if (!in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) { + + SAYF("\n" cLRD "[-] " cRST + "The job output directory already exists and contains the results of more\n" + " than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n" + " automatically delete this data for you.\n\n" + + " If you wish to start a new session, remove or rename the directory manually,\n" + " or specify a different output location for this job. 
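The update above is an exponential moving average, res = res * (1 - 1/AVG_SMOOTHING) + val / AVG_SMOOTHING, so each /proc/stat sample moves the estimate by only 1/AVG_SMOOTHING of the difference. Assuming the stock smoothing factor of 16, a spike from 4 to 20 runnable tasks nudges res from 4 to just 5 on the first poll.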
To resume the old\n" + " session, put '-' as the input directory in the command line ('-i -') and\n" + " try again.\n", OUTPUT_GRACE); + + FATAL("At-risk data found in '%s'", out_dir); + + } + + } + + ck_free(fn); + + /* The idea for in-place resume is pretty simple: we temporarily move the old + queue/ to a new location that gets deleted once import to the new queue/ + is finished. If _resume/ already exists, the current queue/ may be + incomplete due to an earlier abort, so we want to use the old _resume/ + dir instead, and we let rename() fail silently. */ + + if (in_place_resume) { + + u8* orig_q = alloc_printf("%s/queue", out_dir); + + in_dir = alloc_printf("%s/_resume", out_dir); + + rename(orig_q, in_dir); /* Ignore errors */ + + OKF("Output directory exists, will attempt session resume."); + + ck_free(orig_q); + + } else { + + OKF("Output directory exists but deemed OK to reuse."); + + } + + ACTF("Deleting old session data..."); + + /* Okay, let's get the ball rolling! First, we need to get rid of the entries + in /.synced/.../id:*, if any are present. */ + + if (!in_place_resume) { + + fn = alloc_printf("%s/.synced", out_dir); + if (delete_files(fn, NULL)) goto dir_cleanup_failed; + ck_free(fn); + + } + + /* Next, we need to clean up /queue/.state/ subdirectories: */ + + fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir); + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/queue/.state/auto_extras", out_dir); + if (delete_files(fn, "auto_")) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir); + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir); + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + /* Then, get rid of the .state subdirectory itself (should be empty by now) + and everything matching /queue/id:*. */ + + fn = alloc_printf("%s/queue/.state", out_dir); + if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/queue", out_dir); + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + /* All right, let's do /crashes/id:* and /hangs/id:*. */ + + if (!in_place_resume) { + + fn = alloc_printf("%s/crashes/README.txt", out_dir); + unlink(fn); /* Ignore errors */ + ck_free(fn); + + } + + fn = alloc_printf("%s/crashes", out_dir); + + /* Make backup of the crashes directory if it's not empty and if we're + doing in-place resume. */ + + if (in_place_resume && rmdir(fn)) { + + time_t cur_t = time(0); + struct tm* t = localtime(&cur_t); + +#ifndef SIMPLE_FILES + + u8* nfn = alloc_printf("%s.%04d-%02d-%02d-%02d:%02d:%02d", fn, + t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, + t->tm_hour, t->tm_min, t->tm_sec); + +#else + + u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, + t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, + t->tm_hour, t->tm_min, t->tm_sec); + +#endif /* ^!SIMPLE_FILES */ + + rename(fn, nfn); /* Ignore errors. */ + ck_free(nfn); + + } + + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/hangs", out_dir); + + /* Backup hangs, too. 
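Example of the backup naming used here, with a made-up timestamp: an in-place resume at 2019-09-02 18:41:27 renames a non-empty crashes/ to "crashes.2019-09-02-18:41:27" (or "crashes_20190902184127" in SIMPLE_FILES builds) before a fresh directory is created; the same scheme applies to hangs/ below.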
*/ + + if (in_place_resume && rmdir(fn)) { + + time_t cur_t = time(0); + struct tm* t = localtime(&cur_t); + +#ifndef SIMPLE_FILES + + u8* nfn = alloc_printf("%s.%04d-%02d-%02d-%02d:%02d:%02d", fn, + t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, + t->tm_hour, t->tm_min, t->tm_sec); + +#else + + u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, + t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, + t->tm_hour, t->tm_min, t->tm_sec); + +#endif /* ^!SIMPLE_FILES */ + + rename(fn, nfn); /* Ignore errors. */ + ck_free(nfn); + + } + + if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; + ck_free(fn); + + /* And now, for some finishing touches. */ + + if (file_extension) { + fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); + } else { + fn = alloc_printf("%s/.cur_input", out_dir); + } + + if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/fuzz_bitmap", out_dir); + if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; + ck_free(fn); + + if (!in_place_resume) { + fn = alloc_printf("%s/fuzzer_stats", out_dir); + if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; + ck_free(fn); + } + + fn = alloc_printf("%s/plot_data", out_dir); + if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; + ck_free(fn); + + fn = alloc_printf("%s/cmdline", out_dir); + if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; + ck_free(fn); + + OKF("Output dir cleanup successful."); + + /* Wow... is that all? If yes, celebrate! */ + + return; + +dir_cleanup_failed: + + SAYF("\n" cLRD "[-] " cRST + "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n" + " some files that shouldn't be there or that couldn't be removed - so it\n" + " decided to abort! This happened while processing this path:\n\n" + + " %s\n\n" + " Please examine and manually delete the files, or specify a different\n" + " output location for the tool.\n", fn); + + FATAL("Output directory cleanup failed"); + +} + + +/* Prepare output directories and fds. */ + +void setup_dirs_fds(void) { + + u8* tmp; + s32 fd; + + ACTF("Setting up output directories..."); + + if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST) + PFATAL("Unable to create '%s'", sync_dir); + + if (mkdir(out_dir, 0700)) { + + if (errno != EEXIST) PFATAL("Unable to create '%s'", out_dir); + + maybe_delete_out_dir(); + + } else { + + if (in_place_resume) + FATAL("Resume attempted but old output directory not found"); + + out_dir_fd = open(out_dir, O_RDONLY); + +#ifndef __sun + + if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB)) + PFATAL("Unable to flock() output directory."); + +#endif /* !__sun */ + + } + + /* Queue directory for any starting & discovered paths. */ + + tmp = alloc_printf("%s/queue", out_dir); + if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + /* Top-level directory for queue metadata used for session + resume and related tasks. */ + + tmp = alloc_printf("%s/queue/.state/", out_dir); + if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + /* Directory for flagging queue entries that went through + deterministic fuzzing in the past. */ + + tmp = alloc_printf("%s/queue/.state/deterministic_done/", out_dir); + if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + /* Directory with the auto-selected dictionary entries. 
*/ + + tmp = alloc_printf("%s/queue/.state/auto_extras/", out_dir); + if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + /* The set of paths currently deemed redundant. */ + + tmp = alloc_printf("%s/queue/.state/redundant_edges/", out_dir); + if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + /* The set of paths showing variable behavior. */ + + tmp = alloc_printf("%s/queue/.state/variable_behavior/", out_dir); + if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + /* Sync directory for keeping track of cooperating fuzzers. */ + + if (sync_id) { + + tmp = alloc_printf("%s/.synced/", out_dir); + + if (mkdir(tmp, 0700) && (!in_place_resume || errno != EEXIST)) + PFATAL("Unable to create '%s'", tmp); + + ck_free(tmp); + + } + + /* All recorded crashes. */ + + tmp = alloc_printf("%s/crashes", out_dir); + if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + /* All recorded hangs. */ + + tmp = alloc_printf("%s/hangs", out_dir); + if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + /* Generally useful file descriptors. */ + + dev_null_fd = open("/dev/null", O_RDWR); + if (dev_null_fd < 0) PFATAL("Unable to open /dev/null"); + +#ifndef HAVE_ARC4RANDOM + dev_urandom_fd = open("/dev/urandom", O_RDONLY); + if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom"); +#endif + + /* Gnuplot output file. */ + + tmp = alloc_printf("%s/plot_data", out_dir); + fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (fd < 0) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + plot_file = fdopen(fd, "w"); + if (!plot_file) PFATAL("fdopen() failed"); + + fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, " + "pending_total, pending_favs, map_size, unique_crashes, " + "unique_hangs, max_depth, execs_per_sec\n"); + /* ignore errors */ + +} + +void setup_cmdline_file(char** argv) { + u8* tmp; + s32 fd; + u32 i = 0; + + FILE* cmdline_file = NULL; + + /* Store the command line to reproduce our findings */ + tmp = alloc_printf("%s/cmdline", out_dir); + fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600); + if (fd < 0) PFATAL("Unable to create '%s'", tmp); + ck_free(tmp); + + cmdline_file = fdopen(fd, "w"); + if (!cmdline_file) PFATAL("fdopen() failed"); + + while (argv[i]) { + fprintf(cmdline_file, "%s\n", argv[i]); + ++i; + } + + fclose(cmdline_file); +} + + +/* Setup the output file for fuzzed data, if not using -f. */ + +void setup_stdio_file(void) { + + u8* fn; + if (file_extension) { + fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); + } else { + fn = alloc_printf("%s/.cur_input", out_dir); + } + + unlink(fn); /* Ignore errors */ + + out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600); + + if (out_fd < 0) PFATAL("Unable to create '%s'", fn); + + ck_free(fn); + +} + + +/* Make sure that core dumps don't go to a program. */ + +void check_crash_handling(void) { + +#ifdef __APPLE__ + + /* Yuck! There appears to be no simple C API to query for the state of + loaded daemons on MacOS X, and I'm a bit hesitant to do something + more sophisticated, such as disabling crash reporting via Mach ports, + until I get a box to test the code. So, for now, we check for crash + reporting the awful way. */ + + if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return; + + SAYF("\n" cLRD "[-] " cRST + "Whoops, your system is configured to forward crash notifications to an\n" + " external crash reporting utility. 
This will cause issues due to the\n" + " extended delay between the fuzzed binary malfunctioning and this fact\n" + " being relayed to the fuzzer via the standard waitpid() API.\n\n" + " To avoid having crashes misinterpreted as timeouts, please run the\n" + " following commands:\n\n" + + " SL=/System/Library; PL=com.apple.ReportCrash\n" + " launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n" + " sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n"); + + if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) + FATAL("Crash reporter detected"); + +#else + + /* This is Linux specific, but I don't think there's anything equivalent on + *BSD, so we can just let it slide for now. */ + + s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY); + u8 fchar; + + if (fd < 0) return; + + ACTF("Checking core_pattern..."); + + if (read(fd, &fchar, 1) == 1 && fchar == '|') { + + SAYF("\n" cLRD "[-] " cRST + "Hmm, your system is configured to send core dump notifications to an\n" + " external utility. This will cause issues: there will be an extended delay\n" + " between stumbling upon a crash and having this information relayed to the\n" + " fuzzer via the standard waitpid() API.\n\n" + + " To avoid having crashes misinterpreted as timeouts, please log in as root\n" + " and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n" + + " echo core >/proc/sys/kernel/core_pattern\n"); + + if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) + FATAL("Pipe at the beginning of 'core_pattern'"); + + } + + close(fd); + +#endif /* ^__APPLE__ */ + +} + + +/* Check CPU governor. */ + +void check_cpu_governor(void) { +#ifdef __linux__ + FILE* f; + u8 tmp[128]; + u64 min = 0, max = 0; + + if (getenv("AFL_SKIP_CPUFREQ")) return; + + if (cpu_aff > 0) + snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff, "/cpufreq/scaling_governor"); + else + snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"); + f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r"); + if (!f) { + if (cpu_aff > 0) + snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpufreq/policy", cpu_aff, "/scaling_governor"); + else + snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor"); + f = fopen(tmp, "r"); + } + if (!f) { + WARNF("Could not check CPU scaling governor"); + return; + } + + ACTF("Checking CPU scaling governor..."); + + if (!fgets(tmp, 128, f)) PFATAL("fgets() failed"); + + fclose(f); + + if (!strncmp(tmp, "perf", 4)) return; + + f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r"); + + if (f) { + if (fscanf(f, "%llu", &min) != 1) min = 0; + fclose(f); + } + + f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r"); + + if (f) { + if (fscanf(f, "%llu", &max) != 1) max = 0; + fclose(f); + } + + if (min == max) return; + + SAYF("\n" cLRD "[-] " cRST + "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n" + " between %llu and %llu MHz. Unfortunately, the scaling algorithm in the\n" + " kernel is imperfect and can miss the short-lived processes spawned by\n" + " afl-fuzz. To keep things moving, run these commands as root:\n\n" + + " cd /sys/devices/system/cpu\n" + " echo performance | tee cpu*/cpufreq/scaling_governor\n\n" + + " You can later go back to the original state by replacing 'performance' with\n" + " 'ondemand'. 
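A typical trigger for this check: on Ubuntu, /proc/sys/kernel/core_pattern often reads something like "|/usr/share/apport/apport %p %s %c", and the single-byte read() above only needs the leading '|' to detect the piped handler; running "echo core >/proc/sys/kernel/core_pattern" as root clears the condition.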
If you don't want to change the settings, set AFL_SKIP_CPUFREQ\n" + " to make afl-fuzz skip this check - but expect some performance drop.\n", + min / 1024, max / 1024); + + FATAL("Suboptimal CPU scaling governor"); +#endif +} + + +/* Count the number of logical CPU cores. */ + +void get_core_count(void) { + +#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) + + size_t s = sizeof(cpu_core_count); + + /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */ + +#ifdef __APPLE__ + + if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) + return; + +#else + + int s_name[2] = { CTL_HW, HW_NCPU }; + + if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; + +#endif /* ^__APPLE__ */ + +#else + +#ifdef HAVE_AFFINITY + + cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN); + +#else + + FILE* f = fopen("/proc/stat", "r"); + u8 tmp[1024]; + + if (!f) return; + + while (fgets(tmp, sizeof(tmp), f)) + if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) ++cpu_core_count; + + fclose(f); + +#endif /* ^HAVE_AFFINITY */ + +#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ + + if (cpu_core_count > 0) { + + u32 cur_runnable = 0; + + cur_runnable = (u32)get_runnable_processes(); + +#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) + + /* Add ourselves, since the 1-minute average doesn't include that yet. */ + + ++cur_runnable; + +#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ + + OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).", + cpu_core_count, cpu_core_count > 1 ? "s" : "", + cur_runnable, cur_runnable * 100.0 / cpu_core_count); + + if (cpu_core_count > 1) { + + if (cur_runnable > cpu_core_count * 1.5) { + + WARNF("System under apparent load, performance may be spotty."); + + } else if (cur_runnable + 1 <= cpu_core_count) { + + OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path); + + } + + } + + } else { + + cpu_core_count = 0; + WARNF("Unable to figure out the number of CPU cores."); + + } + +} + + +/* Validate and fix up out_dir and sync_dir when using -S. */ + +void fix_up_sync(void) { + + u8* x = sync_id; + + if (dumb_mode) + FATAL("-S / -M and -n are mutually exclusive"); + + if (skip_deterministic) { + + if (force_deterministic) + FATAL("use -S instead of -M -d"); + //else + // FATAL("-S already implies -d"); + + } + + while (*x) { + + if (!isalnum(*x) && *x != '_' && *x != '-') + FATAL("Non-alphanumeric fuzzer ID specified via -S or -M"); + + ++x; + + } + + if (strlen(sync_id) > 32) FATAL("Fuzzer ID too long"); + + x = alloc_printf("%s/%s", out_dir, sync_id); + + sync_dir = out_dir; + out_dir = x; + + if (!force_deterministic) { + skip_deterministic = 1; + use_splicing = 1; + } + +} + + +/* Handle screen resize (SIGWINCH). */ + +static void handle_resize(int sig) { + clear_screen = 1; +} + + +/* Check ASAN options. */ + +void check_asan_opts(void) { + u8* x = getenv("ASAN_OPTIONS"); + + if (x) { + + if (!strstr(x, "abort_on_error=1")) + FATAL("Custom ASAN_OPTIONS set without abort_on_error=1 - please fix!"); + + if (!strstr(x, "symbolize=0")) + FATAL("Custom ASAN_OPTIONS set without symbolize=0 - please fix!"); + + } + + x = getenv("MSAN_OPTIONS"); + + if (x) { + + if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) + FATAL("Custom MSAN_OPTIONS set without exit_code=" + STRINGIFY(MSAN_ERROR) " - please fix!"); + + if (!strstr(x, "symbolize=0")) + FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); + + } + +} + + +/* Handle stop signal (Ctrl-C, etc). 
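Example of the -S fix-up above: "afl-fuzz -o sync -S node02 ..." keeps sync_dir = "sync" and rewrites out_dir to "sync/node02"; unless -M forced deterministic mode, the secondary instance also gets skip_deterministic = 1 and use_splicing = 1.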
*/ + +static void handle_stop_sig(int sig) { + + stop_soon = 1; + + if (child_pid > 0) kill(child_pid, SIGKILL); + if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL); + +} + + +/* Handle skip request (SIGUSR1). */ + +static void handle_skipreq(int sig) { + + skip_requested = 1; + +} + + +/* Do a PATH search and find target binary to see that it exists and + isn't a shell script - a common and painful mistake. We also check for + a valid ELF header and for evidence of AFL instrumentation. */ + +void check_binary(u8* fname) { + + u8* env_path = 0; + struct stat st; + + s32 fd; + u8* f_data; + u32 f_len = 0; + + ACTF("Validating target binary..."); + + if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { + + target_path = ck_strdup(fname); + if (stat(target_path, &st) || !S_ISREG(st.st_mode) || + !(st.st_mode & 0111) || (f_len = st.st_size) < 4) + FATAL("Program '%s' not found or not executable", fname); + + } else { + + while (env_path) { + + u8 *cur_elem, *delim = strchr(env_path, ':'); + + if (delim) { + + cur_elem = ck_alloc(delim - env_path + 1); + memcpy(cur_elem, env_path, delim - env_path); + ++delim; + + } else cur_elem = ck_strdup(env_path); + + env_path = delim; + + if (cur_elem[0]) + target_path = alloc_printf("%s/%s", cur_elem, fname); + else + target_path = ck_strdup(fname); + + ck_free(cur_elem); + + if (!stat(target_path, &st) && S_ISREG(st.st_mode) && + (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break; + + ck_free(target_path); + target_path = 0; + + } + + if (!target_path) FATAL("Program '%s' not found or not executable", fname); + + } + + if (getenv("AFL_SKIP_BIN_CHECK")) return; + + /* Check for blatant user errors. */ + + if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) || + (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/'))) + FATAL("Please don't keep binaries in /tmp or /var/tmp"); + + fd = open(target_path, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", target_path); + + f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0); + + if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", target_path); + + close(fd); + + if (f_data[0] == '#' && f_data[1] == '!') { + + SAYF("\n" cLRD "[-] " cRST + "Oops, the target binary looks like a shell script. Some build systems will\n" + " sometimes generate shell stubs for dynamically linked programs; try static\n" + " library mode (./configure --disable-shared) if that's the case.\n\n" + + " Another possible cause is that you are actually trying to use a shell\n" + " wrapper around the fuzzed component. Invoking shell can slow down the\n" + " fuzzing process by a factor of 20x or more; it's best to write the wrapper\n" + " in a compiled language instead.\n"); + + FATAL("Program '%s' is a shell script", target_path); + + } + +#ifndef __APPLE__ + + if (f_data[0] != 0x7f || memcmp(f_data + 1, "ELF", 3)) + FATAL("Program '%s' is not an ELF binary", target_path); + +#else + +#if !defined(__arm__) && !defined(__arm64__) + if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED) + FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path); +#endif + +#endif /* ^!__APPLE__ */ + + if (!qemu_mode && !unicorn_mode && !dumb_mode && + !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { + + SAYF("\n" cLRD "[-] " cRST + "Looks like the target binary is not instrumented! The fuzzer depends on\n" + " compile-time instrumentation to isolate interesting test cases while\n" + " mutating the input data. 
For more information, and for tips on how to\n" + " instrument binaries, please see %s/README.\n\n" + + " When source code is not available, you may be able to leverage QEMU\n" + " mode support. Consult the README for tips on how to enable this.\n" + + " (It is also possible to use afl-fuzz as a traditional, \"dumb\" fuzzer.\n" + " For that, you can use the -n option - but expect much worse results.)\n", + doc_path); + + FATAL("No instrumentation detected"); + + } + + if ((qemu_mode || unicorn_mode) && + memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { + + SAYF("\n" cLRD "[-] " cRST + "This program appears to be instrumented with afl-gcc, but is being run in\n" + " QEMU or Unicorn mode (-Q or -U). This is probably not what you want -\n" + " this setup will be slow and offer no practical benefits.\n"); + + FATAL("Instrumentation found in -Q or -U mode"); + + } + + if (memmem(f_data, f_len, "libasan.so", 10) || + memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1; + + /* Detect persistent & deferred init signatures in the binary. */ + + if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) { + + OKF(cPIN "Persistent mode binary detected."); + setenv(PERSIST_ENV_VAR, "1", 1); + persistent_mode = 1; + + } else if (getenv("AFL_PERSISTENT")) { + + WARNF("AFL_PERSISTENT is no longer supported and may misbehave!"); + + } + + if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) { + + OKF(cPIN "Deferred forkserver binary detected."); + setenv(DEFER_ENV_VAR, "1", 1); + deferred_mode = 1; + + } else if (getenv("AFL_DEFER_FORKSRV")) { + + WARNF("AFL_DEFER_FORKSRV is no longer supported and may misbehave!"); + + } + + if (munmap(f_data, f_len)) PFATAL("munmap() failed"); + +} + + +/* Trim and possibly create a banner for the run. */ + +void fix_up_banner(u8* name) { + + if (!use_banner) { + + if (sync_id) { + + use_banner = sync_id; + + } else { + + u8* trim = strrchr(name, '/'); + if (!trim) use_banner = name; else use_banner = trim + 1; + + } + + } + + if (strlen(use_banner) > 32) { + + u8* tmp = ck_alloc(36); + sprintf(tmp, "%.32s...", use_banner); + use_banner = tmp; + + } + +} + + +/* Check if we're on TTY. */ + +void check_if_tty(void) { + + struct winsize ws; + + if (getenv("AFL_NO_UI")) { + OKF("Disabling the UI because AFL_NO_UI is set."); + not_on_tty = 1; + return; + } + + if (ioctl(1, TIOCGWINSZ, &ws)) { + + if (errno == ENOTTY) { + OKF("Looks like we're not running on a tty, so I'll be a bit less verbose."); + not_on_tty = 1; + } + + return; + } + +} + + +/* Set up signal handlers. More complicated than it needs to be, because libc on + Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you call + siginterrupt(), and does other stupid things. */ + +void setup_signal_handlers(void) { + + struct sigaction sa; + + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; + sa.sa_sigaction = NULL; + + sigemptyset(&sa.sa_mask); + + /* Various ways of saying "stop". */ + + sa.sa_handler = handle_stop_sig; + sigaction(SIGHUP, &sa, NULL); + sigaction(SIGINT, &sa, NULL); + sigaction(SIGTERM, &sa, NULL); + + /* Exec timeout notifications. */ + + sa.sa_handler = handle_timeout; + sigaction(SIGALRM, &sa, NULL); + + /* Window resize */ + + sa.sa_handler = handle_resize; + sigaction(SIGWINCH, &sa, NULL); + + /* SIGUSR1: skip entry */ + + sa.sa_handler = handle_skipreq; + sigaction(SIGUSR1, &sa, NULL); + + /* Things we don't care about.
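The SA_RESTART flag set above is the crux of the whole routine: it makes libc transparently restart interrupted syscalls, so a SIGWINCH or SIGUSR1 never surfaces as a spurious EINTR in the fuzzing loop. A minimal standalone sketch of the same pattern (illustrative only, not patch code):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static volatile sig_atomic_t stop_soon;

    static void handle_stop(int sig) { (void) sig; stop_soon = 1; }

    int main(void) {

      struct sigaction sa;

      memset(&sa, 0, sizeof(sa));
      sa.sa_flags = SA_RESTART;        /* restart interrupted syscalls */
      sigemptyset(&sa.sa_mask);

      sa.sa_handler = handle_stop;     /* "stop" signals */
      sigaction(SIGINT, &sa, NULL);

      sa.sa_handler = SIG_IGN;         /* signals we don't care about */
      sigaction(SIGPIPE, &sa, NULL);

      while (!stop_soon) pause();      /* pause() always returns after a
                                          caught signal, SA_RESTART or not */
      printf("stopping\n");
      return 0;

    }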
*/ + + sa.sa_handler = SIG_IGN; + sigaction(SIGTSTP, &sa, NULL); + sigaction(SIGPIPE, &sa, NULL); + +} + + +/* Rewrite argv for QEMU. */ + +char** get_qemu_argv(u8* own_loc, char** argv, int argc) { + + char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); + u8 *tmp, *cp, *rsl, *own_copy; + + memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); + + new_argv[2] = target_path; + new_argv[1] = "--"; + + /* Now we need to actually find the QEMU binary to put in argv[0]. */ + + tmp = getenv("AFL_PATH"); + + if (tmp) { + + cp = alloc_printf("%s/afl-qemu-trace", tmp); + + if (access(cp, X_OK)) + FATAL("Unable to find '%s'", tmp); + + target_path = new_argv[0] = cp; + return new_argv; + + } + + own_copy = ck_strdup(own_loc); + rsl = strrchr(own_copy, '/'); + + if (rsl) { + + *rsl = 0; + + cp = alloc_printf("%s/afl-qemu-trace", own_copy); + ck_free(own_copy); + + if (!access(cp, X_OK)) { + + target_path = new_argv[0] = cp; + return new_argv; + + } + + } else ck_free(own_copy); + + if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { + + target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace"); + return new_argv; + + } + + SAYF("\n" cLRD "[-] " cRST + "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n" + " separately by following the instructions in qemu_mode/README.qemu. If you\n" + " already have the binary installed, you may need to specify AFL_PATH in the\n" + " environment.\n\n" + + " Of course, even without QEMU, afl-fuzz can still work with binaries that are\n" + " instrumented at compile time with afl-gcc. It is also possible to use it as a\n" + " traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n"); + + FATAL("Failed to locate 'afl-qemu-trace'."); + +} + +/* Make a copy of the current command line. */ + +void save_cmdline(u32 argc, char** argv) { + + u32 len = 1, i; + u8* buf; + + for (i = 0; i < argc; ++i) + len += strlen(argv[i]) + 1; + + buf = orig_cmdline = ck_alloc(len); + + for (i = 0; i < argc; ++i) { + + u32 l = strlen(argv[i]); + + memcpy(buf, argv[i], l); + buf += l; + + if (i != argc - 1) *(buf++) = ' '; + + } + + *buf = 0; + +} + diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c new file mode 100644 index 00000000..59370c3d --- /dev/null +++ b/src/afl-fuzz-one.c @@ -0,0 +1,5719 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#include "afl-fuzz.h" + +/* MOpt */ + +int select_algorithm(void) { + + int i_puppet, j_puppet; + + double sele = ((double)(UR(10000))*0.0001); + j_puppet = 0; + for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) { + if (unlikely(i_puppet == 0)) { + if (sele < probability_now[swarm_now][i_puppet]) + break; + } else { + if (sele < probability_now[swarm_now][i_puppet]) { + j_puppet =1; + break; + } + } + } + if (j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1]) + FATAL("error select_algorithm"); + return i_puppet; +} + +/* Helper to choose random block len for block operations in fuzz_one(). 
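select_algorithm() above is a roulette-wheel draw: probability_now[swarm_now][] holds cumulative probabilities, and the first slot whose cumulative value exceeds a uniform draw in [0, 1) wins. A standalone sketch with a hypothetical five-operator table; rand() stands in for afl's UR():

    #include <stdio.h>
    #include <stdlib.h>

    #define OPERATOR_NUM 5

    /* Cumulative distribution over five hypothetical mutation operators. */
    static const double cumulative[OPERATOR_NUM] =
        { 0.10, 0.35, 0.60, 0.85, 1.00 };

    static int select_op(void) {

      /* Mirrors the draw in select_algorithm(): UR(10000) * 0.0001. */
      double sele = (double) (rand() % 10000) * 0.0001;
      int i;

      for (i = 0; i < OPERATOR_NUM; ++i)
        if (sele < cumulative[i]) return i;

      return OPERATOR_NUM - 1;  /* unreachable while the table ends at 1.0 */

    }

    int main(void) {

      int hits[OPERATOR_NUM] = { 0 };

      for (int n = 0; n < 100000; ++n) ++hits[select_op()];

      for (int i = 0; i < OPERATOR_NUM; ++i)
        printf("op %d: %d draws\n", i, hits[i]);

      return 0;

    }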
+ Doesn't return zero, provided that max_len is > 0. */ + +static u32 choose_block_len(u32 limit) { + + u32 min_value, max_value; + u32 rlim = MIN(queue_cycle, 3); + + if (!run_over10m) rlim = 1; + + switch (UR(rlim)) { + + case 0: min_value = 1; + max_value = HAVOC_BLK_SMALL; + break; + + case 1: min_value = HAVOC_BLK_SMALL; + max_value = HAVOC_BLK_MEDIUM; + break; + + default: + + if (UR(10)) { + + min_value = HAVOC_BLK_MEDIUM; + max_value = HAVOC_BLK_LARGE; + + } else { + + min_value = HAVOC_BLK_LARGE; + max_value = HAVOC_BLK_XL; + + } + + } + + if (min_value >= limit) min_value = 1; + + return min_value + UR(MIN(max_value, limit) - min_value + 1); + +} + + +/* Helper function to see if a particular change (xor_val = old ^ new) could + be a product of deterministic bit flips with the lengths and stepovers + attempted by afl-fuzz. This is used to avoid dupes in some of the + deterministic fuzzing operations that follow bit flips. We also + return 1 if xor_val is zero, which implies that the old and attempted new + values are identical and the exec would be a waste of time. */ + +static u8 could_be_bitflip(u32 xor_val) { + + u32 sh = 0; + + if (!xor_val) return 1; + + /* Shift left until first bit set. */ + + while (!(xor_val & 1)) { ++sh; xor_val >>= 1; } + + /* 1-, 2-, and 4-bit patterns are OK anywhere. */ + + if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1; + + /* 8-, 16-, and 32-bit patterns are OK only if shift factor is + divisible by 8, since that's the stepover for these ops. */ + + if (sh & 7) return 0; + + if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) + return 1; + + return 0; + +} + + +/* Helper function to see if a particular value is reachable through + arithmetic operations. Used for similar purposes. */ + +static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { + + u32 i, ov = 0, nv = 0, diffs = 0; + + if (old_val == new_val) return 1; + + /* See if one-byte adjustments to any byte could produce this result. */ + + for (i = 0; i < blen; ++i) { + + u8 a = old_val >> (8 * i), + b = new_val >> (8 * i); + + if (a != b) { ++diffs; ov = a; nv = b; } + + } + + /* If only one byte differs and the values are within range, return 1. */ + + if (diffs == 1) { + + if ((u8)(ov - nv) <= ARITH_MAX || + (u8)(nv - ov) <= ARITH_MAX) return 1; + + } + + if (blen == 1) return 0; + + /* See if two-byte adjustments to any byte would produce this result. */ + + diffs = 0; + + for (i = 0; i < blen / 2; ++i) { + + u16 a = old_val >> (16 * i), + b = new_val >> (16 * i); + + if (a != b) { ++diffs; ov = a; nv = b; } + + } + + /* If only one word differs and the values are within range, return 1. */ + + if (diffs == 1) { + + if ((u16)(ov - nv) <= ARITH_MAX || + (u16)(nv - ov) <= ARITH_MAX) return 1; + + ov = SWAP16(ov); nv = SWAP16(nv); + + if ((u16)(ov - nv) <= ARITH_MAX || + (u16)(nv - ov) <= ARITH_MAX) return 1; + + } + + /* Finally, let's do the same thing for dwords. */ + + if (blen == 4) { + + if ((u32)(old_val - new_val) <= ARITH_MAX || + (u32)(new_val - old_val) <= ARITH_MAX) return 1; + + new_val = SWAP32(new_val); + old_val = SWAP32(old_val); + + if ((u32)(old_val - new_val) <= ARITH_MAX || + (u32)(new_val - old_val) <= ARITH_MAX) return 1; + + } + + return 0; + +} + + +/* Last but not least, a similar helper to see if insertion of an + interesting integer is redundant given the insertions done for + shorter blen. 
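A quick harness for could_be_bitflip() above; the function body is copied from the listing, while the sample XOR patterns in main() are illustrative:

    #include <stdio.h>

    typedef unsigned char u8;
    typedef unsigned int  u32;

    static u8 could_be_bitflip(u32 xor_val) {

      u32 sh = 0;
      if (!xor_val) return 1;
      while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }
      if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;
      if (sh & 7) return 0;
      if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
        return 1;
      return 0;

    }

    int main(void) {

      printf("%u\n", could_be_bitflip(0x01));    /* 1: single walking bit   */
      printf("%u\n", could_be_bitflip(0x03));    /* 1: two adjacent bits    */
      printf("%u\n", could_be_bitflip(0xff00));  /* 1: whole byte, on-step  */
      printf("%u\n", could_be_bitflip(0x05));    /* 0: non-adjacent bits    */
      return 0;

    }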
The last param (check_le) is set if the caller + already executed LE insertion for current blen and wants to see + if BE variant passed in new_val is unique. */ + +static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { + + u32 i, j; + + if (old_val == new_val) return 1; + + /* See if one-byte insertions from interesting_8 over old_val could + produce new_val. */ + + for (i = 0; i < blen; ++i) { + + for (j = 0; j < sizeof(interesting_8); ++j) { + + u32 tval = (old_val & ~(0xff << (i * 8))) | + (((u8)interesting_8[j]) << (i * 8)); + + if (new_val == tval) return 1; + + } + + } + + /* Bail out unless we're also asked to examine two-byte LE insertions + as a preparation for BE attempts. */ + + if (blen == 2 && !check_le) return 0; + + /* See if two-byte insertions over old_val could give us new_val. */ + + for (i = 0; i < blen - 1; ++i) { + + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + + u32 tval = (old_val & ~(0xffff << (i * 8))) | + (((u16)interesting_16[j]) << (i * 8)); + + if (new_val == tval) return 1; + + /* Continue here only if blen > 2. */ + + if (blen > 2) { + + tval = (old_val & ~(0xffff << (i * 8))) | + (SWAP16(interesting_16[j]) << (i * 8)); + + if (new_val == tval) return 1; + + } + + } + + } + + if (blen == 4 && check_le) { + + /* See if four-byte insertions could produce the same result + (LE only). */ + + for (j = 0; j < sizeof(interesting_32) / 4; ++j) + if (new_val == (u32)interesting_32[j]) return 1; + + } + + return 0; + +} + + +#ifndef IGNORE_FINDS + +/* Helper function to compare buffers; returns first and last differing offset. We + use this to find reasonable locations for splicing two files. */ + +static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { + + s32 f_loc = -1; + s32 l_loc = -1; + u32 pos; + + for (pos = 0; pos < len; ++pos) { + + if (*(ptr1++) != *(ptr2++)) { + + if (f_loc == -1) f_loc = pos; + l_loc = pos; + + } + + } + + *first = f_loc; + *last = l_loc; + + return; + +} + +#endif /* !IGNORE_FINDS */ + +/* Take the current entry from the queue, fuzz it for a while. This + function is a tad too long... returns 0 if fuzzed successfully, 1 if + skipped or bailed out. */ + +u8 fuzz_one_original(char** argv) { + + s32 len, fd, temp_len, i, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + + u8 ret_val = 1, doing_det = 0; + + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; + +#ifdef IGNORE_FINDS + + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ + + if (queue_cur->depth > 1) return 1; + +#else + + if (pending_favored) { + + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ + + if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) return 1; + + } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + + /* Otherwise, still possibly skip non-favored cases, albeit less often. + The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. 
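Since locate_diffs() is what the splicing stage later builds on, a tiny standalone demonstration may help; the function is copied from the listing, the two sample buffers are made up:

    #include <stdio.h>

    typedef unsigned char u8;
    typedef unsigned int  u32;
    typedef int           s32;

    static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) {

      s32 f_loc = -1, l_loc = -1;
      u32 pos;

      for (pos = 0; pos < len; ++pos) {
        if (*(ptr1++) != *(ptr2++)) {
          if (f_loc == -1) f_loc = pos;
          l_loc = pos;
        }
      }

      *first = f_loc;
      *last  = l_loc;

    }

    int main(void) {

      u8 a[] = "GIF89a-AAAA";
      u8 b[] = "GIF89a-BBBA";
      s32 first, last;

      locate_diffs(a, b, sizeof(a) - 1, &first, &last);
      printf("first=%d last=%d\n", first, last);   /* first=7 last=9 */
      return 0;

    }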
*/ + + if (queue_cycle > 1 && (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) { + + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + + } else { + + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + + } + + } + +#endif /* ^IGNORE_FINDS */ + + if (not_on_tty) { + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); + } + + /* Map the test case into memory. */ + + fd = open(queue_cur->fname, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + + len = queue_cur->len; + + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); + + close(fd); + + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ + + out_buf = ck_alloc_nozero(len); + + subseq_tmouts = 0; + + cur_depth = queue_cur->depth; + + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ + + if (queue_cur->cal_failed) { + + u8 res = FAULT_TMOUT; + + if (queue_cur->cal_failed < CAL_CHANCES) { + + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + + if (res == FAULT_ERROR) + FATAL("Unable to execute target application"); + + } + + if (stop_soon || res != crash_mode) { + ++cur_skipped_paths; + goto abandon_entry; + } + + } + + /************ + * TRIMMING * + ************/ + + if (!dumb_mode && !queue_cur->trim_done && !custom_mutator) { + + u8 res = trim_case(argv, queue_cur, in_buf); + + if (res == FAULT_ERROR) + FATAL("Unable to execute target application"); + + if (stop_soon) { + ++cur_skipped_paths; + goto abandon_entry; + } + + /* Don't retry trimming, even if it failed. */ + + queue_cur->trim_done = 1; + + len = queue_cur->len; + + } + + memcpy(out_buf, in_buf, len); + + /********************* + * PERFORMANCE SCORE * + *********************/ + + orig_perf = perf_score = calculate_score(queue_cur); + + if (perf_score == 0) goto abandon_entry; + + if (custom_mutator) { + stage_short = "custom"; + stage_name = "custom mutator"; + stage_max = len << 3; + stage_val_type = STAGE_VAL_NONE; + + const u32 max_seed_size = 4096*4096; + u8* mutated_buf = ck_alloc(max_seed_size); + + orig_hit_cnt = queued_paths + unique_crashes; + + for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) { + size_t orig_size = (size_t) len; + size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX)); + if (mutated_size > 0) { + out_buf = ck_realloc(out_buf, mutated_size); + memcpy(out_buf, mutated_buf, mutated_size); + if (common_fuzz_stuff(argv, out_buf, (u32) mutated_size)) { + goto abandon_entry; + } + } + } + + ck_free(mutated_buf); + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max; + goto abandon_entry; + } + + + /* Skip right away if -d is given, if it has not been chosen sufficiently + often to warrant the expensive deterministic stage (fuzz_level), or + if it has gone through deterministic testing in earlier, resumed runs + (passed_det). */ + + if (skip_deterministic + || ((!queue_cur->passed_det) + && perf_score < ( + queue_cur->depth * 30 <= havoc_max_mult * 100 + ? 
queue_cur->depth * 30 + : havoc_max_mult * 100)) + || queue_cur->passed_det) +#ifdef USE_PYTHON + goto python_stage; +#else + goto havoc_stage; +#endif + + /* Skip deterministic fuzzing if exec path checksum puts this out of scope + for this master instance. */ + + if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) +#ifdef USE_PYTHON + goto python_stage; +#else + goto havoc_stage; +#endif + + doing_det = 1; + + /********************************************* + * SIMPLE BITFLIP (+dictionary construction) * + *********************************************/ + +#define FLIP_BIT(_ar, _b) do { \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ + } while (0) + + /* Single walking bit. */ + + stage_short = "flip1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; + + stage_val_type = STAGE_VAL_NONE; + + orig_hit_cnt = queued_paths + unique_crashes; + + prev_cksum = queue_cur->exec_cksum; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + + /* While flipping the least significant bit in every byte, pull off an extra + trick to detect possible syntax tokens. In essence, the idea is that if + you have a binary blob like this: + + xxxxxxxxIHDRxxxxxxxx + + ...and changing the leading and trailing bytes causes variable or no + changes in program flow, but touching any character in the "IHDR" string + always produces the same, distinctive path, it's highly likely that + "IHDR" is an atomically-checked magic value of special significance to + the fuzzed format. + + We do this here, rather than as a separate stage, because it's a nice + way to keep the operation approximately "free" (i.e., no extra execs). + + Empirically, performing the check when flipping the least significant bit + is advantageous, compared to doing it at the time of more disruptive + changes, where the program flow may be affected in more violent ways. + + The caveat is that we won't generate dictionaries in the -d mode or -S + mode - but that's probably a fair trade-off. + + This won't work particularly well with paths that exhibit variable + behavior, but fails gracefully, so we'll carry out the checks anyway. + + */ + + if (!dumb_mode && (stage_cur & 7) == 7) { + + u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + + if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + + /* If at end of file and we are still collecting a string, grab the + final character and force output. */ + + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; + + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); + + } else if (cksum != prev_cksum) { + + /* Otherwise, if the checksum has changed, see if we have something + worthwhile queued up, and collect that if the answer is yes. */ + + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); + + a_len = 0; + prev_cksum = cksum; + + } + + /* Continue collecting string, but only if the bit flip actually made + any difference - we don't want no-op tokens.
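The FLIP_BIT() macro above walks bits MSB-first within each byte: bit index b touches byte b >> 3 with mask 128 >> (b & 7), and flipping the same index twice restores the buffer. A short demonstration (illustrative only):

    #include <stdio.h>

    typedef unsigned char u8;
    typedef unsigned int  u32;

    #define FLIP_BIT(_ar, _b) do { \
        u8* _arf = (u8*)(_ar); \
        u32 _bf = (_b); \
        _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
      } while (0)

    int main(void) {

      u8 buf[2] = { 0x00, 0x00 };

      FLIP_BIT(buf, 0);  /* byte 0, mask 0x80 */
      FLIP_BIT(buf, 9);  /* byte 1, mask 0x40 */
      printf("%02x %02x\n", buf[0], buf[1]);   /* 80 40 */

      FLIP_BIT(buf, 0);  /* flipping twice restores the bit */
      printf("%02x %02x\n", buf[0], buf[1]);   /* 00 40 */
      return 0;

    }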
*/ + + if (cksum != queue_cur->exec_cksum) { + + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; + + } + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP1] += stage_max; + + /* Two walking bits. */ + + stage_name = "bitflip 2/1"; + stage_short = "flip2"; + stage_max = (len << 3) - 1; + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP2] += stage_max; + + /* Four walking bits. */ + + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; + + /* Effector map setup. These macros calculate: + + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. + + */ + +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. */ + + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; + + if (EFF_APOS(len - 1) != 0) { + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; + } + + /* Walking byte. */ + + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur; + + out_buf[stage_cur] ^= 0xFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. */ + + if (!eff_map[EFF_APOS(stage_cur)]) { + + u32 cksum; + + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. 
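A worked example of the effector-map macros above, assuming afl's default EFF_MAP_SCALE2 of 3, i.e. one map cell per 8 input bytes:

    #include <stdio.h>

    #define EFF_MAP_SCALE2 3   /* assumed default from config.h */

    #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
    #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
    #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)

    int main(void) {

      printf("offset 0  -> cell %d\n", EFF_APOS(0));              /* 0 */
      printf("offset 17 -> cell %d\n", EFF_APOS(17));             /* 2 */
      printf("len 20    -> %d-cell map\n", EFF_ALEN(20));         /* 3 */
      printf("8-byte token at offset 6 spans %d cells\n",
             EFF_SPAN_ALEN(6, 8));                                /* 2 */
      return 0;

    }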
*/ + + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; + + if (cksum != queue_cur->exec_cksum) { + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; + } + + } + + out_buf[stage_cur] ^= 0xFF; + + } + + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ + + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + + memset(eff_map, 1, EFF_ALEN(len)); + + blocks_eff_select += EFF_ALEN(len); + + } else { + + blocks_eff_select += eff_cnt; + + } + + blocks_eff_total += EFF_ALEN(len); + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; + + /* Two walking bytes. */ + + if (len < 2) goto skip_bitflip; + + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + --stage_max; + continue; + } + + stage_cur_byte = i; + + *(u16*)(out_buf + i) ^= 0xFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u16*)(out_buf + i) ^= 0xFFFF; + + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; + + if (len < 4) goto skip_bitflip; + + /* Four walking bytes. */ + + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + --stage_max; + continue; + } + + stage_cur_byte = i; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; + +skip_bitflip: + + if (no_arith) goto skip_arith; + + /********************** + * ARITHMETIC INC/DEC * + **********************/ + + /* 8-bit arithmetics. */ + + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; + + stage_val_type = STAGE_VAL_LE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u8 orig = out_buf[i]; + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)]) { + stage_max -= 2 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u8 r = orig ^ (orig + j); + + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. 
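The filter above works on the XOR of the original and mutated bytes: if that pattern is something the earlier bitflip stages already produced, the exec is skipped as redundant. Two worked cases (values are illustrative):

    #include <stdio.h>

    typedef unsigned char u8;

    int main(void) {

      u8 orig = 0x7f;
      /* 0x7f + 1 = 0x80, xor 0xff: a flip8 product, so arith skips it. */
      printf("0x%02x\n", (u8) (orig ^ (u8) (orig + 1)));

      orig = 0x41;
      /* 0x41 + 3 = 0x44, xor 0x05: not a bitflip product, so it runs. */
      printf("0x%02x\n", (u8) (orig ^ (u8) (orig + 3)));

      return 0;

    }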
*/ + + if (!could_be_bitflip(r)) { + + stage_cur_val = j; + out_buf[i] = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + r = orig ^ (orig - j); + + if (!could_be_bitflip(r)) { + + stage_cur_val = -j; + out_buf[i] = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + out_buf[i] = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; + + /* 16-bit arithmetics, both endians. */ + + if (len < 2) goto skip_arith; + + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + u16 orig = *(u16*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= 4 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u16 r1 = orig ^ (orig + j), + r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); + + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. */ + + stage_val_type = STAGE_VAL_LE; + + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + /* Big endian comes next. Same deal. */ + + stage_val_type = STAGE_VAL_BE; + + + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig >> 8) < j && !could_be_bitflip(r4)) { + + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + *(u16*)(out_buf + i) = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; + + /* 32-bit arithmetics, both endians. */ + + if (len < 4) goto skip_arith; + + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + u32 orig = *(u32*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u32 r1 = orig ^ (orig + j), + r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); + + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. 
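The overflow gates in the 16- and 32-bit arith stages deserve a worked case: a little-endian add is attempted only when the low byte (or word) carries, since anything narrower was already covered by the smaller stage, and the big-endian variant is done by swapping, adding, and swapping back. Illustrative 16-bit values below:

    #include <stdio.h>

    typedef unsigned short u16;

    #define SWAP16(_x) ((u16)((((u16)(_x)) << 8) | (((u16)(_x)) >> 8)))

    int main(void) {

      u16 orig = 0x10ff;
      int j = 1;

      /* Low byte 0xff + 1 carries, so the 16-bit LE attempt is worthwhile. */
      printf("LE gate: %d, result 0x%04x\n",
             (orig & 0xff) + j > 0xff, (u16) (orig + j));    /* 1, 0x1100 */

      /* Big-endian variant: swap, add, swap back. */
      printf("BE result: 0x%04x\n",
             (u16) SWAP16(SWAP16(orig) + j));                /* 0x11ff */

      return 0;

    }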
*/ + + stage_val_type = STAGE_VAL_LE; + + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + /* Big endian next. */ + + stage_val_type = STAGE_VAL_BE; + + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + *(u32*)(out_buf + i) = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; + +skip_arith: + + /********************** + * INTERESTING VALUES * + **********************/ + + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); + + stage_val_type = STAGE_VAL_LE; + + orig_hit_cnt = new_hit_cnt; + + /* Setting 8-bit integers. */ + + for (i = 0; i < len; ++i) { + + u8 orig = out_buf[i]; + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)]) { + stage_max -= sizeof(interesting_8); + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_8); ++j) { + + /* Skip if the value could be a product of bitflips or arithmetics. */ + + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { + --stage_max; + continue; + } + + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + out_buf[i] = orig; + ++stage_cur; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; + + /* Setting 16-bit integers, both endians. */ + + if (no_arith || len < 2) goto skip_interest; + + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + u16 orig = *(u16*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= sizeof(interesting_16); + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + + stage_cur_val = interesting_16[j]; + + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. 
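One detail of the 16-bit interesting-value pass that follows: the big-endian attempt is skipped whenever a value is byte-symmetric, because swapping it is a no-op. A small illustration with a few entries resembling interesting_16 (sample values only):

    #include <stdio.h>

    typedef unsigned short u16;

    #define SWAP16(_x) ((u16)((((u16)(_x)) << 8) | (((u16)(_x)) >> 8)))

    int main(void) {

      short vals[] = { -1, 0, 256, 512, 1024 };  /* sample entries */

      for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); ++i) {
        u16 v = (u16) vals[i];
        printf("0x%04x: BE attempt %s\n", v,
               v != SWAP16(v) ? "tried" : "skipped (byte-symmetric)");
      }

      return 0;

    }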
*/ + + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { + + stage_val_type = STAGE_VAL_LE; + + *(u16*)(out_buf + i) = interesting_16[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + + stage_val_type = STAGE_VAL_BE; + + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + } + + *(u16*)(out_buf + i) = orig; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; + + if (len < 4) goto skip_interest; + + /* Setting 32-bit integers, both endians. */ + + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; i++) { + + u32 orig = *(u32*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= sizeof(interesting_32) >> 1; + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + + stage_cur_val = interesting_32[j]; + + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ + + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { + + stage_val_type = STAGE_VAL_LE; + + *(u32*)(out_buf + i) = interesting_32[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + + stage_val_type = STAGE_VAL_BE; + + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + } + + *(u32*)(out_buf + i) = orig; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; + +skip_interest: + + /******************** + * DICTIONARY STUFF * + ********************/ + + if (!extras_cnt) goto skip_user_extras; + + /* Overwrite with user-supplied extras. */ + + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; + + stage_val_type = STAGE_VAL_NONE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u32 last_len = 0; + + stage_cur_byte = i; + + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ + + for (j = 0; j < extras_cnt; ++j) { + + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. 
Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. */ + + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + + --stage_max; + continue; + + } + + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + ++stage_cur; + + } + + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; + + /* Insertion of user-supplied extras. */ + + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; + + orig_hit_cnt = new_hit_cnt; + + ex_tmp = ck_alloc(len + MAX_DICT_FILE); + + for (i = 0; i <= len; ++i) { + + stage_cur_byte = i; + + for (j = 0; j < extras_cnt; ++j) { + + if (len + extras[j].len > MAX_FILE) { + --stage_max; + continue; + } + + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); + + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { + ck_free(ex_tmp); + goto abandon_entry; + } + + ++stage_cur; + + } + + /* Copy head */ + ex_tmp[i] = out_buf[i]; + + } + + ck_free(ex_tmp); + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; + +skip_user_extras: + + if (!a_extras_cnt) goto skip_extras; + + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + + stage_val_type = STAGE_VAL_NONE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u32 last_len = 0; + + stage_cur_byte = i; + + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + + /* See the comment in the earlier code; extras are sorted by size. */ + + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + + --stage_max; + continue; + + } + + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + ++stage_cur; + + } + + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_AO] += stage_max; + +skip_extras: + + /* If we made this to here without jumping to havoc_stage or abandon_entry, + we're properly done with deterministic steps and can mark it as such + in the .state/ directory. 
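The probabilistic cap in the "user extras (over)" stage keeps the deterministic cost bounded: once there are more than MAX_DET_EXTRAS tokens, each one survives at a given position with probability MAX_DET_EXTRAS / extras_cnt. A quick simulation, assuming afl's default cap of 200; rand() stands in for UR():

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_DET_EXTRAS 200   /* assumed default from config.h */

    int main(void) {

      unsigned extras_cnt = 1000, used = 0;

      for (unsigned j = 0; j < extras_cnt; ++j)
        if (!(extras_cnt > MAX_DET_EXTRAS &&
              (unsigned) (rand() % extras_cnt) >= MAX_DET_EXTRAS))
          ++used;

      printf("used %u of %u extras (expected ~%u)\n",
             used, extras_cnt, MAX_DET_EXTRAS);
      return 0;

    }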
*/ + + if (!queue_cur->passed_det) mark_as_det_done(queue_cur); + +#ifdef USE_PYTHON +python_stage: + /********************************** + * EXTERNAL MUTATORS (Python API) * + **********************************/ + + if (!py_module) goto havoc_stage; + + stage_name = "python"; + stage_short = "python"; + stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; + + if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; + + orig_hit_cnt = queued_paths + unique_crashes; + + char* retbuf = NULL; + size_t retlen = 0; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + struct queue_entry* target; + u32 tid; + u8* new_buf; + +retry_external_pick: + /* Pick a random other queue entry for passing to external API */ + do { tid = UR(queued_paths); } while (tid == current_entry && queued_paths > 1); + + target = queue; + + while (tid >= 100) { target = target->next_100; tid -= 100; } + while (tid--) target = target->next; + + /* Make sure that the target has a reasonable length. */ + + while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) { + target = target->next; + ++splicing_with; + } + + if (!target) goto retry_external_pick; + + /* Read the additional testcase into a new buffer. */ + fd = open(target->fname, O_RDONLY); + if (fd < 0) PFATAL("Unable to open '%s'", target->fname); + new_buf = ck_alloc_nozero(target->len); + ck_read(fd, new_buf, target->len, target->fname); + close(fd); + + fuzz_py(out_buf, len, new_buf, target->len, &retbuf, &retlen); + + ck_free(new_buf); + + if (retbuf) { + if (!retlen) + goto abandon_entry; + + if (common_fuzz_stuff(argv, retbuf, retlen)) { + free(retbuf); + goto abandon_entry; + } + + /* Reset retbuf/retlen */ + free(retbuf); + retbuf = NULL; + retlen = 0; + + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ + + if (queued_paths != havoc_queued) { + if (perf_score <= havoc_max_mult * 100) { + stage_max *= 2; + perf_score *= 2; + } + + havoc_queued = queued_paths; + } + } + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_PYTHON] += stage_max; + + if (python_only) { + /* Skip other stages */ + ret_val = 0; + goto abandon_entry; + } +#endif + + /**************** + * RANDOM HAVOC * + ****************/ + +havoc_stage: + + stage_cur_byte = -1; + + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ + + if (!splice_cycle) { + + stage_name = "havoc"; + stage_short = "havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + perf_score / havoc_div / 100; + + } else { + + static u8 tmp[32]; + + perf_score = orig_perf; + + sprintf(tmp, "splice %u", splice_cycle); + stage_name = tmp; + stage_short = "splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + + } + + if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; + + temp_len = len; + + orig_hit_cnt = queued_paths + unique_crashes; + + havoc_queued = queued_paths; + + /* We essentially just do several thousand runs (depending on perf_score) + where we take the input file and make random stacked tweaks. */ + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); + + stage_cur_val = use_stacking; + + for (i = 0; i < use_stacking; ++i) { + + switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) { + + case 0: + + /* Flip a single bit somewhere. Spooky! 
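The havoc loop above stacks use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)) tweaks per execution, i.e. a power of two between 2 and 128 with afl's default HAVOC_STACK_POW2 of 7, each power equally likely. A quick simulation of that schedule; rand() stands in for UR():

    #include <stdio.h>
    #include <stdlib.h>

    #define HAVOC_STACK_POW2 7   /* assumed default from config.h */

    int main(void) {

      int hist[HAVOC_STACK_POW2 + 1] = { 0 };

      for (int n = 0; n < 100000; ++n) {
        unsigned r = rand() % HAVOC_STACK_POW2;   /* UR(HAVOC_STACK_POW2) */
        ++hist[r + 1];                            /* use_stacking = 1 << (r+1) */
      }

      for (int p = 1; p <= HAVOC_STACK_POW2; ++p)
        printf("stack of %3d tweaks: %d draws\n", 1 << p, hist[p]);

      return 0;

    }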
*/ + + FLIP_BIT(out_buf, UR(temp_len << 3)); + break; + + case 1: + + /* Set byte to interesting value. */ + + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + break; + + case 2: + + /* Set word to interesting value, randomly choosing endian. */ + + if (temp_len < 2) break; + + if (UR(2)) { + + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; + + } else { + + *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( + interesting_16[UR(sizeof(interesting_16) >> 1)]); + + } + + break; + + case 3: + + /* Set dword to interesting value, randomly choosing endian. */ + + if (temp_len < 4) break; + + if (UR(2)) { + + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; + + } else { + + *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( + interesting_32[UR(sizeof(interesting_32) >> 2)]); + + } + + break; + + case 4: + + /* Randomly subtract from byte. */ + + out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); + break; + + case 5: + + /* Randomly add to byte. */ + + out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); + break; + + case 6: + + /* Randomly subtract from word, random endian. */ + + if (temp_len < 2) break; + + if (UR(2)) { + + u32 pos = UR(temp_len - 1); + + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); + + } + + break; + + case 7: + + /* Randomly add to word, random endian. */ + + if (temp_len < 2) break; + + if (UR(2)) { + + u32 pos = UR(temp_len - 1); + + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); + + } + + break; + + case 8: + + /* Randomly subtract from dword, random endian. */ + + if (temp_len < 4) break; + + if (UR(2)) { + + u32 pos = UR(temp_len - 3); + + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + + } + + break; + + case 9: + + /* Randomly add to dword, random endian. */ + + if (temp_len < 4) break; + + if (UR(2)) { + + u32 pos = UR(temp_len - 3); + + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); + + } + + break; + + case 10: + + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ + + out_buf[UR(temp_len)] ^= 1 + UR(255); + break; + + case 11 ... 12: { + + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ + + u32 del_from, del_len; + + if (temp_len < 2) break; + + /* Don't delete too much. */ + + del_len = choose_block_len(temp_len - 1); + + del_from = UR(temp_len - del_len + 1); + + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); + + temp_len -= del_len; + + break; + + } + + case 13: + + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + + /* Clone bytes (75%) or insert a block of constant bytes (25%). 
*/ + + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; + + if (actually_clone) { + + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); + + } else { + + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; + + } + + clone_to = UR(temp_len); + + new_buf = ck_alloc_nozero(temp_len + clone_len); + + /* Head */ + + memcpy(new_buf, out_buf, clone_to); + + /* Inserted part */ + + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + + } + + break; + + case 14: { + + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ + + u32 copy_from, copy_to, copy_len; + + if (temp_len < 2) break; + + copy_len = choose_block_len(temp_len - 1); + + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); + + if (UR(4)) { + + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + + } else memset(out_buf + copy_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + + break; + + } + + /* Values 15 and 16 can be selected only if there are any extras + present in the dictionaries. */ + + case 15: { + + /* Overwrite bytes with an extra. */ + + if (!extras_cnt || (a_extras_cnt && UR(2))) { + + /* No user-specified extras or odds in our favor. Let's use an + auto-detected one. */ + + u32 use_extra = UR(a_extras_cnt); + u32 extra_len = a_extras[use_extra].len; + u32 insert_at; + + if (extra_len > temp_len) break; + + insert_at = UR(temp_len - extra_len + 1); + memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); + + } else { + + /* No auto extras or odds in our favor. Use the dictionary. */ + + u32 use_extra = UR(extras_cnt); + u32 extra_len = extras[use_extra].len; + u32 insert_at; + + if (extra_len > temp_len) break; + + insert_at = UR(temp_len - extra_len + 1); + memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); + + } + + break; + + } + + case 16: { + + u32 use_extra, extra_len, insert_at = UR(temp_len + 1); + u8* new_buf; + + /* Insert an extra. Do the same dice-rolling stuff as for the + previous case. */ + + if (!extras_cnt || (a_extras_cnt && UR(2))) { + + use_extra = UR(a_extras_cnt); + extra_len = a_extras[use_extra].len; + + if (temp_len + extra_len >= MAX_FILE) break; + + new_buf = ck_alloc_nozero(temp_len + extra_len); + + /* Head */ + memcpy(new_buf, out_buf, insert_at); + + /* Inserted part */ + memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); + + } else { + + use_extra = UR(extras_cnt); + extra_len = extras[use_extra].len; + + if (temp_len + extra_len >= MAX_FILE) break; + + new_buf = ck_alloc_nozero(temp_len + extra_len); + + /* Head */ + memcpy(new_buf, out_buf, insert_at); + + /* Inserted part */ + memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); + + } + + /* Tail */ + memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, + temp_len - insert_at); + + ck_free(out_buf); + out_buf = new_buf; + temp_len += extra_len; + + break; + + } + + } + + } + + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry; + + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. 
*/ + + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); + + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ + + if (queued_paths != havoc_queued) { + + if (perf_score <= havoc_max_mult * 100) { + stage_max *= 2; + perf_score *= 2; + } + + havoc_queued = queued_paths; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + if (!splice_cycle) { + stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_HAVOC] += stage_max; + } else { + stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_SPLICE] += stage_max; + } + +#ifndef IGNORE_FINDS + + /************ + * SPLICING * + ************/ + + /* This is a last-resort strategy triggered by a full round with no findings. + It takes the current input file, randomly selects another input, and + splices them together at some offset, then relies on the havoc + code to mutate that blob. */ + +retry_splicing: + + if (use_splicing && splice_cycle++ < SPLICE_CYCLES && + queued_paths > 1 && queue_cur->len > 1) { + + struct queue_entry* target; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; + + /* First of all, if we've modified in_buf for havoc, let's clean that + up... */ + + if (in_buf != orig_in) { + ck_free(in_buf); + in_buf = orig_in; + len = queue_cur->len; + } + + /* Pick a random queue entry and seek to it. Don't splice with yourself. */ + + do { tid = UR(queued_paths); } while (tid == current_entry); + + splicing_with = tid; + target = queue; + + while (tid >= 100) { target = target->next_100; tid -= 100; } + while (tid--) target = target->next; + + /* Make sure that the target has a reasonable length. */ + + while (target && (target->len < 2 || target == queue_cur)) { + target = target->next; + ++splicing_with; + } + + if (!target) goto retry_splicing; + + /* Read the testcase into a new buffer. */ + + fd = open(target->fname, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", target->fname); + + new_buf = ck_alloc_nozero(target->len); + + ck_read(fd, new_buf, target->len, target->fname); + + close(fd); + + /* Find a suitable splicing location, somewhere between the first and + the last differing byte. Bail out if the difference is just a single + byte or so. */ + + locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + + if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { + ck_free(new_buf); + goto retry_splicing; + } + + /* Split somewhere between the first and last differing byte. */ + + split_at = f_diff + UR(l_diff - f_diff); + + /* Do the thing. */ + + len = target->len; + memcpy(new_buf, in_buf, split_at); + in_buf = new_buf; + + ck_free(out_buf); + out_buf = ck_alloc_nozero(len); + memcpy(out_buf, in_buf, len); + +#ifdef USE_PYTHON + goto python_stage; +#else + goto havoc_stage; +#endif + + } + +#endif /* !IGNORE_FINDS */ + + ret_val = 0; + +abandon_entry: + + splicing_with = -1; + + /* Update pending_not_fuzzed count if we made it through the calibration + cycle and have not seen this entry before. 
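Putting the splicing pieces together: locate_diffs() reports the first and last differing offsets, split_at is drawn uniformly between them, and the head of one parent is glued to the tail of the other. A standalone sketch reusing the earlier sample buffers; rand() stands in for UR():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {

      char a[] = "GIF89a-AAAA", b[] = "GIF89a-BBBA";
      int f_diff = 7, l_diff = 9;             /* from the locate_diffs sketch */
      int split_at = f_diff + rand() % (l_diff - f_diff);

      char spliced[sizeof(a)];
      memcpy(spliced, a, split_at);           /* head from parent A */
      memcpy(spliced + split_at, b + split_at,
             sizeof(b) - split_at);           /* tail (and NUL) from parent B */

      printf("split_at=%d -> %s\n", split_at, spliced);
      return 0;

    }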
*/ + + if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { + --pending_not_fuzzed; + queue_cur->was_fuzzed = 1; + if (queue_cur->favored) --pending_favored; + } + + ++queue_cur->fuzz_level; + + munmap(orig_in, queue_cur->len); + + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); + + return ret_val; + +#undef FLIP_BIT + +} + +/* MOpt mode */ +u8 pilot_fuzzing(char** argv) { + + s32 len, fd, temp_len, i, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + + u8 ret_val = 1, doing_det = 0; + + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; + +#ifdef IGNORE_FINDS + + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ + + if (queue_cur->depth > 1) return 1; + +#else + + if (pending_favored) { + + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ + + if ((queue_cur->was_fuzzed || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) return 1; + + } + else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + + /* Otherwise, still possibly skip non-favored cases, albeit less often. + The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. */ + + if (queue_cycle > 1 && !queue_cur->was_fuzzed) { + + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + + } + else { + + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + + } + + } + +#endif /* ^IGNORE_FINDS */ + + if (not_on_tty) { + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); + } + + /* Map the test case into memory. */ + + fd = open(queue_cur->fname, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + + len = queue_cur->len; + + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); + + close(fd); + + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ + + out_buf = ck_alloc_nozero(len); + + subseq_tmouts = 0; + + cur_depth = queue_cur->depth; + + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ + + if (queue_cur->cal_failed) { + + u8 res = FAULT_TMOUT; + + if (queue_cur->cal_failed < CAL_CHANCES) { + + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + + if (res == FAULT_ERROR) + FATAL("Unable to execute target application"); + + } + + if (stop_soon || res != crash_mode) { + ++cur_skipped_paths; + goto abandon_entry; + } + + } + + /************ + * TRIMMING * + ************/ + + if (!dumb_mode && !queue_cur->trim_done) { + + u8 res = trim_case(argv, queue_cur, in_buf); + + if (res == FAULT_ERROR) + FATAL("Unable to execute target application"); + + if (stop_soon) { + ++cur_skipped_paths; + goto abandon_entry; + } + + /* Don't retry trimming, even if it failed. 
*/
+
+ queue_cur->trim_done = 1;
+
+ len = queue_cur->len;
+
+ }
+
+ memcpy(out_buf, in_buf, len);
+
+ /*********************
+ * PERFORMANCE SCORE *
+ *********************/
+
+ orig_perf = perf_score = calculate_score(queue_cur);
+
+ /* Skip right away if -d is given, if we have done deterministic fuzzing on
+ this entry ourselves (was_fuzzed), or if it has gone through deterministic
+ testing in earlier, resumed runs (passed_det). */
+
+ if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
+ goto havoc_stage;
+
+ /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+ for this master instance. */
+
+ if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+ goto havoc_stage;
+
+ cur_ms_lv = get_cur_time();
+ if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) ||
+ (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0)))
+ {
+ key_puppet = 1;
+ goto pacemaker_fuzzing;
+ }
+
+ doing_det = 1;
+
+ /*********************************************
+ * SIMPLE BITFLIP (+dictionary construction) *
+ *********************************************/
+
+#define FLIP_BIT(_ar, _b) do { \
+ u8* _arf = (u8*)(_ar); \
+ u32 _bf = (_b); \
+ _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
+ } while (0)
+
+ /* Single walking bit. */
+
+ stage_short = "flip1";
+ stage_max = len << 3;
+ stage_name = "bitflip 1/1";
+
+ stage_val_type = STAGE_VAL_NONE;
+
+ orig_hit_cnt = queued_paths + unique_crashes;
+
+ prev_cksum = queue_cur->exec_cksum;
+
+ for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+ stage_cur_byte = stage_cur >> 3;
+
+ FLIP_BIT(out_buf, stage_cur);
+
+ if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+ FLIP_BIT(out_buf, stage_cur);
+
+ /* While flipping the least significant bit in every byte, pull off an extra
+ trick to detect possible syntax tokens. In essence, the idea is that if
+ you have a binary blob like this:
+
+ xxxxxxxxIHDRxxxxxxxx
+
+ ...and changing the leading and trailing bytes causes variable or no
+ changes in program flow, but touching any character in the "IHDR" string
+ always produces the same, distinctive path, it's highly likely that
+ "IHDR" is an atomically-checked magic value of special significance to
+ the fuzzed format.
+
+ We do this here, rather than as a separate stage, because it's a nice
+ way to keep the operation approximately "free" (i.e., no extra execs).
+
+ Empirically, performing the check when flipping the least significant bit
+ is advantageous, compared to doing it at the time of more disruptive
+ changes, where the program flow may be affected in more violent ways.
+
+ The caveat is that we won't generate dictionaries in the -d mode or -S
+ mode - but that's probably a fair trade-off.
+
+ This won't work particularly well with paths that exhibit variable
+ behavior, but fails gracefully, so we'll carry out the checks anyway.
+
+ */
+
+ if (!dumb_mode && (stage_cur & 7) == 7) {
+
+ u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+ if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+
+ /* If at end of file and we are still collecting a string, grab the
+ final character and force output.
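
The comment above describes the auto-dictionary heuristic; the code implementing it continues in the next hunk. As a self-contained model of the mechanism (ours, not the patch's control flow verbatim, though it includes the same end-of-file force flush), consider how a run of identical, distinctive path checksums over "IHDR" becomes one token:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    enum { MIN_TOK = 3, MAX_TOK = 32 };  /* stand-ins for MIN/MAX_AUTO_EXTRA */

    /* Stub for hash32(trace_bits, ...): pretend flipping any byte of the
       magic "IHDR" (positions 2..5) forces one identical, distinctive path,
       while every other position yields per-position noise. */
    static uint32_t fake_path_cksum(size_t pos) {
      return (pos >= 2 && pos < 6) ? 0xAAAAu : 0x1000u + (uint32_t)pos;
    }

    int main(void) {
      const char in[] = "xxIHDRxx";
      uint32_t base = 0xFFFFu;        /* checksum of the unmodified input */
      uint32_t prev = base;
      uint8_t  tok[MAX_TOK];
      uint32_t tok_len = 0;
      size_t   pos;

      for (pos = 0; pos < strlen(in); pos++) {
        uint32_t cur = fake_path_cksum(pos);  /* path after the LSB flip */

        if (cur != prev) {                    /* run ended: flush candidate */
          if (tok_len >= MIN_TOK && tok_len <= MAX_TOK)
            printf("token: %.*s\n", (int)tok_len, (const char*)tok);
          tok_len = 0;
          prev = cur;
        }

        if (cur != base && tok_len < MAX_TOK) /* flip had an effect at all */
          tok[tok_len++] = (uint8_t)in[pos];
      }

      if (tok_len >= MIN_TOK)                 /* end-of-file force flush */
        printf("token: %.*s\n", (int)tok_len, (const char*)tok);
      return 0;
    }

Running this prints "token: IHDR": the noisy x-positions start runs that get flushed as too short, while the four magic bytes accumulate into one candidate of acceptable length.
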
*/ + + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; + + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); + + } + else if (cksum != prev_cksum) { + + /* Otherwise, if the checksum has changed, see if we have something + worthwhile queued up, and collect that if the answer is yes. */ + + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); + + a_len = 0; + prev_cksum = cksum; + + } + + /* Continue collecting string, but only if the bit flip actually made + any difference - we don't want no-op tokens. */ + + if (cksum != queue_cur->exec_cksum) { + + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; + + } + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP1] += stage_max; + + /* Two walking bits. */ + + stage_name = "bitflip 2/1"; + stage_short = "flip2"; + stage_max = (len << 3) - 1; + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP2] += stage_max; + + + + /* Four walking bits. */ + + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; + + + + + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; + + + + + /* Effector map setup. These macros calculate: + + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. + + */ + +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. */ + + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; + + if (EFF_APOS(len - 1) != 0) { + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; + } + + /* Walking byte. 
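
Before the walking-byte pass below starts populating eff_map, it helps to pin down the arithmetic of the macros just defined. Assuming the stock EFF_MAP_SCALE2 of 3 from config.h (one flag per 8-byte block), the values work out as:

    #include <stdio.h>

    #define EFF_MAP_SCALE2 3   /* assumed upstream default */
    #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
    #define EFF_REM(_x)  ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
    #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)

    int main(void) {
      printf("%d\n", EFF_APOS(20));        /* 2: byte 20 lives in map slot 2  */
      printf("%d\n", EFF_ALEN(17));        /* 3: a 17-byte file needs 3 slots */
      printf("%d\n", EFF_SPAN_ALEN(6, 4)); /* 2: 4 bytes at offset 6 straddle */
      return 0;                            /*    slots 0 and 1                */
    }

This is what makes the later memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, len)) checks cheap: a dictionary token is only tried if at least one flag in its span is set.
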
*/ + + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; + + + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur; + + out_buf[stage_cur] ^= 0xFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. */ + + if (!eff_map[EFF_APOS(stage_cur)]) { + + u32 cksum; + + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. */ + + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; + + if (cksum != queue_cur->exec_cksum) { + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; + } + + } + + out_buf[stage_cur] ^= 0xFF; + + } + + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ + + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + + memset(eff_map, 1, EFF_ALEN(len)); + + blocks_eff_select += EFF_ALEN(len); + + } + else { + + blocks_eff_select += eff_cnt; + + } + + blocks_eff_total += EFF_ALEN(len); + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; + + + + + + /* Two walking bytes. */ + + if (len < 2) goto skip_bitflip; + + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; + + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + --stage_max; + continue; + } + + stage_cur_byte = i; + + *(u16*)(out_buf + i) ^= 0xFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u16*)(out_buf + i) ^= 0xFFFF; + + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; + + + + + if (len < 4) goto skip_bitflip; + + /* Four walking bytes. */ + + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; + + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + --stage_max; + continue; + } + + stage_cur_byte = i; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; + + + + + + + skip_bitflip: + + if (no_arith) goto skip_arith; + + /********************** + * ARITHMETIC INC/DEC * + **********************/ + + /* 8-bit arithmetics. */ + + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; + + + + + stage_val_type = STAGE_VAL_LE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u8 orig = out_buf[i]; + + /* Let's consult the effector map... 
*/ + + if (!eff_map[EFF_APOS(i)]) { + stage_max -= 2 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u8 r = orig ^ (orig + j); + + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. */ + + if (!could_be_bitflip(r)) { + + stage_cur_val = j; + out_buf[i] = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + r = orig ^ (orig - j); + + if (!could_be_bitflip(r)) { + + stage_cur_val = -j; + out_buf[i] = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + out_buf[i] = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; + + + + + + /* 16-bit arithmetics, both endians. */ + + if (len < 2) goto skip_arith; + + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; + + + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + u16 orig = *(u16*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= 4 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u16 r1 = orig ^ (orig + j), + r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); + + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. */ + + stage_val_type = STAGE_VAL_LE; + + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + /* Big endian comes next. Same deal. */ + + stage_val_type = STAGE_VAL_BE; + + + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig >> 8) < j && !could_be_bitflip(r4)) { + + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + *(u16*)(out_buf + i) = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; + + + + + /* 32-bit arithmetics, both endians. */ + + if (len < 4) goto skip_arith; + + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; + + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + u32 orig = *(u32*)(out_buf + i); + + /* Let's consult the effector map... 
*/ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u32 r1 = orig ^ (orig + j), + r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); + + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. */ + + stage_val_type = STAGE_VAL_LE; + + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_cur++; + + } else --stage_max; + + /* Big endian next. */ + + stage_val_type = STAGE_VAL_BE; + + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + *(u32*)(out_buf + i) = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; + + + + + skip_arith: + + /********************** + * INTERESTING VALUES * + **********************/ + + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); + + + + stage_val_type = STAGE_VAL_LE; + + orig_hit_cnt = new_hit_cnt; + + /* Setting 8-bit integers. */ + + for (i = 0; i < len; ++i) { + + u8 orig = out_buf[i]; + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)]) { + stage_max -= sizeof(interesting_8); + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_8); ++j) { + + /* Skip if the value could be a product of bitflips or arithmetics. */ + + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { + --stage_max; + continue; + } + + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + out_buf[i] = orig; + ++stage_cur; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; + + + + + /* Setting 16-bit integers, both endians. */ + + if (no_arith || len < 2) goto skip_interest; + + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + u16 orig = *(u16*)(out_buf + i); + + /* Let's consult the effector map... 
*/ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= sizeof(interesting_16); + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + + stage_cur_val = interesting_16[j]; + + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. */ + + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { + + stage_val_type = STAGE_VAL_LE; + + *(u16*)(out_buf + i) = interesting_16[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + + stage_val_type = STAGE_VAL_BE; + + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + } + + *(u16*)(out_buf + i) = orig; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; + + + + + + if (len < 4) goto skip_interest; + + /* Setting 32-bit integers, both endians. */ + + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + u32 orig = *(u32*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= sizeof(interesting_32) >> 1; + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + + stage_cur_val = interesting_32[j]; + + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ + + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { + + stage_val_type = STAGE_VAL_LE; + + *(u32*)(out_buf + i) = interesting_32[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + + stage_val_type = STAGE_VAL_BE; + + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + } + + *(u32*)(out_buf + i) = orig; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; + + + + + + skip_interest: + + /******************** + * DICTIONARY STUFF * + ********************/ + + if (!extras_cnt) goto skip_user_extras; + + /* Overwrite with user-supplied extras. 
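
A note on the could_be_bitflip()/could_be_arith()/could_be_interest() guards used throughout the stages above: their only job is to avoid re-executing inputs that an earlier deterministic stage already produced. For the bitflip case, the test reduces to pattern-matching the XOR of old and new value against what the walking-bit and walking-byte stages can generate. The sketch below is consistent with that description; the project's actual implementation may be shaped differently.

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: 1 if xor_val is a pattern reachable by bitflip 1/1, 2/1, 4/1,
       8/8, 16/8 or 32/8. */
    static int sketch_could_be_bitflip(uint32_t xor_val) {
      uint32_t sh = 0;
      if (!xor_val) return 1;                /* no-op is trivially reachable */
      while (!(xor_val & 1)) { sh++; xor_val >>= 1; }   /* strip trailing 0s */
      if (xor_val == 1 || xor_val == 3 || xor_val == 15)
        return 1;                            /* 1-, 2- or 4-bit runs         */
      if (sh & 7) return 0;                  /* byte flips are byte-aligned  */
      if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff)
        return 1;                            /* whole flipped bytes          */
      return 0;
    }

    int main(void) {
      uint8_t orig = 0x41;
      printf("%d\n", sketch_could_be_bitflip(orig ^ 0x43)); /* 1: 1-bit diff   */
      printf("%d\n", sketch_could_be_bitflip(orig ^ 0x46)); /* 0: arith keeps it */
      return 0;
    }
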
*/ + + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; + + + + + stage_val_type = STAGE_VAL_NONE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u32 last_len = 0; + + stage_cur_byte = i; + + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ + + for (j = 0; j < extras_cnt; ++j) { + + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. */ + + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + + --stage_max; + continue; + + } + + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + ++stage_cur; + + } + + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; + + /* Insertion of user-supplied extras. */ + + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; + + + + + orig_hit_cnt = new_hit_cnt; + + ex_tmp = ck_alloc(len + MAX_DICT_FILE); + + for (i = 0; i <= len; ++i) { + + stage_cur_byte = i; + + for (j = 0; j < extras_cnt; ++j) { + + if (len + extras[j].len > MAX_FILE) { + --stage_max; + continue; + } + + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); + + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { + ck_free(ex_tmp); + goto abandon_entry; + } + + ++stage_cur; + + } + + /* Copy head */ + ex_tmp[i] = out_buf[i]; + + } + + ck_free(ex_tmp); + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; + + skip_user_extras: + + if (!a_extras_cnt) goto skip_extras; + + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + + + stage_val_type = STAGE_VAL_NONE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u32 last_len = 0; + + stage_cur_byte = i; + + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + + /* See the comment in the earlier code; extras are sorted by size. */ + + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + + --stage_max; + continue; + + } + + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + ++stage_cur; + + } + + /* Restore all the clobbered memory. 
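
One subtlety in the "user extras (insert)" loop above is how cheaply ex_tmp is maintained: the tail is re-copied for every candidate, but the head grows by exactly one byte per outer iteration (the trailing ex_tmp[i] = out_buf[i]), so head bytes are never copied twice. The same pattern in isolation, with illustrative data:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      const char buf[]   = "HEADTAIL";   /* stand-in for out_buf */
      const char extra[] = "TOK";        /* one dictionary token */
      size_t len = 8, elen = 3, i;
      char ex_tmp[8 + 3 + 1];

      for (i = 0; i <= len; i++) {
        memcpy(ex_tmp + i, extra, elen);              /* insert token at i */
        memcpy(ex_tmp + i + elen, buf + i, len - i);  /* copy the tail     */
        printf("%.*s\n", (int)(len + elen), ex_tmp);  /* candidate input   */
        ex_tmp[i] = buf[i];              /* head grows by one byte for i+1 */
      }
      return 0;
    }

The first line printed is "TOKHEADTAIL", the last is "HEADTAILTOK", with the token sliding one position per iteration in between.
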
*/
+ memcpy(out_buf + i, in_buf + i, last_len);
+
+ }
+
+ new_hit_cnt = queued_paths + unique_crashes;
+
+ stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt;
+ stage_cycles[STAGE_EXTRAS_AO] += stage_max;
+
+ skip_extras:
+
+ /* If we made it this far without jumping to havoc_stage or abandon_entry,
+ we're properly done with deterministic steps and can mark it as such
+ in the .state/ directory. */
+
+ if (!queue_cur->passed_det) mark_as_det_done(queue_cur);
+
+ /****************
+ * RANDOM HAVOC *
+ ****************/
+
+ havoc_stage:
+ pacemaker_fuzzing:
+
+ stage_cur_byte = -1;
+
+ /* The havoc stage mutation code is also invoked when splicing files; if the
+ splice_cycle variable is set, generate different descriptions and such. */
+
+ if (!splice_cycle) {
+
+ stage_name = "MOpt-havoc";
+ stage_short = "MOpt_havoc";
+ stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+ perf_score / havoc_div / 100;
+
+ }
+ else {
+
+ static u8 tmp[32];
+
+ perf_score = orig_perf;
+
+ sprintf(tmp, "MOpt-splice %u", splice_cycle);
+ stage_name = tmp;
+ stage_short = "MOpt_splice";
+ stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+
+ }
+
+ s32 temp_len_puppet;
+ cur_ms_lv = get_cur_time();
+
+ {
+
+ if (key_puppet == 1)
+ {
+ if (unlikely(orig_hit_cnt_puppet == 0))
+ {
+ orig_hit_cnt_puppet = queued_paths + unique_crashes;
+ last_limit_time_start = get_cur_time();
+ SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
+ }
+ }
+
+ {
+#ifndef IGNORE_FINDS
+ havoc_stage_puppet:
+#endif
+
+ stage_cur_byte = -1;
+
+ /* The havoc stage mutation code is also invoked when splicing files; if the
+ splice_cycle variable is set, generate different descriptions and such. */
+
+ if (!splice_cycle) {
+
+ stage_name = "MOpt-havoc";
+ stage_short = "MOpt_havoc";
+ stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+ perf_score / havoc_div / 100;
+
+ }
+ else {
+ static u8 tmp[32];
+ perf_score = orig_perf;
+ sprintf(tmp, "MOpt-splice %u", splice_cycle);
+ stage_name = tmp;
+ stage_short = "MOpt_splice";
+ stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
+ }
+
+ if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+
+ temp_len = len;
+
+ orig_hit_cnt = queued_paths + unique_crashes;
+
+ havoc_queued = queued_paths;
+
+ for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+ u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+
+ stage_cur_val = use_stacking;
+
+ for (i = 0; i < operator_num; ++i)
+ {
+ stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
+ }
+
+ for (i = 0; i < use_stacking; ++i) {
+
+ switch (select_algorithm()) {
+
+ case 0:
+ /* Flip a single bit somewhere. Spooky!
*/ + FLIP_BIT(out_buf, UR(temp_len << 3)); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1; + break; + + + case 1: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1; + break; + + case 2: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + FLIP_BIT(out_buf, temp_len_puppet + 2); + FLIP_BIT(out_buf, temp_len_puppet + 3); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1; + break; + + case 3: + if (temp_len < 4) break; + out_buf[UR(temp_len)] ^= 0xFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1; + break; + + case 4: + if (temp_len < 8) break; + *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1; + break; + + case 5: + if (temp_len < 8) break; + *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1; + break; + + case 6: + out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); + out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1; + break; + + case 7: + /* Randomly subtract from word, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + } + else { + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); + } + /* Randomly add to word, random endian. */ + if (UR(2)) { + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); + } + else { + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); + } + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1; + break; + + + case 8: + /* Randomly subtract from dword, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + } + else { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + } + /* Randomly add to dword, random endian. */ + //if (temp_len < 4) break; + if (UR(2)) { + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); + } + else { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); + } + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1; + break; + + + case 9: + /* Set byte to interesting value. */ + if (temp_len < 4) break; + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1; + break; + + case 10: + /* Set word to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; + if (UR(2)) { + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; + } + else { + *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( + interesting_16[UR(sizeof(interesting_16) >> 1)]); + } + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1; + break; + + + case 11: + /* Set dword to interesting value, randomly choosing endian. 
*/ + + if (temp_len < 8) break; + + if (UR(2)) { + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; + } + else { + *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( + interesting_32[UR(sizeof(interesting_32) >> 2)]); + } + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1; + break; + + + case 12: + + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ + + out_buf[UR(temp_len)] ^= 1 + UR(255); + stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1; + break; + + + + case 13: { + + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ + + u32 del_from, del_len; + + if (temp_len < 2) break; + + /* Don't delete too much. */ + + del_len = choose_block_len(temp_len - 1); + + del_from = UR(temp_len - del_len + 1); + + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); + + temp_len -= del_len; + stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1; + break; + + } + + case 14: + + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + + /* Clone bytes (75%) or insert a block of constant bytes (25%). */ + + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; + + if (actually_clone) { + + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); + + } + else { + + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; + + } + + clone_to = UR(temp_len); + + new_buf = ck_alloc_nozero(temp_len + clone_len); + + /* Head */ + + memcpy(new_buf, out_buf, clone_to); + + /* Inserted part */ + + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1; + } + + break; + + case 15: { + + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ + + u32 copy_from, copy_to, copy_len; + + if (temp_len < 2) break; + + copy_len = choose_block_len(temp_len - 1); + + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); + + if (UR(4)) { + + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + + } + else memset(out_buf + copy_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1; + break; + + } + + + } + + } + + + tmp_pilot_time += 1; + + + + + u64 temp_total_found = queued_paths + unique_crashes; + + + + + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry_puppet; + + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ + + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); + + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. 
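
Stepping back to case 14 above: growing the buffer is done by stitching three spans (head, inserted block, tail) into a fresh allocation, and the same head/insert/tail pattern recurs in several havoc operators. The surgery in isolation, with plain malloc standing in for ck_alloc_nozero:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
      char buf[] = "AAAABBBB";
      size_t len = 8, clone_from = 4, clone_len = 4, clone_to = 2;

      char* new_buf = malloc(len + clone_len);
      if (!new_buf) return 1;

      memcpy(new_buf, buf, clone_to);                           /* head     */
      memcpy(new_buf + clone_to, buf + clone_from, clone_len);  /* inserted */
      memcpy(new_buf + clone_to + clone_len, buf + clone_to,
             len - clone_to);                                   /* tail     */

      printf("%.*s\n", (int)(len + clone_len), new_buf);  /* AABBBBAABBBB */
      free(new_buf);
      return 0;
    }
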
*/
+
+ if (queued_paths != havoc_queued) {
+
+ if (perf_score <= havoc_max_mult * 100) {
+ stage_max *= 2;
+ perf_score *= 2;
+ }
+
+ havoc_queued = queued_paths;
+
+ }
+
+ if (unlikely(queued_paths + unique_crashes > temp_total_found))
+ {
+ u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found;
+ total_puppet_find = total_puppet_find + temp_temp_puppet;
+ for (i = 0; i < 16; ++i)
+ {
+ if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i])
+ stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet;
+ }
+ }
+
+ }
+ new_hit_cnt = queued_paths + unique_crashes;
+
+ if (!splice_cycle) {
+ stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt;
+ stage_cycles[STAGE_HAVOC] += stage_max;
+ } else {
+ stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt;
+ stage_cycles[STAGE_SPLICE] += stage_max;
+ }
+
+#ifndef IGNORE_FINDS
+
+ /************
+ * SPLICING *
+ ************/
+
+ retry_splicing_puppet:
+
+ if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
+ queued_paths > 1 && queue_cur->len > 1) {
+
+ struct queue_entry* target;
+ u32 tid, split_at;
+ u8* new_buf;
+ s32 f_diff, l_diff;
+
+ /* First of all, if we've modified in_buf for havoc, let's clean that
+ up... */
+
+ if (in_buf != orig_in) {
+ ck_free(in_buf);
+ in_buf = orig_in;
+ len = queue_cur->len;
+ }
+
+ /* Pick a random queue entry and seek to it. Don't splice with yourself. */
+
+ do { tid = UR(queued_paths); } while (tid == current_entry);
+
+ splicing_with = tid;
+ target = queue;
+
+ while (tid >= 100) { target = target->next_100; tid -= 100; }
+ while (tid--) target = target->next;
+
+ /* Make sure that the target has a reasonable length. */
+
+ while (target && (target->len < 2 || target == queue_cur)) {
+ target = target->next;
+ ++splicing_with;
+ }
+
+ if (!target) goto retry_splicing_puppet;
+
+ /* Read the testcase into a new buffer. */
+
+ fd = open(target->fname, O_RDONLY);
+
+ if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+
+ new_buf = ck_alloc_nozero(target->len);
+
+ ck_read(fd, new_buf, target->len, target->fname);
+
+ close(fd);
+
+ /* Find a suitable splicing location, somewhere between the first and
+ the last differing byte. Bail out if the difference is just a single
+ byte or so. */
+
+ locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+
+ if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+ ck_free(new_buf);
+ goto retry_splicing_puppet;
+ }
+
+ /* Split somewhere between the first and last differing byte. */
+
+ split_at = f_diff + UR(l_diff - f_diff);
+
+ /* Do the thing. */
+
+ len = target->len;
+ memcpy(new_buf, in_buf, split_at);
+ in_buf = new_buf;
+ ck_free(out_buf);
+ out_buf = ck_alloc_nozero(len);
+ memcpy(out_buf, in_buf, len);
+ goto havoc_stage_puppet;
+
+ }
+
+#endif /* !IGNORE_FINDS */
+
+ ret_val = 0;
+
+ abandon_entry:
+ abandon_entry_puppet:
+
+ if (splice_cycle >= SPLICE_CYCLES_puppet)
+ SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
+
+ splicing_with = -1;
+
+ /* Update pending_not_fuzzed count if we made it through the calibration
+ cycle and have not seen this entry before.
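
Two MOpt mechanics in the hunks above deserve unpacking. First, credit assignment: stage_cycles_puppet_v3 snapshots each operator's cycle counter before the stacked mutations, so when the stacked input yields new paths or crashes, the finds are added to stage_finds_puppet_v2 for exactly those operators whose counters advanced during the stack. Second, the operators themselves are drawn by select_algorithm(), defined elsewhere in this patch, which samples according to the current swarm's probability vector. A generic roulette-wheel selection over a cumulative table captures the idea; all names below are illustrative, not the patch's code.

    #include <stdio.h>
    #include <stdlib.h>

    #define OPERATOR_NUM 16

    /* Cumulative probability table for the current swarm; in MOpt this would
       be derived from the particle positions. Illustrative values only. */
    static double cumulative[OPERATOR_NUM];

    static int select_algorithm_sketch(void) {
      double r = (double)rand() / ((double)RAND_MAX + 1.0);  /* r in [0,1) */
      int op;
      for (op = 0; op < OPERATOR_NUM; op++)
        if (r < cumulative[op]) return op;  /* first bucket covering r */
      return OPERATOR_NUM - 1;              /* guard against rounding  */
    }

    int main(void) {
      int i;
      for (i = 0; i < OPERATOR_NUM; i++)    /* uniform weights for the demo */
        cumulative[i] = (double)(i + 1) / OPERATOR_NUM;
      for (i = 0; i < 8; i++) printf("%d ", select_algorithm_sketch());
      printf("\n");
      return 0;
    }
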
*/ + + // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { + // queue_cur->was_fuzzed = 1; + // --pending_not_fuzzed; + // if (queue_cur->favored) --pending_favored; + // } + + munmap(orig_in, queue_cur->len); + + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); + + + if (key_puppet == 1) { + if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) { + key_puppet = 0; + cur_ms_lv = get_cur_time(); + new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt_puppet = 0; + last_limit_time_start = 0; + } + } + + + if (unlikely(tmp_pilot_time > period_pilot)) { + total_pacemaker_time += tmp_pilot_time; + new_hit_cnt = queued_paths + unique_crashes; + swarm_fitness[swarm_now] = (double)(total_puppet_find - temp_puppet_find) / ((double)(tmp_pilot_time)/ period_pilot_tmp); + tmp_pilot_time = 0; + temp_puppet_find = total_puppet_find; + + u64 temp_stage_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { + double temp_eff = 0.0; + + if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i]) + temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - stage_finds_puppet[swarm_now][i]) / + (double)(stage_cycles_puppet_v2[swarm_now][i] - stage_cycles_puppet[swarm_now][i]); + + if (eff_best[swarm_now][i] < temp_eff) { + eff_best[swarm_now][i] = temp_eff; + L_best[swarm_now][i] = x_now[swarm_now][i]; + } + + stage_finds_puppet[swarm_now][i] = stage_finds_puppet_v2[swarm_now][i]; + stage_cycles_puppet[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; + temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i]; + } + + swarm_now = swarm_now + 1; + if (swarm_now == swarm_num) { + key_module = 1; + for (i = 0; i < operator_num; ++i) { + core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; + core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; + core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; + } + + double swarm_eff = 0.0; + swarm_now = 0; + for (i = 0; i < swarm_num; ++i) { + if (swarm_fitness[i] > swarm_eff) { + swarm_eff = swarm_fitness[i]; + swarm_now = i; + } + } + if (swarm_now <0 || swarm_now > swarm_num - 1) + PFATAL("swarm_now error number %d", swarm_now); + + } + } + return ret_val; + } + } + + +#undef FLIP_BIT + +} + + +u8 core_fuzzing(char** argv) { + int i; + + if (swarm_num == 1) { + key_module = 2; + return 0; + } + + + s32 len, fd, temp_len, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + + u8 ret_val = 1, doing_det = 0; + + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; + +#ifdef IGNORE_FINDS + + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ + + if (queue_cur->depth > 1) return 1; + +#else + + if (pending_favored) { + + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ + + if ((queue_cur->was_fuzzed || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) return 1; + + } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + + /* Otherwise, still possibly skip non-favored cases, albeit less often. + The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. 
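
Returning to the pilot epilogue just above: once tmp_pilot_time exceeds period_pilot, each swarm's fitness is its new finds normalized per pacemaker period, swarm_fitness = (total_puppet_find - temp_puppet_find) / (tmp_pilot_time / period_pilot_tmp), and after every swarm has had a turn the best-scoring one is handed to the core module. As a toy check of the formula:

    #include <stdio.h>

    /* Restatement of the fitness update: new finds per pacemaker period. */
    static double fitness(double delta_finds, double elapsed, double period) {
      return delta_finds / (elapsed / period);
    }

    int main(void) {
      /* 12 new finds over 50000 time units with a 5000-unit period: */
      printf("%f\n", fitness(12, 50000, 5000));  /* prints 1.200000 */
      return 0;
    }
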
*/ + + if (queue_cycle > 1 && !queue_cur->was_fuzzed) { + + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + + } else { + + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + + } + + } + +#endif /* ^IGNORE_FINDS */ + + if (not_on_tty) { + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); + } + + /* Map the test case into memory. */ + + fd = open(queue_cur->fname, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + + len = queue_cur->len; + + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); + + close(fd); + + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ + + out_buf = ck_alloc_nozero(len); + + subseq_tmouts = 0; + + cur_depth = queue_cur->depth; + + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ + + if (queue_cur->cal_failed) { + + u8 res = FAULT_TMOUT; + + if (queue_cur->cal_failed < CAL_CHANCES) { + + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + + if (res == FAULT_ERROR) + FATAL("Unable to execute target application"); + + } + + if (stop_soon || res != crash_mode) { + ++cur_skipped_paths; + goto abandon_entry; + } + + } + + /************ + * TRIMMING * + ************/ + + if (!dumb_mode && !queue_cur->trim_done) { + + u8 res = trim_case(argv, queue_cur, in_buf); + + if (res == FAULT_ERROR) + FATAL("Unable to execute target application"); + + if (stop_soon) { + ++cur_skipped_paths; + goto abandon_entry; + } + + /* Don't retry trimming, even if it failed. */ + + queue_cur->trim_done = 1; + + len = queue_cur->len; + + } + + memcpy(out_buf, in_buf, len); + + /********************* + * PERFORMANCE SCORE * + *********************/ + + orig_perf = perf_score = calculate_score(queue_cur); + + /* Skip right away if -d is given, if we have done deterministic fuzzing on + this entry ourselves (was_fuzzed), or if it has gone through deterministic + testing in earlier, resumed runs (passed_det). */ + + if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) + goto havoc_stage; + + /* Skip deterministic fuzzing if exec path checksum puts this out of scope + for this master instance. */ + + if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) + goto havoc_stage; + + + cur_ms_lv = get_cur_time(); + if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || + (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) + { + key_puppet = 1; + goto pacemaker_fuzzing; + } + + doing_det = 1; + + /********************************************* + * SIMPLE BITFLIP (+dictionary construction) * + *********************************************/ + +#define FLIP_BIT(_ar, _b) do { \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ + } while (0) + + /* Single walking bit. 
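
With FLIP_BIT freshly re-defined for core_fuzzing(), a standalone check of its indexing is useful: bit _b maps to byte _b >> 3 and, within that byte, to mask 128 >> (_b & 7). Stage bit 0 is therefore the most significant bit of byte 0, and every (stage_cur & 7) == 7 step lands on a byte's least significant bit, which is exactly where the dictionary trick hooks in. Using fixed-width stand-ins for AFL's u8/u32:

    #include <stdio.h>
    #include <stdint.h>

    #define FLIP_BIT(_ar, _b) do { \
        uint8_t* _arf = (uint8_t*)(_ar); \
        uint32_t _bf = (_b); \
        _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
      } while (0)

    int main(void) {
      uint8_t buf[2] = { 0x00, 0x00 };
      FLIP_BIT(buf, 0);   /* MSB of buf[0] -> 0x80 */
      FLIP_BIT(buf, 7);   /* LSB of buf[0] -> 0x81 */
      FLIP_BIT(buf, 8);   /* MSB of buf[1] -> 0x80 */
      printf("%02x %02x\n", buf[0], buf[1]);   /* prints "81 80" */
      return 0;
    }
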
*/
+
+ stage_short = "flip1";
+ stage_max = len << 3;
+ stage_name = "bitflip 1/1";
+
+ stage_val_type = STAGE_VAL_NONE;
+
+ orig_hit_cnt = queued_paths + unique_crashes;
+
+ prev_cksum = queue_cur->exec_cksum;
+
+ for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+ stage_cur_byte = stage_cur >> 3;
+
+ FLIP_BIT(out_buf, stage_cur);
+
+ if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+ FLIP_BIT(out_buf, stage_cur);
+
+ /* While flipping the least significant bit in every byte, pull off an extra
+ trick to detect possible syntax tokens. In essence, the idea is that if
+ you have a binary blob like this:
+
+ xxxxxxxxIHDRxxxxxxxx
+
+ ...and changing the leading and trailing bytes causes variable or no
+ changes in program flow, but touching any character in the "IHDR" string
+ always produces the same, distinctive path, it's highly likely that
+ "IHDR" is an atomically-checked magic value of special significance to
+ the fuzzed format.
+
+ We do this here, rather than as a separate stage, because it's a nice
+ way to keep the operation approximately "free" (i.e., no extra execs).
+
+ Empirically, performing the check when flipping the least significant bit
+ is advantageous, compared to doing it at the time of more disruptive
+ changes, where the program flow may be affected in more violent ways.
+
+ The caveat is that we won't generate dictionaries in the -d mode or -S
+ mode - but that's probably a fair trade-off.
+
+ This won't work particularly well with paths that exhibit variable
+ behavior, but fails gracefully, so we'll carry out the checks anyway.
+
+ */
+
+ if (!dumb_mode && (stage_cur & 7) == 7) {
+
+ u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
+
+ if (stage_cur == stage_max - 1 && cksum == prev_cksum) {
+
+ /* If at end of file and we are still collecting a string, grab the
+ final character and force output. */
+
+ if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+ ++a_len;
+
+ if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+ maybe_add_auto(a_collect, a_len);
+
+ }
+ else if (cksum != prev_cksum) {
+
+ /* Otherwise, if the checksum has changed, see if we have something
+ worthwhile queued up, and collect that if the answer is yes. */
+
+ if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
+ maybe_add_auto(a_collect, a_len);
+
+ a_len = 0;
+ prev_cksum = cksum;
+
+ }
+
+ /* Continue collecting string, but only if the bit flip actually made
+ any difference - we don't want no-op tokens. */
+
+ if (cksum != queue_cur->exec_cksum) {
+
+ if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3];
+ ++a_len;
+
+ }
+
+ }
+
+ }
+
+ new_hit_cnt = queued_paths + unique_crashes;
+
+ stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt;
+ stage_cycles[STAGE_FLIP1] += stage_max;
+
+ /* Two walking bits. */
+
+ stage_name = "bitflip 2/1";
+ stage_short = "flip2";
+ stage_max = (len << 3) - 1;
+
+ orig_hit_cnt = new_hit_cnt;
+
+ for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+
+ stage_cur_byte = stage_cur >> 3;
+
+ FLIP_BIT(out_buf, stage_cur);
+ FLIP_BIT(out_buf, stage_cur + 1);
+
+ if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+
+ FLIP_BIT(out_buf, stage_cur);
+ FLIP_BIT(out_buf, stage_cur + 1);
+
+ }
+
+ new_hit_cnt = queued_paths + unique_crashes;
+
+ stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt;
+ stage_cycles[STAGE_FLIP2] += stage_max;
+
+ /* Four walking bits.
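
A word on SWAP16/SWAP32 before the remaining deterministic passes, since the 16- and 32-bit arithmetic stages lean on them: they are plain byte-order reversals (macros in AFL's types.h), and the idiom SWAP16(SWAP16(orig) + j) means "add j to the value as a big-endian target would read it". Re-written as a function for a self-contained check:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t swap16(uint16_t x) { return (uint16_t)((x << 8) | (x >> 8)); }

    int main(void) {
      /* File bytes 00 ff load little-endian as 0xff00, but read big-endian
         they mean 255. Adding 1 "big-endian style": */
      uint16_t orig = 0xff00;
      uint16_t be_plus_1 = swap16((uint16_t)(swap16(orig) + 1));
      printf("%04x\n", be_plus_1);   /* 0001: file bytes 01 00, i.e. BE 256 */
      return 0;
    }

This also explains the overflow guards: for the big-endian cases, (orig >> 8) is the low-order big-endian byte, so (orig >> 8) + j > 0xff asks whether the addition carries into a second byte, i.e. whether it does something a single-byte stage could not.
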
*/ + + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; + + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur >> 3; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; + + + /* Effector map setup. These macros calculate: + + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. + + */ + +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. */ + + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; + + if (EFF_APOS(len - 1) != 0) { + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; + } + + /* Walking byte. */ + + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; + + + orig_hit_cnt = new_hit_cnt; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + stage_cur_byte = stage_cur; + + out_buf[stage_cur] ^= 0xFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. */ + + if (!eff_map[EFF_APOS(stage_cur)]) { + + u32 cksum; + + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. */ + + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; + + if (cksum != queue_cur->exec_cksum) { + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; + } + + } + + out_buf[stage_cur] ^= 0xFF; + + } + + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ + + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + + memset(eff_map, 1, EFF_ALEN(len)); + + blocks_eff_select += EFF_ALEN(len); + + } + else { + + blocks_eff_select += eff_cnt; + + } + + blocks_eff_total += EFF_ALEN(len); + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; + + + + /* Two walking bytes. */ + + if (len < 2) goto skip_bitflip; + + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + /* Let's consult the effector map... 
*/ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + --stage_max; + continue; + } + + stage_cur_byte = i; + + *(u16*)(out_buf + i) ^= 0xFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u16*)(out_buf + i) ^= 0xFFFF; + + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; + + + + if (len < 4) goto skip_bitflip; + + /* Four walking bytes. */ + + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + --stage_max; + continue; + } + + stage_cur_byte = i; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; + + + + + skip_bitflip: + + if (no_arith) goto skip_arith; + + /********************** + * ARITHMETIC INC/DEC * + **********************/ + + /* 8-bit arithmetics. */ + + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; + + + stage_val_type = STAGE_VAL_LE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u8 orig = out_buf[i]; + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)]) { + stage_max -= 2 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u8 r = orig ^ (orig + j); + + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. */ + + if (!could_be_bitflip(r)) { + + stage_cur_val = j; + out_buf[i] = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + r = orig ^ (orig - j); + + if (!could_be_bitflip(r)) { + + stage_cur_val = -j; + out_buf[i] = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + out_buf[i] = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; + + + + + /* 16-bit arithmetics, both endians. */ + + if (len < 2) goto skip_arith; + + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + u16 orig = *(u16*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= 4 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u16 r1 = orig ^ (orig + j), + r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); + + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. 
*/ + + stage_val_type = STAGE_VAL_LE; + + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + /* Big endian comes next. Same deal. */ + + stage_val_type = STAGE_VAL_BE; + + + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig >> 8) < j && !could_be_bitflip(r4)) { + + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + *(u16*)(out_buf + i) = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; + + + + /* 32-bit arithmetics, both endians. */ + + if (len < 4) goto skip_arith; + + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + u32 orig = *(u32*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; + continue; + } + + stage_cur_byte = i; + + for (j = 1; j <= ARITH_MAX; ++j) { + + u32 r1 = orig ^ (orig + j), + r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); + + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. */ + + stage_val_type = STAGE_VAL_LE; + + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + /* Big endian next. 
*/ + + stage_val_type = STAGE_VAL_BE; + + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + *(u32*)(out_buf + i) = orig; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; + + + + skip_arith: + + /********************** + * INTERESTING VALUES * + **********************/ + + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); + + + + stage_val_type = STAGE_VAL_LE; + + orig_hit_cnt = new_hit_cnt; + + /* Setting 8-bit integers. */ + + for (i = 0; i < len; ++i) { + + u8 orig = out_buf[i]; + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)]) { + stage_max -= sizeof(interesting_8); + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_8); ++j) { + + /* Skip if the value could be a product of bitflips or arithmetics. */ + + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { + --stage_max; + continue; + } + + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + out_buf[i] = orig; + ++stage_cur; + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; + + + + /* Setting 16-bit integers, both endians. */ + + if (no_arith || len < 2) goto skip_interest; + + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 1; ++i) { + + u16 orig = *(u16*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= sizeof(interesting_16); + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + + stage_cur_val = interesting_16[j]; + + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. 
*/ + + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { + + stage_val_type = STAGE_VAL_LE; + + *(u16*)(out_buf + i) = interesting_16[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + + stage_val_type = STAGE_VAL_BE; + + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + } + + *(u16*)(out_buf + i) = orig; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; + + + + + if (len < 4) goto skip_interest; + + /* Setting 32-bit integers, both endians. */ + + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len - 3; ++i) { + + u32 orig = *(u32*)(out_buf + i); + + /* Let's consult the effector map... */ + + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= sizeof(interesting_32) >> 1; + continue; + } + + stage_cur_byte = i; + + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + + stage_cur_val = interesting_32[j]; + + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ + + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { + + stage_val_type = STAGE_VAL_LE; + + *(u32*)(out_buf + i) = interesting_32[j]; + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + + stage_val_type = STAGE_VAL_BE; + + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + + } else --stage_max; + + } + + *(u32*)(out_buf + i) = orig; + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; + + + + skip_interest: + + /******************** + * DICTIONARY STUFF * + ********************/ + + if (!extras_cnt) goto skip_user_extras; + + /* Overwrite with user-supplied extras. */ + + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; + + + stage_val_type = STAGE_VAL_NONE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u32 last_len = 0; + + stage_cur_byte = i; + + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ + + for (j = 0; j < extras_cnt; ++j) { + + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. 
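(When it is, each extra is tried with probability MAX_DET_EXTRAS / + extras_cnt, which keeps the expected cost of this stage bounded.)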
Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. */ + + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + + --stage_max; + continue; + + } + + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + ++stage_cur; + + } + + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; + + /* Insertion of user-supplied extras. */ + + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; + + + + + orig_hit_cnt = new_hit_cnt; + + ex_tmp = ck_alloc(len + MAX_DICT_FILE); + + for (i = 0; i <= len; ++i) { + + stage_cur_byte = i; + + for (j = 0; j < extras_cnt; ++j) { + + if (len + extras[j].len > MAX_FILE) { + --stage_max; + continue; + } + + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); + + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { + ck_free(ex_tmp); + goto abandon_entry; + } + + ++stage_cur; + + } + + /* Copy head */ + ex_tmp[i] = out_buf[i]; + + } + + ck_free(ex_tmp); + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; + + skip_user_extras: + + if (!a_extras_cnt) goto skip_extras; + + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + + + stage_val_type = STAGE_VAL_NONE; + + orig_hit_cnt = new_hit_cnt; + + for (i = 0; i < len; ++i) { + + u32 last_len = 0; + + stage_cur_byte = i; + + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + + /* See the comment in the earlier code; extras are sorted by size. */ + + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + + --stage_max; + continue; + + } + + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); + + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + + ++stage_cur; + + } + + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); + + } + + new_hit_cnt = queued_paths + unique_crashes; + + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_AO] += stage_max; + + skip_extras: + + /* If we made this to here without jumping to havoc_stage or abandon_entry, + we're properly done with deterministic steps and can mark it as such + in the .state/ directory. */ + + if (!queue_cur->passed_det) mark_as_det_done(queue_cur); + + /**************** + * RANDOM HAVOC * + ****************/ + + havoc_stage: + pacemaker_fuzzing: + + + stage_cur_byte = -1; + + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. 
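The arithmetic below boils down to stage_max = base_cycles * perf_score / + havoc_div / 100, where base_cycles is HAVOC_CYCLES_INIT, HAVOC_CYCLES or + SPLICE_HAVOC; e.g. perf_score = 200 with havoc_div = 2 yields exactly + base_cycles iterations, with HAVOC_MIN as the floor.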
*/ + + if (!splice_cycle) { + + stage_name = "MOpt-havoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + perf_score / havoc_div / 100; + + } else { + + static u8 tmp[32]; + + perf_score = orig_perf; + + sprintf(tmp, "MOpt-core-splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_core_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + + } + + s32 temp_len_puppet; + cur_ms_lv = get_cur_time(); + + //for (; swarm_now < swarm_num; ++swarm_now) + { + if (key_puppet == 1) { + if (unlikely(orig_hit_cnt_puppet == 0)) { + orig_hit_cnt_puppet = queued_paths + unique_crashes; + last_limit_time_start = get_cur_time(); + SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); + } + } + { +#ifndef IGNORE_FINDS + havoc_stage_puppet: +#endif + + stage_cur_byte = -1; + + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ + + if (!splice_cycle) { + stage_name = "MOpt core havoc"; + stage_short = "MOpt_core_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + perf_score / havoc_div / 100; + } else { + static u8 tmp[32]; + perf_score = orig_perf; + sprintf(tmp, "MOpt core splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_core_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + } + + if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; + temp_len = len; + orig_hit_cnt = queued_paths + unique_crashes; + havoc_queued = queued_paths; + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); + stage_cur_val = use_stacking; + + for (i = 0; i < operator_num; ++i) { + core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i]; + } + + for (i = 0; i < use_stacking; ++i) { + + switch (select_algorithm()) { + + case 0: + /* Flip a single bit somewhere. Spooky! */ + FLIP_BIT(out_buf, UR(temp_len << 3)); + core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1; + break; + + + case 1: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1; + break; + + case 2: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + FLIP_BIT(out_buf, temp_len_puppet + 2); + FLIP_BIT(out_buf, temp_len_puppet + 3); + core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1; + break; + + case 3: + if (temp_len < 4) break; + out_buf[UR(temp_len)] ^= 0xFF; + core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1; + break; + + case 4: + if (temp_len < 8) break; + *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; + core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1; + break; + + case 5: + if (temp_len < 8) break; + *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; + core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1; + break; + + case 6: + out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); + out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); + core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1; + break; + + case 7: + /* Randomly subtract from word, random endian. 
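The big-endian case round-trips through SWAP16: SWAP16(SWAP16(v) - num) + subtracts num from the value as a big-endian reader would see it, then + stores it back in the original byte order.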
*/ + if (temp_len < 8) break; + if (UR(2)) { + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + } else { + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); + } + /* Randomly add to word, random endian. */ + if (UR(2)) { + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); + } else { + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); + } + core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1; + break; + + + case 8: + /* Randomly subtract from dword, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + } else { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + } + /* Randomly add to dword, random endian. */ + if (UR(2)) { + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); + } else { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); + } + core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1; + break; + + + case 9: + /* Set byte to interesting value. */ + if (temp_len < 4) break; + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1; + break; + + case 10: + /* Set word to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; + if (UR(2)) { + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; + } else { + *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( + interesting_16[UR(sizeof(interesting_16) >> 1)]); + } + core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1; + break; + + + case 11: + /* Set dword to interesting value, randomly choosing endian. */ + + if (temp_len < 8) break; + + if (UR(2)) { + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; + } else { + *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( + interesting_32[UR(sizeof(interesting_32) >> 2)]); + } + core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1; + break; + + + case 12: + + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ + + out_buf[UR(temp_len)] ^= 1 + UR(255); + core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1; + break; + + + case 13: { + + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ + + u32 del_from, del_len; + + if (temp_len < 2) break; + + /* Don't delete too much. */ + + del_len = choose_block_len(temp_len - 1); + + del_from = UR(temp_len - del_len + 1); + + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); + + temp_len -= del_len; + core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1; + break; + + } + + case 14: + + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + + /* Clone bytes (75%) or insert a block of constant bytes (25%). 
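actually_clone = UR(4) is zero one time in four, selecting the + constant-byte path; the inserted block is filled with either a random + byte or a byte sampled from the buffer itself.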
*/ + + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; + + if (actually_clone) { + + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); + + } else { + + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; + + } + + clone_to = UR(temp_len); + + new_buf = ck_alloc_nozero(temp_len + clone_len); + + /* Head */ + + memcpy(new_buf, out_buf, clone_to); + + /* Inserted part */ + + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + core_operator_cycles_puppet_v2[STAGE_Clone75] += 1; + } + + break; + + case 15: { + + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ + + u32 copy_from, copy_to, copy_len; + + if (temp_len < 2) break; + + copy_len = choose_block_len(temp_len - 1); + + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); + + if (UR(4)) { + + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + + } + else memset(out_buf + copy_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1; + break; + + } + + + } + + } + + tmp_core_time += 1; + + u64 temp_total_found = queued_paths + unique_crashes; + + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry_puppet; + + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ + + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); + + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ + + if (queued_paths != havoc_queued) { + + if (perf_score <= havoc_max_mult * 100) { + stage_max *= 2; + perf_score *= 2; + } + + havoc_queued = queued_paths; + + } + + if (unlikely(queued_paths + unique_crashes > temp_total_found)) + { + u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; + total_puppet_find = total_puppet_find + temp_temp_puppet; + for (i = 0; i < 16; ++i) + { + if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i]) + core_operator_finds_puppet_v2[i] += temp_temp_puppet; + } + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + +#ifndef IGNORE_FINDS + + /************ + * SPLICING * + ************/ + + + retry_splicing_puppet: + + + + if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && + queued_paths > 1 && queue_cur->len > 1) { + + struct queue_entry* target; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; + + /* First of all, if we've modified in_buf for havoc, let's clean that + up... */ + + if (in_buf != orig_in) { + ck_free(in_buf); + in_buf = orig_in; + len = queue_cur->len; + } + + /* Pick a random queue entry and seek to it. Don't splice with yourself. */ + + do { tid = UR(queued_paths); } while (tid == current_entry); + + splicing_with = tid; + target = queue; + + while (tid >= 100) { target = target->next_100; tid -= 100; } + while (tid--) target = target->next; + + /* Make sure that the target has a reasonable length. 
*/ + + while (target && (target->len < 2 || target == queue_cur)) { + target = target->next; + ++splicing_with; + } + + if (!target) goto retry_splicing_puppet; + + /* Read the testcase into a new buffer. */ + + fd = open(target->fname, O_RDONLY); + + if (fd < 0) PFATAL("Unable to open '%s'", target->fname); + + new_buf = ck_alloc_nozero(target->len); + + ck_read(fd, new_buf, target->len, target->fname); + + close(fd); + + /* Find a suitable splicing location, somewhere between the first and + the last differing byte. Bail out if the difference is just a single + byte or so. */ + + locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + + if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { + ck_free(new_buf); + goto retry_splicing_puppet; + } + + /* Split somewhere between the first and last differing byte. */ + + split_at = f_diff + UR(l_diff - f_diff); + + /* Do the thing. */ + + len = target->len; + memcpy(new_buf, in_buf, split_at); + in_buf = new_buf; + ck_free(out_buf); + out_buf = ck_alloc_nozero(len); + memcpy(out_buf, in_buf, len); + + goto havoc_stage_puppet; + + } + +#endif /* !IGNORE_FINDS */ + + ret_val = 0; + abandon_entry: + abandon_entry_puppet: + + if (splice_cycle >= SPLICE_CYCLES_puppet) + SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); + + + splicing_with = -1; + + + munmap(orig_in, queue_cur->len); + + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); + + + if (key_puppet == 1) + { + if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) + { + key_puppet = 0; + cur_ms_lv = get_cur_time(); + new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt_puppet = 0; + last_limit_time_start = 0; + } + } + + + if (unlikely(tmp_core_time > period_core)) + { + total_pacemaker_time += tmp_core_time; + tmp_core_time = 0; + temp_puppet_find = total_puppet_find; + new_hit_cnt = queued_paths + unique_crashes; + + u64 temp_stage_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) + { + + core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; + core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i]; + temp_stage_finds_puppet += core_operator_finds_puppet[i]; + } + + key_module = 2; + + old_hit_count = new_hit_cnt; + } + return ret_val; + } + } + + +#undef FLIP_BIT + +} + + +void pso_updating(void) { + + g_now += 1; + if (g_now > g_max) g_now = 0; + w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end; + int tmp_swarm, i, j; + u64 temp_operator_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) + { + operator_finds_puppet[i] = core_operator_finds_puppet[i]; + + for (j = 0; j < swarm_num; ++j) + { + operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i]; + } + temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i]; + } + + for (i = 0; i < operator_num; ++i) + { + if (operator_finds_puppet[i]) + G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet)); + } + + for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) + { + double x_temp = 0.0; + for (i = 0; i < operator_num; ++i) + { + probability_now[tmp_swarm][i] = 0.0; + v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); + x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; + if (x_now[tmp_swarm][i] > v_max) + x_now[tmp_swarm][i] = v_max; + else if
(x_now[tmp_swarm][i] < v_min) + x_now[tmp_swarm][i] = v_min; + x_temp += x_now[tmp_swarm][i]; + } + + for (i = 0; i < operator_num; ++i) + { + x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; + if (likely(i != 0)) + probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; + else + probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; + } + if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) FATAL("ERROR probability"); + } + swarm_now = 0; + key_module = 0; +} + + +/* larger change for MOpt implementation: the original fuzz_one was renamed + to fuzz_one_original. All documentation references to fuzz_one therefore + mean fuzz_one_original */ + +u8 fuzz_one(char** argv) { + int key_val_lv = 0; + if (limit_time_sig == 0) { + key_val_lv = fuzz_one_original(argv); + } else { + if (key_module == 0) + key_val_lv = pilot_fuzzing(argv); + else if (key_module == 1) + key_val_lv = core_fuzzing(argv); + else if (key_module == 2) + pso_updating(); + } + + return key_val_lv; +} + diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index 74ffc524..ed158e6c 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -220,4 +220,110 @@ void trim_py(char** ret, size_t* retlen) { } } +u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { + + static u8 tmp[64]; + static u8 clean_trace[MAP_SIZE]; + + u8 needs_write = 0, fault = 0; + u32 trim_exec = 0; + u32 orig_len = q->len; + + stage_name = tmp; + bytes_trim_in += q->len; + + /* Initialize trimming in the Python module */ + stage_cur = 0; + stage_max = init_trim_py(in_buf, q->len); + + if (not_on_tty && debug) + SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, q->len); + + while(stage_cur < stage_max) { + sprintf(tmp, "ptrim %s", DI(trim_exec)); + + u32 cksum; + + char* retbuf = NULL; + size_t retlen = 0; + + trim_py(&retbuf, &retlen); + + if (retlen > orig_len) + FATAL("Trimmed data returned by Python module is larger than original data"); + + write_to_testcase(retbuf, retlen); + + fault = run_target(argv, exec_tmout); + ++trim_execs; + + if (stop_soon || fault == FAULT_ERROR) goto abort_trimming; + + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + + if (cksum == q->exec_cksum) { + + q->len = retlen; + memcpy(in_buf, retbuf, retlen); + + /* Let's save a clean trace, which will be needed by + update_bitmap_score once we're done with the trimming stuff. */ + + if (!needs_write) { + + needs_write = 1; + memcpy(clean_trace, trace_bits, MAP_SIZE); + + } + + /* Tell the Python module that the trimming was successful */ + stage_cur = post_trim_py(1); + + if (not_on_tty && debug) + SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", stage_cur, stage_max, q->len); + } else { + /* Tell the Python module that the trimming was unsuccessful */ + stage_cur = post_trim_py(0); + if (not_on_tty && debug) + SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, stage_max); + } + + /* Since this can be slow, update the screen every now and then. */ + + if (!(trim_exec++ % stats_update_freq)) show_stats(); + } + + if (not_on_tty && debug) + SAYF("[Python Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len); + + /* If we have made changes to in_buf, we also need to update the on-disk + version of the test case. 
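The rewrite below unlinks the old file and recreates it with O_EXCL, then + restores the clean trace captured during the last successful trim so that + update_bitmap_score() works from consistent data.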
*/ + + if (needs_write) { + + s32 fd; + + unlink(q->fname); /* ignore errors */ + + fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); + + if (fd < 0) PFATAL("Unable to create '%s'", q->fname); + + ck_write(fd, in_buf, q->len, q->fname); + close(fd); + + memcpy(trace_bits, clean_trace, MAP_SIZE); + update_bitmap_score(q); + + } + + + +abort_trimming: + + bytes_trim_out += q->len; + return fault; + +} + #endif /* USE_PYTHON */ diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index ed352bcb..c1547b48 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -284,3 +284,139 @@ void cull_queue(void) { } + +/* Calculate case desirability score to adjust the length of havoc fuzzing. + A helper function for fuzz_one(). Maybe some of these constants should + go into config.h. */ + +u32 calculate_score(struct queue_entry* q) { + + u32 avg_exec_us = total_cal_us / total_cal_cycles; + u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries; + u32 perf_score = 100; + + /* Adjust score based on execution speed of this path, compared to the + global average. Multiplier ranges from 0.1x to 3x. Fast inputs are + less expensive to fuzz, so we're giving them more air time. */ + + // TODO BUG FIXME: is this really a good idea? + // This sounds like looking for lost keys under a street light just because + // the light is better there. + // Longer execution time means longer work on the input, the deeper in + // coverage, the better the fuzzing, right? -mh + + if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10; + else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25; + else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50; + else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75; + else if (q->exec_us * 4 < avg_exec_us) perf_score = 300; + else if (q->exec_us * 3 < avg_exec_us) perf_score = 200; + else if (q->exec_us * 2 < avg_exec_us) perf_score = 150; + + /* Adjust score based on bitmap size. The working theory is that better + coverage translates to better targets. Multiplier from 0.25x to 3x. */ + + if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3; + else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2; + else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5; + else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25; + else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5; + else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75; + + /* Adjust score based on handicap. Handicap is proportional to how late + in the game we learned about this path. Latecomers are allowed to run + for a bit longer until they catch up with the rest. */ + + if (q->handicap >= 4) { + perf_score *= 4; + q->handicap -= 4; + } else if (q->handicap) { + perf_score *= 2; + --q->handicap; + } + + /* Final adjustment based on input depth, under the assumption that fuzzing + deeper test cases is more likely to reveal stuff that can't be + discovered with traditional fuzzers. */ + + switch (q->depth) { + + case 0 ... 3: break; + case 4 ... 7: perf_score *= 2; break; + case 8 ... 13: perf_score *= 3; break; + case 14 ... 
25: perf_score *= 4; break; + default: perf_score *= 5; + + } + + u64 fuzz = q->n_fuzz; + u64 fuzz_total; + + u32 n_paths, fuzz_mu; + u32 factor = 1; + + switch (schedule) { + + case EXPLORE: + break; + + case EXPLOIT: + factor = MAX_FACTOR; + break; + + case COE: + fuzz_total = 0; + n_paths = 0; + + struct queue_entry *queue_it = queue; + while (queue_it) { + fuzz_total += queue_it->n_fuzz; + n_paths ++; + queue_it = queue_it->next; + } + + fuzz_mu = fuzz_total / n_paths; + if (fuzz <= fuzz_mu) { + if (q->fuzz_level < 16) + factor = ((u32) (1 << q->fuzz_level)); + else + factor = MAX_FACTOR; + } else { + factor = 0; + } + break; + + case FAST: + if (q->fuzz_level < 16) { + factor = ((u32) (1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz); + } else + factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2 (fuzz)); + break; + + case LIN: + factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); + break; + + case QUAD: + factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz); + break; + + default: + PFATAL ("Unknown Power Schedule"); + } + if (factor > MAX_FACTOR) + factor = MAX_FACTOR; + + perf_score *= factor / POWER_BETA; + + // MOpt mode + if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2; + else if (perf_score < 1) perf_score = 1; // Add a lower bound to AFLFast's energy assignment strategies + + /* Make sure that we don't go over limit. */ + + if (perf_score > havoc_max_mult * 100) perf_score = havoc_max_mult * 100; + + return perf_score; + +} diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c new file mode 100644 index 00000000..c14ecc87 --- /dev/null +++ b/src/afl-fuzz-run.c @@ -0,0 +1,775 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#include "afl-fuzz.h" + +/* Execute target application, monitoring for timeouts. Return status + information. The called program will update trace_bits[]. */ + +u8 run_target(char** argv, u32 timeout) { + + static struct itimerval it; + static u32 prev_timed_out = 0; + static u64 exec_ms = 0; + + int status = 0; + u32 tb4; + + child_timed_out = 0; + + /* After this memset, trace_bits[] are effectively volatile, so we + must prevent any earlier operations from venturing into that + territory. */ + + memset(trace_bits, 0, MAP_SIZE); + MEM_BARRIER(); + + /* If we're running in "dumb" mode, we can't rely on the fork server + logic compiled into the target program, so we will just keep calling + execve(). There is a bit of code duplication between here and + init_forkserver(), but c'est la vie. 
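In forkserver mode the handshake is: write four bytes (prev_timed_out) to + fsrv_ctl_fd to request a run, read the child PID back from fsrv_st_fd, and + later read the four-byte waitpid() status from the same descriptor.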
*/ + + if (dumb_mode == 1 || no_forkserver) { + + child_pid = fork(); + + if (child_pid < 0) PFATAL("fork() failed"); + + if (!child_pid) { + + struct rlimit r; + + if (mem_limit) { + + r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; + +#ifdef RLIMIT_AS + + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + +#else + + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + +#endif /* ^RLIMIT_AS */ + + } + + r.rlim_max = r.rlim_cur = 0; + + setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + + /* Isolate the process and configure standard descriptors. If out_file is + specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ + + setsid(); + + dup2(dev_null_fd, 1); + dup2(dev_null_fd, 2); + + if (out_file) { + + dup2(dev_null_fd, 0); + + } else { + + dup2(out_fd, 0); + close(out_fd); + + } + + /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */ + + close(dev_null_fd); + close(out_dir_fd); +#ifndef HAVE_ARC4RANDOM + close(dev_urandom_fd); +#endif + close(fileno(plot_file)); + + /* Set sane defaults for ASAN if nothing else specified. */ + + setenv("ASAN_OPTIONS", "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", 0); + + setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" + "symbolize=0:" + "msan_track_origins=0", 0); + + execv(target_path, argv); + + /* Use a distinctive bitmap value to tell the parent about execv() + falling through. */ + + *(u32*)trace_bits = EXEC_FAIL_SIG; + exit(0); + + } + + } else { + + s32 res; + + /* In non-dumb mode, we have the fork server up and running, so simply + tell it to have at it, and then read back PID. */ + + if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) { + + if (stop_soon) return 0; + RPFATAL(res, "Unable to request new process from fork server (OOM?)"); + + } + + if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) { + + if (stop_soon) return 0; + RPFATAL(res, "Unable to request new process from fork server (OOM?)"); + + } + + if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)"); + + } + + /* Configure timeout, as requested by user, then wait for child to terminate. */ + + it.it_value.tv_sec = (timeout / 1000); + it.it_value.tv_usec = (timeout % 1000) * 1000; + + setitimer(ITIMER_REAL, &it, NULL); + + /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */ + + if (dumb_mode == 1 || no_forkserver) { + + if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed"); + + } else { + + s32 res; + + if ((res = read(fsrv_st_fd, &status, 4)) != 4) { + + if (stop_soon) return 0; + RPFATAL(res, "Unable to communicate with fork server (OOM?)"); + + } + + } + + if (!WIFSTOPPED(status)) child_pid = 0; + + getitimer(ITIMER_REAL, &it); + exec_ms = (u64) timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000); + if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms; + + it.it_value.tv_sec = 0; + it.it_value.tv_usec = 0; + + setitimer(ITIMER_REAL, &it, NULL); + + ++total_execs; + + /* Any subsequent operations on trace_bits must not be moved by the + compiler below this point. Past this location, trace_bits[] behave + very normally and do not have to be treated as volatile. */ + + MEM_BARRIER(); + + tb4 = *(u32*)trace_bits; + +#ifdef __x86_64__ + classify_counts((u64*)trace_bits); +#else + classify_counts((u32*)trace_bits); +#endif /* ^__x86_64__ */ + + prev_timed_out = child_timed_out; + + /* Report outcome to caller. 
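In short: a timed-out child killed by SIGKILL maps to FAULT_TMOUT; any + other fatal signal (or an MSAN exit with code MSAN_ERROR) maps to + FAULT_CRASH; EXEC_FAIL_SIG left in trace_bits means execv() itself failed + (FAULT_ERROR); everything else is FAULT_NONE.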
*/ + + if (WIFSIGNALED(status) && !stop_soon) { + + kill_signal = WTERMSIG(status); + + if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT; + + return FAULT_CRASH; + + } + + /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and + must use a special exit code. */ + + if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) { + kill_signal = 0; + return FAULT_CRASH; + } + + if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG) + return FAULT_ERROR; + + return FAULT_NONE; + +} + + +/* Write modified data to file for testing. If out_file is set, the old file + is unlinked and a new one is created. Otherwise, out_fd is rewound and + truncated. */ + +void write_to_testcase(void* mem, u32 len) { + + s32 fd = out_fd; + + if (out_file) { + + unlink(out_file); /* Ignore errors. */ + + fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); + + if (fd < 0) PFATAL("Unable to create '%s'", out_file); + + } else lseek(fd, 0, SEEK_SET); + + if (pre_save_handler) { + u8* new_data; + size_t new_size = pre_save_handler(mem, len, &new_data); + ck_write(fd, new_data, new_size, out_file); + } else { + ck_write(fd, mem, len, out_file); + } + + if (!out_file) { + + if (ftruncate(fd, len)) PFATAL("ftruncate() failed"); + lseek(fd, 0, SEEK_SET); + + } else close(fd); + +} + + +/* The same, but with an adjustable gap. Used for trimming. */ + +void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { + + s32 fd = out_fd; + u32 tail_len = len - skip_at - skip_len; + + if (out_file) { + + unlink(out_file); /* Ignore errors. */ + + fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); + + if (fd < 0) PFATAL("Unable to create '%s'", out_file); + + } else lseek(fd, 0, SEEK_SET); + + if (skip_at) ck_write(fd, mem, skip_at, out_file); + + u8 *memu8 = mem; + if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file); + + if (!out_file) { + + if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed"); + lseek(fd, 0, SEEK_SET); + + } else close(fd); + +} + + +/* Calibrate a new test case. This is done when processing the input directory + to warn about flaky or otherwise problematic test cases early on; and when + new paths are discovered to detect variable behavior and so on. */ + +u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, + u32 handicap, u8 from_queue) { + + static u8 first_trace[MAP_SIZE]; + + u8 fault = 0, new_bits = 0, var_detected = 0, + first_run = (q->exec_cksum == 0); + + u64 start_us, stop_us; + + s32 old_sc = stage_cur, old_sm = stage_max; + u32 use_tmout = exec_tmout; + u8* old_sn = stage_name; + + /* Be a bit more generous about timeouts when resuming sessions, or when + trying to calibrate already-added finds. This helps avoid trouble due + to intermittent latency. */ + + if (!from_queue || resuming_fuzz) + use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD, + exec_tmout * CAL_TMOUT_PERC / 100); + + ++q->cal_failed; + + stage_name = "calibration"; + stage_max = fast_cal ? 3 : CAL_CYCLES; + + /* Make sure the forkserver is up before we do anything, and let's not + count its spin-up time toward binary calibration. 
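Each calibration run below is checksummed; a checksum that differs from + the stored one flags the differing bytes in var_bytes[] and stretches the + loop to CAL_CYCLES_LONG.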
*/ + + if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) + init_forkserver(argv); + + if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE); + + start_us = get_cur_time_us(); + + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + u32 cksum; + + if (!first_run && !(stage_cur % stats_update_freq)) show_stats(); + + write_to_testcase(use_mem, q->len); + + fault = run_target(argv, use_tmout); + + /* stop_soon is set by the handler for Ctrl+C. When it's pressed, + we want to bail out quickly. */ + + if (stop_soon || fault != crash_mode) goto abort_calibration; + + if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) { + fault = FAULT_NOINST; + goto abort_calibration; + } + + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + + if (q->exec_cksum != cksum) { + + u8 hnb = has_new_bits(virgin_bits); + if (hnb > new_bits) new_bits = hnb; + + if (q->exec_cksum) { + + u32 i; + + for (i = 0; i < MAP_SIZE; ++i) { + + if (!var_bytes[i] && first_trace[i] != trace_bits[i]) { + + var_bytes[i] = 1; + stage_max = CAL_CYCLES_LONG; + + } + + } + + var_detected = 1; + + } else { + + q->exec_cksum = cksum; + memcpy(first_trace, trace_bits, MAP_SIZE); + + } + + } + + } + + stop_us = get_cur_time_us(); + + total_cal_us += stop_us - start_us; + total_cal_cycles += stage_max; + + /* OK, let's collect some stats about the performance of this test case. + This is used for fuzzing air time calculations in calculate_score(). */ + + q->exec_us = (stop_us - start_us) / stage_max; + q->bitmap_size = count_bytes(trace_bits); + q->handicap = handicap; + q->cal_failed = 0; + + total_bitmap_size += q->bitmap_size; + ++total_bitmap_entries; + + update_bitmap_score(q); + + /* If this case didn't result in new output from the instrumentation, tell + parent. This is a non-critical problem, but something to warn the user + about. */ + + if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS; + +abort_calibration: + + if (new_bits == 2 && !q->has_new_cov) { + q->has_new_cov = 1; + ++queued_with_cov; + } + + /* Mark variable paths. */ + + if (var_detected) { + + var_byte_count = count_bytes(var_bytes); + + if (!q->var_behavior) { + mark_as_variable(q); + ++queued_variable; + } + + } + + stage_name = old_sn; + stage_cur = old_sc; + stage_max = old_sm; + + if (!first_run) show_stats(); + + return fault; + +} + + +/* Grab interesting test cases from other fuzzers. */ + +void sync_fuzzers(char** argv) { + + DIR* sd; + struct dirent* sd_ent; + u32 sync_cnt = 0; + + sd = opendir(sync_dir); + if (!sd) PFATAL("Unable to open '%s'", sync_dir); + + stage_max = stage_cur = 0; + cur_depth = 0; + + /* Look at the entries created for every other fuzzer in the sync directory. */ + + while ((sd_ent = readdir(sd))) { + + static u8 stage_tmp[128]; + + DIR* qd; + struct dirent* qd_ent; + u8 *qd_path, *qd_synced_path; + u32 min_accept = 0, next_min_accept; + + s32 id_fd; + + /* Skip dot files and our own output directory. */ + + if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue; + + /* Skip anything that doesn't have a queue/ subdirectory. */ + + qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name); + + if (!(qd = opendir(qd_path))) { + ck_free(qd_path); + continue; + } + + /* Retrieve the ID of the last seen test case. 
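The sync state lives in out_dir/.synced/<fuzzer>: a single u32 holding the + lowest case ID still to be examined. It is read here and written back as + next_min_accept once the scan of this queue completes.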
*/ + + qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name); + + id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600); + + if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path); + + if (read(id_fd, &min_accept, sizeof(u32)) > 0) + lseek(id_fd, 0, SEEK_SET); + + next_min_accept = min_accept; + + /* Show stats */ + + sprintf(stage_tmp, "sync %u", ++sync_cnt); + stage_name = stage_tmp; + stage_cur = 0; + stage_max = 0; + + /* For every file queued by this fuzzer, parse ID and see if we have looked at + it before; exec a test case if not. */ + + while ((qd_ent = readdir(qd))) { + + u8* path; + s32 fd; + struct stat st; + + if (qd_ent->d_name[0] == '.' || + sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || + syncing_case < min_accept) continue; + + /* OK, sounds like a new one. Let's give it a try. */ + + if (syncing_case >= next_min_accept) + next_min_accept = syncing_case + 1; + + path = alloc_printf("%s/%s", qd_path, qd_ent->d_name); + + /* Allow this to fail in case the other fuzzer is resuming or so... */ + + fd = open(path, O_RDONLY); + + if (fd < 0) { + ck_free(path); + continue; + } + + if (fstat(fd, &st)) PFATAL("fstat() failed"); + + /* Ignore zero-sized or oversized files. */ + + if (st.st_size && st.st_size <= MAX_FILE) { + + u8 fault; + u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + + if (mem == MAP_FAILED) PFATAL("Unable to mmap '%s'", path); + + /* See what happens. We rely on save_if_interesting() to catch major + errors and save the test case. */ + + write_to_testcase(mem, st.st_size); + + fault = run_target(argv, exec_tmout); + + if (stop_soon) return; + + syncing_party = sd_ent->d_name; + queued_imported += save_if_interesting(argv, mem, st.st_size, fault); + syncing_party = 0; + + munmap(mem, st.st_size); + + if (!(stage_cur++ % stats_update_freq)) show_stats(); + + } + + ck_free(path); + close(fd); + + } + + ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path); + + close(id_fd); + closedir(qd); + ck_free(qd_path); + ck_free(qd_synced_path); + + } + + closedir(sd); + +} + + +/* Trim all new test cases to save cycles when doing deterministic checks. The + trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of + file size, to keep the stage short and sweet. */ + +u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { + +#ifdef USE_PYTHON + if (py_functions[PY_FUNC_TRIM]) + return trim_case_python(argv, q, in_buf); +#endif + + static u8 tmp[64]; + static u8 clean_trace[MAP_SIZE]; + + u8 needs_write = 0, fault = 0; + u32 trim_exec = 0; + u32 remove_len; + u32 len_p2; + + /* Although the trimmer will be less useful when variable behavior is + detected, it will still work to some extent, so we don't check for + this. */ + + if (q->len < 5) return 0; + + stage_name = tmp; + bytes_trim_in += q->len; + + /* Select initial chunk len, starting with large steps. */ + + len_p2 = next_p2(q->len); + + remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES); + + /* Continue until the number of steps gets too high or the stepover + gets too small. 
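For a 4 KB input, for example, next_p2() gives 4096, so trimming starts + with 4096/16 = 256-byte chunks and halves down toward 4096/1024 = 4 bytes, + never dropping below TRIM_MIN_BYTES.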
*/ + + while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) { + + u32 remove_pos = remove_len; + + sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len)); + + stage_cur = 0; + stage_max = q->len / remove_len; + + while (remove_pos < q->len) { + + u32 trim_avail = MIN(remove_len, q->len - remove_pos); + u32 cksum; + + write_with_gap(in_buf, q->len, remove_pos, trim_avail); + + fault = run_target(argv, exec_tmout); + ++trim_execs; + + if (stop_soon || fault == FAULT_ERROR) goto abort_trimming; + + /* Note that we don't keep track of crashes or hangs here; maybe TODO? */ + + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + + /* If the deletion had no impact on the trace, make it permanent. This + isn't perfect for variable-path inputs, but we're just making a + best-effort pass, so it's not a big deal if we end up with false + negatives every now and then. */ + + if (cksum == q->exec_cksum) { + + u32 move_tail = q->len - remove_pos - trim_avail; + + q->len -= trim_avail; + len_p2 = next_p2(q->len); + + memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, + move_tail); + + /* Let's save a clean trace, which will be needed by + update_bitmap_score once we're done with the trimming stuff. */ + + if (!needs_write) { + + needs_write = 1; + memcpy(clean_trace, trace_bits, MAP_SIZE); + + } + + } else remove_pos += remove_len; + + /* Since this can be slow, update the screen every now and then. */ + + if (!(trim_exec++ % stats_update_freq)) show_stats(); + ++stage_cur; + + } + + remove_len >>= 1; + + } + + /* If we have made changes to in_buf, we also need to update the on-disk + version of the test case. */ + + if (needs_write) { + + s32 fd; + + unlink(q->fname); /* ignore errors */ + + fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); + + if (fd < 0) PFATAL("Unable to create '%s'", q->fname); + + ck_write(fd, in_buf, q->len, q->fname); + close(fd); + + memcpy(trace_bits, clean_trace, MAP_SIZE); + update_bitmap_score(q); + + } + +abort_trimming: + + bytes_trim_out += q->len; + return fault; + +} + + +/* Write a modified test case, run program, process results. Handle + error conditions, returning 1 if it's time to bail out. This is + a helper function for fuzz_one(). */ + +u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) { + + u8 fault; + + if (post_handler) { + + out_buf = post_handler(out_buf, &len); + if (!out_buf || !len) return 0; + + } + + write_to_testcase(out_buf, len); + + fault = run_target(argv, exec_tmout); + + if (stop_soon) return 1; + + if (fault == FAULT_TMOUT) { + + if (subseq_tmouts++ > TMOUT_LIMIT) { + ++cur_skipped_paths; + return 1; + } + + } else subseq_tmouts = 0; + + /* Users can hit us with SIGUSR1 to request the current input + to be abandoned. */ + + if (skip_requested) { + + skip_requested = 0; + ++cur_skipped_paths; + return 1; + + } + + /* This handles FAULT_ERROR for us: */ + + queued_discovered += save_if_interesting(argv, out_buf, len, fault); + + if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max) + show_stats(); + + return 0; + +} + diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c new file mode 100644 index 00000000..5dbd59ac --- /dev/null +++ b/src/afl-fuzz-stats.c @@ -0,0 +1,754 @@ +/* + american fuzzy lop - fuzzer code + -------------------------------- + + Written and maintained by Michal Zalewski + + Forkserver design by Jann Horn + + Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This is the real deal: the program takes an instrumented binary and + attempts a variety of basic fuzzing tricks, paying close attention to + how they affect the execution path. + + */ + +#include "afl-fuzz.h" + +/* Update stats file for unattended monitoring. */ + +void write_stats_file(double bitmap_cvg, double stability, double eps) { + + static double last_bcvg, last_stab, last_eps; + static struct rusage usage; + + u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); + s32 fd; + FILE* f; + + fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); + + if (fd < 0) PFATAL("Unable to create '%s'", fn); + + ck_free(fn); + + f = fdopen(fd, "w"); + + if (!f) PFATAL("fdopen() failed"); + + /* Keep last values in case we're called from another context + where exec/sec stats and such are not readily available. */ + + if (!bitmap_cvg && !stability && !eps) { + bitmap_cvg = last_bcvg; + stability = last_stab; + eps = last_eps; + } else { + last_bcvg = bitmap_cvg; + last_stab = stability; + last_eps = eps; + } + + fprintf(f, "start_time : %llu\n" + "last_update : %llu\n" + "fuzzer_pid : %d\n" + "cycles_done : %llu\n" + "execs_done : %llu\n" + "execs_per_sec : %0.02f\n" + "paths_total : %u\n" + "paths_favored : %u\n" + "paths_found : %u\n" + "paths_imported : %u\n" + "max_depth : %u\n" + "cur_path : %u\n" /* Must match find_start_position() */ + "pending_favs : %u\n" + "pending_total : %u\n" + "variable_paths : %u\n" + "stability : %0.02f%%\n" + "bitmap_cvg : %0.02f%%\n" + "unique_crashes : %llu\n" + "unique_hangs : %llu\n" + "last_path : %llu\n" + "last_crash : %llu\n" + "last_hang : %llu\n" + "execs_since_crash : %llu\n" + "exec_timeout : %u\n" + "slowest_exec_ms : %llu\n" + "peak_rss_mb : %lu\n" + "afl_banner : %s\n" + "afl_version : " VERSION "\n" + "target_mode : %s%s%s%s%s%s%s%s\n" + "command_line : %s\n", + start_time / 1000, get_cur_time() / 1000, getpid(), + queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, + queued_paths, queued_favored, queued_discovered, queued_imported, + max_depth, current_entry, pending_favored, pending_not_fuzzed, + queued_variable, stability, bitmap_cvg, unique_crashes, + unique_hangs, last_path_time / 1000, last_crash_time / 1000, + last_hang_time / 1000, total_execs - last_crash_execs, + exec_tmout, slowest_exec_ms, (unsigned long int)usage.ru_maxrss, use_banner, + unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "", + no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "", + persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "", + (unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode || + persistent_mode || deferred_mode) ? "" : "default", + orig_cmdline); + /* ignore errors */ + + fclose(f); + +} + + +/* Update the plot file if there is a reason to. 
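"Reason" meaning that at least one of the plotted counters (queue + position, path counts, crashes, hangs, depth, cycle) changed since the + last call; otherwise the early return below skips the write entirely.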
*/ + +void maybe_update_plot_file(double bitmap_cvg, double eps) { + + static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md; + static u64 prev_qc, prev_uc, prev_uh; + + if (prev_qp == queued_paths && prev_pf == pending_favored && + prev_pnf == pending_not_fuzzed && prev_ce == current_entry && + prev_qc == queue_cycle && prev_uc == unique_crashes && + prev_uh == unique_hangs && prev_md == max_depth) return; + + prev_qp = queued_paths; + prev_pf = pending_favored; + prev_pnf = pending_not_fuzzed; + prev_ce = current_entry; + prev_qc = queue_cycle; + prev_uc = unique_crashes; + prev_uh = unique_hangs; + prev_md = max_depth; + + /* Fields in the file: + + unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed, + favored_not_fuzzed, unique_crashes, unique_hangs, max_depth, + execs_per_sec */ + + fprintf(plot_file, + "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n", + get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths, + pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes, + unique_hangs, max_depth, eps); /* ignore errors */ + + fflush(plot_file); + +} + + +/* Check terminal dimensions after resize. */ + +static void check_term_size(void) { + + struct winsize ws; + + term_too_small = 0; + + if (ioctl(1, TIOCGWINSZ, &ws)) return; + + if (ws.ws_row == 0 || ws.ws_col == 0) return; + if (ws.ws_row < 24 || ws.ws_col < 79) term_too_small = 1; + +} + + +/* A spiffy retro stats screen! This is called every stats_update_freq + execve() calls, plus in several other circumstances. */ + +void show_stats(void) { + + static u64 last_stats_ms, last_plot_ms, last_ms, last_execs; + static double avg_exec; + double t_byte_ratio, stab_ratio; + + u64 cur_ms; + u32 t_bytes, t_bits; + + u32 banner_len, banner_pad; + u8 tmp[256]; + + cur_ms = get_cur_time(); + + /* If not enough time has passed since last UI update, bail out. */ + + if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return; + + /* Check if we're past the 10 minute mark. */ + + if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1; + + /* Calculate smoothed exec speed stats. */ + + if (!last_execs) { + + avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time); + + } else { + + double cur_avg = ((double)(total_execs - last_execs)) * 1000 / + (cur_ms - last_ms); + + /* If there is a dramatic (5x+) jump in speed, reset the indicator + more quickly. */ + + if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) + avg_exec = cur_avg; + + avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) + + cur_avg * (1.0 / AVG_SMOOTHING); + + } + + last_ms = cur_ms; + last_execs = total_execs; + + /* Tell the callers when to contact us (as measured in execs). */ + + stats_update_freq = avg_exec / (UI_TARGET_HZ * 10); + if (!stats_update_freq) stats_update_freq = 1; + + /* Do some bitmap stats. */ + + t_bytes = count_non_255_bytes(virgin_bits); + t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE; + + if (t_bytes) + stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes; + else + stab_ratio = 100; + + /* Roughly every minute, update fuzzer stats and save auto tokens. */ + + if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) { + + last_stats_ms = cur_ms; + write_stats_file(t_byte_ratio, stab_ratio, avg_exec); + save_auto(); + write_bitmap(); + + } + + /* Every now and then, write plot data. */ + + if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) { + + last_plot_ms = cur_ms; + maybe_update_plot_file(t_byte_ratio, avg_exec); + + } + + /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. 
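That is, set stop_soon = 2 to request a shutdown once 100+ cycles pass + with no new finds and nothing pending, or as soon as any crash is + recorded when benchmarking.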
*/ + + if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed && + getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2; + + if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2; + + /* If we're not on TTY, bail out. */ + + if (not_on_tty) return; + + /* Compute some mildly useful bitmap stats. */ + + t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits); + + /* Now, for the visuals... */ + + if (clear_screen) { + + SAYF(TERM_CLEAR CURSOR_HIDE); + clear_screen = 0; + + check_term_size(); + + } + + SAYF(TERM_HOME); + + if (term_too_small) { + + SAYF(cBRI "Your terminal is too small to display the UI.\n" + "Please resize terminal window to at least 79x24.\n" cRST); + + return; + + } + + /* Let's start by drawing a centered banner. */ + + banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + strlen(power_name) + 3 + 5; + banner_pad = (79 - banner_len) / 2; + memset(tmp, ' ', banner_pad); + +#ifdef HAVE_AFFINITY + sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN + " (%s) " cPIN "[%s]" cBLU " {%d}", crash_mode ? cPIN "peruvian were-rabbit" : + cYEL "american fuzzy lop", use_banner, power_name, cpu_aff); +#else + sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN + " (%s) " cPIN "[%s]", crash_mode ? cPIN "peruvian were-rabbit" : + cYEL "american fuzzy lop", use_banner, power_name); +#endif /* HAVE_AFFINITY */ + + SAYF("\n%s\n", tmp); + + /* "Handy" shortcuts for drawing boxes... */ + +#define bSTG bSTART cGRA +#define bH2 bH bH +#define bH5 bH2 bH2 bH +#define bH10 bH5 bH5 +#define bH20 bH10 bH10 +#define bH30 bH20 bH10 +#define SP5 " " +#define SP10 SP5 SP5 +#define SP20 SP10 SP10 + + /* Lord, forgive me this. */ + + SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB + bH bSTOP cCYA " overall results " bSTG bH2 bH2 bRT "\n"); + + if (dumb_mode) { + + strcpy(tmp, cRST); + + } else { + + u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60; + + /* First queue cycle: don't stop now! */ + if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else + + /* Subsequent cycles, but we're still making finds. */ + if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else + + /* No finds for a long time and no test cases to try. */ + if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120) + strcpy(tmp, cLGN); + + /* Default: cautiously OK to stop? */ + else strcpy(tmp, cLBL); + + } + + SAYF(bV bSTOP " run time : " cRST "%-33s " bSTG bV bSTOP + " cycles done : %s%-5s " bSTG bV "\n", + DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1)); + + /* We want to warn people about not seeing new paths after a full cycle, + except when resuming fuzzing or running in non-instrumented mode. */ + + if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 || + in_bitmap || crash_mode)) { + + SAYF(bV bSTOP " last new path : " cRST "%-33s ", + DTD(cur_ms, last_path_time)); + + } else { + + if (dumb_mode) + + SAYF(bV bSTOP " last new path : " cPIN "n/a" cRST + " (non-instrumented mode) "); + + else + + SAYF(bV bSTOP " last new path : " cRST "none yet " cLRD + "(odd, check syntax!) "); + + } + + SAYF(bSTG bV bSTOP " total paths : " cRST "%-5s " bSTG bV "\n", + DI(queued_paths)); + + /* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH + limit with a '+' appended to the count. */ + + sprintf(tmp, "%s%s", DI(unique_crashes), + (unique_crashes >= KEEP_UNIQUE_CRASH) ? 
"+" : ""); + + SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP + " uniq crashes : %s%-6s" bSTG bV "\n", + DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, + tmp); + + sprintf(tmp, "%s%s", DI(unique_hangs), + (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); + + SAYF(bV bSTOP " last uniq hang : " cRST "%-33s " bSTG bV bSTOP + " uniq hangs : " cRST "%-6s" bSTG bV "\n", + DTD(cur_ms, last_hang_time), tmp); + + SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA + " map coverage " bSTG bH bHT bH20 bH2 bVL "\n"); + + /* This gets funny because we want to print several variable-length variables + together, but then cram them into a fixed-width field - so we need to + put them in a temporary buffer first. */ + + sprintf(tmp, "%s%s%u (%0.02f%%)", DI(current_entry), + queue_cur->favored ? "." : "*", queue_cur->fuzz_level, + ((double)current_entry * 100) / queued_paths); + + SAYF(bV bSTOP " now processing : " cRST "%-16s " bSTG bV bSTOP, tmp); + + sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) * + 100 / MAP_SIZE, t_byte_ratio); + + SAYF(" map density : %s%-21s" bSTG bV "\n", t_byte_ratio > 70 ? cLRD : + ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp); + + sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths), + ((double)cur_skipped_paths * 100) / queued_paths); + + SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp); + + sprintf(tmp, "%0.02f bits/tuple", + t_bytes ? (((double)t_bits) / t_bytes) : 0); + + SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp); + + SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA + " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n"); + + sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored), + ((double)queued_favored) * 100 / queued_paths); + + /* Yeah... it's still going on... halp? */ + + SAYF(bV bSTOP " now trying : " cRST "%-20s " bSTG bV bSTOP + " favored paths : " cRST "%-22s" bSTG bV "\n", stage_name, tmp); + + if (!stage_max) { + + sprintf(tmp, "%s/-", DI(stage_cur)); + + } else { + + sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max), + ((double)stage_cur) * 100 / stage_max); + + } + + SAYF(bV bSTOP " stage execs : " cRST "%-20s " bSTG bV bSTOP, tmp); + + sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov), + ((double)queued_with_cov) * 100 / queued_paths); + + SAYF(" new edges on : " cRST "%-22s" bSTG bV "\n", tmp); + + sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes), + (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : ""); + + if (crash_mode) { + + SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP + " new crashes : %s%-22s" bSTG bV "\n", DI(total_execs), + unique_crashes ? cLRD : cRST, tmp); + + } else { + + SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP + " total crashes : %s%-22s" bSTG bV "\n", DI(total_execs), + unique_crashes ? cLRD : cRST, tmp); + + } + + /* Show a warning about slow execution. */ + + if (avg_exec < 100) { + + sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ? + "zzzz..." : "slow!"); + + SAYF(bV bSTOP " exec speed : " cLRD "%-20s ", tmp); + + } else { + + sprintf(tmp, "%s/sec", DF(avg_exec)); + SAYF(bV bSTOP " exec speed : " cRST "%-20s ", tmp); + + } + + sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts), + (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); + + SAYF (bSTG bV bSTOP " total tmouts : " cRST "%-22s" bSTG bV "\n", tmp); + + /* Aaaalmost there... hold on! 
*/ + + SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10 + bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n"); + + if (skip_deterministic) { + + strcpy(tmp, "n/a, n/a, n/a"); + + } else { + + sprintf(tmp, "%s/%s, %s/%s, %s/%s", + DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]), + DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]), + DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4])); + + } + + SAYF(bV bSTOP " bit flips : " cRST "%-36s " bSTG bV bSTOP " levels : " + cRST "%-10s" bSTG bV "\n", tmp, DI(max_depth)); + + if (!skip_deterministic) + sprintf(tmp, "%s/%s, %s/%s, %s/%s", + DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]), + DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]), + DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32])); + + SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP " pending : " + cRST "%-10s" bSTG bV "\n", tmp, DI(pending_not_fuzzed)); + + if (!skip_deterministic) + sprintf(tmp, "%s/%s, %s/%s, %s/%s", + DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]), + DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]), + DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32])); + + SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP " pend fav : " + cRST "%-10s" bSTG bV "\n", tmp, DI(pending_favored)); + + if (!skip_deterministic) + sprintf(tmp, "%s/%s, %s/%s, %s/%s", + DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]), + DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]), + DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32])); + + SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP " own finds : " + cRST "%-10s" bSTG bV "\n", tmp, DI(queued_discovered)); + + if (!skip_deterministic) + sprintf(tmp, "%s/%s, %s/%s, %s/%s", + DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]), + DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]), + DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO])); + + SAYF(bV bSTOP " dictionary : " cRST "%-36s " bSTG bV bSTOP + " imported : " cRST "%-10s" bSTG bV "\n", tmp, + sync_id ? DI(queued_imported) : (u8*)"n/a"); + + sprintf(tmp, "%s/%s, %s/%s, %s/%s", + DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]), + DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]), + DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON])); + + SAYF(bV bSTOP " havoc : " cRST "%-36s " bSTG bV bSTOP, tmp); + + if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio); + else strcpy(tmp, "n/a"); + + SAYF(" stability : %s%-10s" bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40) + ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20)) + ? cMGN : cRST), tmp); + + if (!bytes_trim_out) { + + sprintf(tmp, "n/a, "); + + } else { + + sprintf(tmp, "%0.02f%%/%s, ", + ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in, + DI(trim_execs)); + + } + + if (!blocks_eff_total) { + + u8 tmp2[128]; + + sprintf(tmp2, "n/a"); + strcat(tmp, tmp2); + + } else { + + u8 tmp2[128]; + + sprintf(tmp2, "%0.02f%%", + ((double)(blocks_eff_total - blocks_eff_select)) * 100 / + blocks_eff_total); + + strcat(tmp, tmp2); + + } + if (custom_mutator) { + sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), DI(stage_cycles[STAGE_CUSTOM_MUTATOR])); + SAYF(bV bSTOP " custom mut. 
: " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n" + bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp); + } else { + SAYF(bV bSTOP " trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n" + bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp); + } + + /* Provide some CPU utilization stats. */ + + if (cpu_core_count) { + + double cur_runnable = get_runnable_processes(); + u32 cur_utilization = cur_runnable * 100 / cpu_core_count; + + u8* cpu_color = cCYA; + + /* If we could still run one or more processes, use green. */ + + if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count) + cpu_color = cLGN; + + /* If we're clearly oversubscribed, use red. */ + + if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD; + +#ifdef HAVE_AFFINITY + + if (cpu_aff >= 0) { + + SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, + MIN(cpu_aff, 999), cpu_color, + MIN(cur_utilization, 999)); + + } else { + + SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, + cpu_color, MIN(cur_utilization, 999)); + + } + +#else + + SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, + cpu_color, MIN(cur_utilization, 999)); + +#endif /* ^HAVE_AFFINITY */ + + } else SAYF("\r"); + + /* Hallelujah! */ + + fflush(0); + +} + + +/* Display quick statistics at the end of processing the input directory, + plus a bunch of warnings. Some calibration stuff also ended up here, + along with several hardcoded constants. Maybe clean up eventually. */ + +void show_init_stats(void) { + + struct queue_entry* q = queue; + u32 min_bits = 0, max_bits = 0; + u64 min_us = 0, max_us = 0; + u64 avg_us = 0; + u32 max_len = 0; + + if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles; + + while (q) { + + if (!min_us || q->exec_us < min_us) min_us = q->exec_us; + if (q->exec_us > max_us) max_us = q->exec_us; + + if (!min_bits || q->bitmap_size < min_bits) min_bits = q->bitmap_size; + if (q->bitmap_size > max_bits) max_bits = q->bitmap_size; + + if (q->len > max_len) max_len = q->len; + + q = q->next; + + } + + SAYF("\n"); + + if (avg_us > ((qemu_mode || unicorn_mode) ? 50000 : 10000)) + WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.", + doc_path); + + /* Let's keep things moving with slow binaries. */ + + if (avg_us > 50000) havoc_div = 10; /* 0-19 execs/sec */ + else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec */ + else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */ + + if (!resuming_fuzz) { + + if (max_len > 50 * 1024) + WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!", + DMS(max_len), doc_path); + else if (max_len > 10 * 1024) + WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.", + DMS(max_len), doc_path); + + if (useless_at_start && !in_bitmap) + WARNF(cLRD "Some test cases look useless. Consider using a smaller set."); + + if (queued_paths > 100) + WARNF(cLRD "You probably have far too many input files! Consider trimming down."); + else if (queued_paths > 20) + WARNF("You have lots of input files; try starting small."); + + } + + OKF("Here are some useful stats:\n\n" + + cGRA " Test case count : " cRST "%u favored, %u variable, %u total\n" + cGRA " Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n" + cGRA " Exec timing : " cRST "%s to %s us (average: %s us)\n", + queued_favored, queued_variable, queued_paths, min_bits, max_bits, + ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1), + DI(min_us), DI(max_us), DI(avg_us)); + + if (!timeout_given) { + + /* Figure out the appropriate timeout. 
The basic idea is: 5x average or + 1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second. + + If the program is slow, the multiplier is lowered to 2x or 3x, because + random scheduler jitter is less likely to have any impact, and because + our patience is wearing thin =) */ + + if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000; + else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000; + else exec_tmout = avg_us * 5 / 1000; + + exec_tmout = MAX(exec_tmout, max_us / 1000); + exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND; + + if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT; + + ACTF("No -t option specified, so I'll use exec timeout of %u ms.", + exec_tmout); + + timeout_given = 1; + + } else if (timeout_given == 3) { + + ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout); + + } + + /* In dumb mode, re-running every timing out test case with a generous time + limit is very expensive, so let's select a more conservative default. */ + + if (dumb_mode && !getenv("AFL_HANG_TMOUT")) + hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100); + + OKF("All set and ready to roll!"); + +} + diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index dc21de17..2242dd6b 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -22,9042 +22,6 @@ #include "afl-fuzz.h" -int select_algorithm(void) { - - int i_puppet, j_puppet; - - double sele = ((double)(UR(10000))*0.0001); - j_puppet = 0; - for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) { - if (unlikely(i_puppet == 0)) { - if (sele < probability_now[swarm_now][i_puppet]) - break; - } else { - if (sele < probability_now[swarm_now][i_puppet]) { - j_puppet =1; - break; - } - } - } - if (j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1]) - FATAL("error select_algorithm"); - return i_puppet; -} - - -/* Shuffle an array of pointers. Might be slightly biased. */ - -static void shuffle_ptrs(void** ptrs, u32 cnt) { - - u32 i; - - for (i = 0; i < cnt - 2; ++i) { - - u32 j = i + UR(cnt - i); - void *s = ptrs[i]; - ptrs[i] = ptrs[j]; - ptrs[j] = s; - - } - -} - - -#ifdef HAVE_AFFINITY - -/* Build a list of processes bound to specific cores. Returns -1 if nothing - can be found. Assumes an upper bound of 4k CPUs. */ - -static void bind_to_free_cpu(void) { - - DIR* d; - struct dirent* de; - cpu_set_t c; - - u8 cpu_used[4096] = { 0 }; - u32 i; - - if (cpu_core_count < 2) return; - - if (getenv("AFL_NO_AFFINITY")) { - - WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set)."); - return; - - } - - d = opendir("/proc"); - - if (!d) { - - WARNF("Unable to access /proc - can't scan for free CPU cores."); - return; - - } - - ACTF("Checking CPU core loadout..."); - - /* Introduce some jitter, in case multiple AFL tasks are doing the same - thing at the same time... */ - - usleep(R(1000) * 250); - - /* Scan all /proc//status entries, checking for Cpus_allowed_list. - Flag all processes bound to a specific CPU using cpu_used[]. This will - fail for some exotic binding setups, but is likely good enough in almost - all real-world use cases. */ - - while ((de = readdir(d))) { - - u8* fn; - FILE* f; - u8 tmp[MAX_LINE]; - u8 has_vmsize = 0; - - if (!isdigit(de->d_name[0])) continue; - - fn = alloc_printf("/proc/%s/status", de->d_name); - - if (!(f = fopen(fn, "r"))) { - ck_free(fn); - continue; - } - - while (fgets(tmp, MAX_LINE, f)) { - - u32 hval; - - /* Processes without VmSize are probably kernel tasks. 
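-         A couple of concrete Cpus_allowed_list lines, to show what the
-         check below treats as "bound to one core" (values made up):
-
-           Cpus_allowed_list:  3      <- counted: pinned to core 3
-           Cpus_allowed_list:  0-7    <- skipped: '-' denotes a range
-           Cpus_allowed_list:  2,5    <- skipped: ',' denotes a set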
*/ - - if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1; - - if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && - !strchr(tmp, '-') && !strchr(tmp, ',') && - sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) && - has_vmsize) { - - cpu_used[hval] = 1; - break; - - } - - } - - ck_free(fn); - fclose(f); - - } - - closedir(d); - - for (i = 0; i < cpu_core_count; ++i) if (!cpu_used[i]) break; - - if (i == cpu_core_count) { - - SAYF("\n" cLRD "[-] " cRST - "Uh-oh, looks like all %d CPU cores on your system are allocated to\n" - " other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n" - " another fuzzer on this machine is probably a bad plan, but if you are\n" - " absolutely sure, you can set AFL_NO_AFFINITY and try again.\n", - cpu_core_count); - - FATAL("No more free CPU cores"); - - } - - OKF("Found a free CPU core, binding to #%u.", i); - - cpu_aff = i; - - CPU_ZERO(&c); - CPU_SET(i, &c); - - if (sched_setaffinity(0, sizeof(c), &c)) - PFATAL("sched_setaffinity failed"); - -} - -#endif /* HAVE_AFFINITY */ - -#ifndef IGNORE_FINDS - -/* Helper function to compare buffers; returns first and last differing offset. We - use this to find reasonable locations for splicing two files. */ - -static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { - - s32 f_loc = -1; - s32 l_loc = -1; - u32 pos; - - for (pos = 0; pos < len; ++pos) { - - if (*(ptr1++) != *(ptr2++)) { - - if (f_loc == -1) f_loc = pos; - l_loc = pos; - - } - - } - - *first = f_loc; - *last = l_loc; - - return; - -} - -#endif /* !IGNORE_FINDS */ - - - -/* Load postprocessor, if available. */ - -static void setup_post(void) { - - void* dh; - u8* fn = getenv("AFL_POST_LIBRARY"); - u32 tlen = 6; - - if (!fn) return; - - ACTF("Loading postprocessor from '%s'...", fn); - - dh = dlopen(fn, RTLD_NOW); - if (!dh) FATAL("%s", dlerror()); - - post_handler = dlsym(dh, "afl_postprocess"); - if (!post_handler) FATAL("Symbol 'afl_postprocess' not found."); - - /* Do a quick test. It's better to segfault now than later =) */ - - post_handler("hello", &tlen); - - OKF("Postprocessor installed successfully."); - -} - -static void setup_custom_mutator(void) { - void* dh; - u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY"); - - if (!fn) return; - - ACTF("Loading custom mutator library from '%s'...", fn); - - dh = dlopen(fn, RTLD_NOW); - if (!dh) FATAL("%s", dlerror()); - - custom_mutator = dlsym(dh, "afl_custom_mutator"); - if (!custom_mutator) FATAL("Symbol 'afl_custom_mutator' not found."); - - pre_save_handler = dlsym(dh, "afl_pre_save_handler"); -// if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found."); - - OKF("Custom mutator installed successfully."); -} - - -/* Read all testcases from the input directory, then queue them for testing. - Called at startup. */ - -static void read_testcases(void) { - - struct dirent **nl; - s32 nl_cnt; - u32 i; - u8* fn1; - - /* Auto-detect non-in-place resumption attempts. */ - - fn1 = alloc_printf("%s/queue", in_dir); - if (!access(fn1, F_OK)) in_dir = fn1; else ck_free(fn1); - - ACTF("Scanning '%s'...", in_dir); - - /* We use scandir() + alphasort() rather than readdir() because otherwise, - the ordering of test cases would vary somewhat randomly and would be - difficult to control. */ - - nl_cnt = scandir(in_dir, &nl, NULL, alphasort); - - if (nl_cnt < 0) { - - if (errno == ENOENT || errno == ENOTDIR) - - SAYF("\n" cLRD "[-] " cRST - "The input directory does not seem to be valid - try again. 
The fuzzer needs\n" - " one or more test case to start with - ideally, a small file under 1 kB\n" - " or so. The cases must be stored as regular files directly in the input\n" - " directory.\n"); - - PFATAL("Unable to open '%s'", in_dir); - - } - - if (shuffle_queue && nl_cnt > 1) { - - ACTF("Shuffling queue..."); - shuffle_ptrs((void**)nl, nl_cnt); - - } - - for (i = 0; i < nl_cnt; ++i) { - - struct stat st; - - u8* fn2 = alloc_printf("%s/%s", in_dir, nl[i]->d_name); - u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name); - - u8 passed_det = 0; - - free(nl[i]); /* not tracked */ - - if (lstat(fn2, &st) || access(fn2, R_OK)) - PFATAL("Unable to access '%s'", fn2); - - /* This also takes care of . and .. */ - - if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn2, "/README.txt")) { - - ck_free(fn2); - ck_free(dfn); - continue; - - } - - if (st.st_size > MAX_FILE) - FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, - DMS(st.st_size), DMS(MAX_FILE)); - - /* Check for metadata that indicates that deterministic fuzzing - is complete for this entry. We don't want to repeat deterministic - fuzzing when resuming aborted scans, because it would be pointless - and probably very time-consuming. */ - - if (!access(dfn, F_OK)) passed_det = 1; - ck_free(dfn); - - add_to_queue(fn2, st.st_size, passed_det); - - } - - free(nl); /* not tracked */ - - if (!queued_paths) { - - SAYF("\n" cLRD "[-] " cRST - "Looks like there are no valid test cases in the input directory! The fuzzer\n" - " needs one or more test case to start with - ideally, a small file under\n" - " 1 kB or so. The cases must be stored as regular files directly in the\n" - " input directory.\n"); - - FATAL("No usable test cases in '%s'", in_dir); - - } - - last_path_time = 0; - queued_at_start = queued_paths; - -} - - -/* Execute target application, monitoring for timeouts. Return status - information. The called program will update trace_bits[]. */ - -static u8 run_target(char** argv, u32 timeout) { - - static struct itimerval it; - static u32 prev_timed_out = 0; - static u64 exec_ms = 0; - - int status = 0; - u32 tb4; - - child_timed_out = 0; - - /* After this memset, trace_bits[] are effectively volatile, so we - must prevent any earlier operations from venturing into that - territory. */ - - memset(trace_bits, 0, MAP_SIZE); - MEM_BARRIER(); - - /* If we're running in "dumb" mode, we can't rely on the fork server - logic compiled into the target program, so we will just keep calling - execve(). There is a bit of code duplication between here and - init_forkserver(), but c'est la vie. */ - - if (dumb_mode == 1 || no_forkserver) { - - child_pid = fork(); - - if (child_pid < 0) PFATAL("fork() failed"); - - if (!child_pid) { - - struct rlimit r; - - if (mem_limit) { - - r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; - -#ifdef RLIMIT_AS - - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ - -#else - - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ - -#endif /* ^RLIMIT_AS */ - - } - - r.rlim_max = r.rlim_cur = 0; - - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ - - /* Isolate the process and configure standard descriptors. If out_file is - specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ - - setsid(); - - dup2(dev_null_fd, 1); - dup2(dev_null_fd, 2); - - if (out_file) { - - dup2(dev_null_fd, 0); - - } else { - - dup2(out_fd, 0); - close(out_fd); - - } - - /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. 
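-         A sketch of what that could look like, not something this code
-         does today: open the descriptor once with the flag set,
-
-           s32 dev_null_fd = open("/dev/null", O_RDWR | O_CLOEXEC);
-
-         and the manual close() calls below become unnecessary, since
-         execv() then drops the descriptor on its own.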
*/ - - close(dev_null_fd); - close(out_dir_fd); -#ifndef HAVE_ARC4RANDOM - close(dev_urandom_fd); -#endif - close(fileno(plot_file)); - - /* Set sane defaults for ASAN if nothing else specified. */ - - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); - - setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" - "symbolize=0:" - "msan_track_origins=0", 0); - - execv(target_path, argv); - - /* Use a distinctive bitmap value to tell the parent about execv() - falling through. */ - - *(u32*)trace_bits = EXEC_FAIL_SIG; - exit(0); - - } - - } else { - - s32 res; - - /* In non-dumb mode, we have the fork server up and running, so simply - tell it to have at it, and then read back PID. */ - - if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) { - - if (stop_soon) return 0; - RPFATAL(res, "Unable to request new process from fork server (OOM?)"); - - } - - if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) { - - if (stop_soon) return 0; - RPFATAL(res, "Unable to request new process from fork server (OOM?)"); - - } - - if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)"); - - } - - /* Configure timeout, as requested by user, then wait for child to terminate. */ - - it.it_value.tv_sec = (timeout / 1000); - it.it_value.tv_usec = (timeout % 1000) * 1000; - - setitimer(ITIMER_REAL, &it, NULL); - - /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */ - - if (dumb_mode == 1 || no_forkserver) { - - if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed"); - - } else { - - s32 res; - - if ((res = read(fsrv_st_fd, &status, 4)) != 4) { - - if (stop_soon) return 0; - RPFATAL(res, "Unable to communicate with fork server (OOM?)"); - - } - - } - - if (!WIFSTOPPED(status)) child_pid = 0; - - getitimer(ITIMER_REAL, &it); - exec_ms = (u64) timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000); - if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms; - - it.it_value.tv_sec = 0; - it.it_value.tv_usec = 0; - - setitimer(ITIMER_REAL, &it, NULL); - - ++total_execs; - - /* Any subsequent operations on trace_bits must not be moved by the - compiler below this point. Past this location, trace_bits[] behave - very normally and do not have to be treated as volatile. */ - - MEM_BARRIER(); - - tb4 = *(u32*)trace_bits; - -#ifdef __x86_64__ - classify_counts((u64*)trace_bits); -#else - classify_counts((u32*)trace_bits); -#endif /* ^__x86_64__ */ - - prev_timed_out = child_timed_out; - - /* Report outcome to caller. */ - - if (WIFSIGNALED(status) && !stop_soon) { - - kill_signal = WTERMSIG(status); - - if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT; - - return FAULT_CRASH; - - } - - /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and - must use a special exit code. */ - - if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) { - kill_signal = 0; - return FAULT_CRASH; - } - - if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG) - return FAULT_ERROR; - - return FAULT_NONE; - -} - - -/* Write modified data to file for testing. If out_file is set, the old file - is unlinked and a new one is created. Otherwise, out_fd is rewound and - truncated. */ - -static void write_to_testcase(void* mem, u32 len) { - - s32 fd = out_fd; - - if (out_file) { - - unlink(out_file); /* Ignore errors. 
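-                                        (The file may not exist yet on the
-                                        first call; anything worse is caught
-                                        by the O_EXCL open below.)
-
-     While we are here: pre_save_handler below gives an
-     AFL_CUSTOM_MUTATOR_LIBRARY the chance to rewrite the buffer before it
-     reaches disk. A minimal pass-through handler, assuming the signature
-     implied by the call site:
-
-       size_t afl_pre_save_handler(u8* data, size_t size, u8** new_data) {
-
-         *new_data = data;      <- hand the buffer back unmodified,
-         return size;           <- or return a transformed copy instead
-
-       }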
*/ - - fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); - - if (fd < 0) PFATAL("Unable to create '%s'", out_file); - - } else lseek(fd, 0, SEEK_SET); - - if (pre_save_handler) { - u8* new_data; - size_t new_size = pre_save_handler(mem, len, &new_data); - ck_write(fd, new_data, new_size, out_file); - } else { - ck_write(fd, mem, len, out_file); - } - - if (!out_file) { - - if (ftruncate(fd, len)) PFATAL("ftruncate() failed"); - lseek(fd, 0, SEEK_SET); - - } else close(fd); - -} - - -/* The same, but with an adjustable gap. Used for trimming. */ - -static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { - - s32 fd = out_fd; - u32 tail_len = len - skip_at - skip_len; - - if (out_file) { - - unlink(out_file); /* Ignore errors. */ - - fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); - - if (fd < 0) PFATAL("Unable to create '%s'", out_file); - - } else lseek(fd, 0, SEEK_SET); - - if (skip_at) ck_write(fd, mem, skip_at, out_file); - - u8 *memu8 = mem; - if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file); - - if (!out_file) { - - if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed"); - lseek(fd, 0, SEEK_SET); - - } else close(fd); - -} - - -static void show_stats(void); - -/* Calibrate a new test case. This is done when processing the input directory - to warn about flaky or otherwise problematic test cases early on; and when - new paths are discovered to detect variable behavior and so on. */ - -static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, - u32 handicap, u8 from_queue) { - - static u8 first_trace[MAP_SIZE]; - - u8 fault = 0, new_bits = 0, var_detected = 0, - first_run = (q->exec_cksum == 0); - - u64 start_us, stop_us; - - s32 old_sc = stage_cur, old_sm = stage_max; - u32 use_tmout = exec_tmout; - u8* old_sn = stage_name; - - /* Be a bit more generous about timeouts when resuming sessions, or when - trying to calibrate already-added finds. This helps avoid trouble due - to intermittent latency. */ - - if (!from_queue || resuming_fuzz) - use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD, - exec_tmout * CAL_TMOUT_PERC / 100); - - ++q->cal_failed; - - stage_name = "calibration"; - stage_max = fast_cal ? 3 : CAL_CYCLES; - - /* Make sure the forkserver is up before we do anything, and let's not - count its spin-up time toward binary calibration. */ - - if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) - init_forkserver(argv); - - if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE); - - start_us = get_cur_time_us(); - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - u32 cksum; - - if (!first_run && !(stage_cur % stats_update_freq)) show_stats(); - - write_to_testcase(use_mem, q->len); - - fault = run_target(argv, use_tmout); - - /* stop_soon is set by the handler for Ctrl+C. When it's pressed, - we want to bail out quickly. 
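-     The fault != crash_mode test right below covers both modes at once:
-     in a normal run crash_mode is FAULT_NONE, so any fault at all aborts
-     calibration; with -C, crash_mode is FAULT_CRASH, so it is a test case
-     that *stops* crashing that gets us out of here.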
*/ - - if (stop_soon || fault != crash_mode) goto abort_calibration; - - if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) { - fault = FAULT_NOINST; - goto abort_calibration; - } - - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - - if (q->exec_cksum != cksum) { - - u8 hnb = has_new_bits(virgin_bits); - if (hnb > new_bits) new_bits = hnb; - - if (q->exec_cksum) { - - u32 i; - - for (i = 0; i < MAP_SIZE; ++i) { - - if (!var_bytes[i] && first_trace[i] != trace_bits[i]) { - - var_bytes[i] = 1; - stage_max = CAL_CYCLES_LONG; - - } - - } - - var_detected = 1; - - } else { - - q->exec_cksum = cksum; - memcpy(first_trace, trace_bits, MAP_SIZE); - - } - - } - - } - - stop_us = get_cur_time_us(); - - total_cal_us += stop_us - start_us; - total_cal_cycles += stage_max; - - /* OK, let's collect some stats about the performance of this test case. - This is used for fuzzing air time calculations in calculate_score(). */ - - q->exec_us = (stop_us - start_us) / stage_max; - q->bitmap_size = count_bytes(trace_bits); - q->handicap = handicap; - q->cal_failed = 0; - - total_bitmap_size += q->bitmap_size; - ++total_bitmap_entries; - - update_bitmap_score(q); - - /* If this case didn't result in new output from the instrumentation, tell - parent. This is a non-critical problem, but something to warn the user - about. */ - - if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS; - -abort_calibration: - - if (new_bits == 2 && !q->has_new_cov) { - q->has_new_cov = 1; - ++queued_with_cov; - } - - /* Mark variable paths. */ - - if (var_detected) { - - var_byte_count = count_bytes(var_bytes); - - if (!q->var_behavior) { - mark_as_variable(q); - ++queued_variable; - } - - } - - stage_name = old_sn; - stage_cur = old_sc; - stage_max = old_sm; - - if (!first_run) show_stats(); - - return fault; - -} - - -/* Examine map coverage. Called once, for first test case. */ - -static void check_map_coverage(void) { - - u32 i; - - if (count_bytes(trace_bits) < 100) return; - - for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i) - if (trace_bits[i]) return; - - WARNF("Recompile binary with newer version of afl to improve coverage!"); - -} - - -/* Perform dry run of all test cases to confirm that the app is working as - expected. This is done only for the initial inputs, and only once. */ - -static void perform_dry_run(char** argv) { - - struct queue_entry* q = queue; - u32 cal_failures = 0; - u8* skip_crashes = getenv("AFL_SKIP_CRASHES"); - - while (q) { - - u8* use_mem; - u8 res; - s32 fd; - - u8* fn = strrchr(q->fname, '/') + 1; - - ACTF("Attempting dry run with '%s'...", fn); - - fd = open(q->fname, O_RDONLY); - if (fd < 0) PFATAL("Unable to open '%s'", q->fname); - - use_mem = ck_alloc_nozero(q->len); - - if (read(fd, use_mem, q->len) != q->len) - FATAL("Short read from '%s'", q->fname); - - close(fd); - - res = calibrate_case(argv, q, use_mem, 0, 1); - ck_free(use_mem); - - if (stop_soon) return; - - if (res == crash_mode || res == FAULT_NOBITS) - SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST, - q->len, q->bitmap_size, q->exec_us); - - switch (res) { - - case FAULT_NONE: - - if (q == queue) check_map_coverage(); - - if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn); - - break; - - case FAULT_TMOUT: - - if (timeout_given) { - - /* The -t nn+ syntax in the command line sets timeout_given to '2' and - instructs afl-fuzz to tolerate but skip queue entries that time - out. 
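-            For instance,
-
-              afl-fuzz -t 200+ -i in -o out -- ./target @@
-
-            enforces a 200 ms limit but lets the dry run demote seeds that
-            exceed it (cal_failed = CAL_CHANCES, as below) instead of
-            aborting the whole session.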
*/ - - if (timeout_given > 1) { - WARNF("Test case results in a timeout (skipping)"); - q->cal_failed = CAL_CHANCES; - ++cal_failures; - break; - } - - SAYF("\n" cLRD "[-] " cRST - "The program took more than %u ms to process one of the initial test cases.\n" - " Usually, the right thing to do is to relax the -t option - or to delete it\n" - " altogether and allow the fuzzer to auto-calibrate. That said, if you know\n" - " what you are doing and want to simply skip the unruly test cases, append\n" - " '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout, - exec_tmout); - - FATAL("Test case '%s' results in a timeout", fn); - - } else { - - SAYF("\n" cLRD "[-] " cRST - "The program took more than %u ms to process one of the initial test cases.\n" - " This is bad news; raising the limit with the -t option is possible, but\n" - " will probably make the fuzzing process extremely slow.\n\n" - - " If this test case is just a fluke, the other option is to just avoid it\n" - " altogether, and find one that is less of a CPU hog.\n", exec_tmout); - - FATAL("Test case '%s' results in a timeout", fn); - - } - - case FAULT_CRASH: - - if (crash_mode) break; - - if (skip_crashes) { - WARNF("Test case results in a crash (skipping)"); - q->cal_failed = CAL_CHANCES; - ++cal_failures; - break; - } - - if (mem_limit) { - - SAYF("\n" cLRD "[-] " cRST - "Oops, the program crashed with one of the test cases provided. There are\n" - " several possible explanations:\n\n" - - " - The test case causes known crashes under normal working conditions. If\n" - " so, please remove it. The fuzzer should be seeded with interesting\n" - " inputs - but not ones that cause an outright crash.\n\n" - - " - The current memory limit (%s) is too low for this program, causing\n" - " it to die due to OOM when parsing valid files. To fix this, try\n" - " bumping it up with the -m setting in the command line. If in doubt,\n" - " try something along the lines of:\n\n" - - MSG_ULIMIT_USAGE " /path/to/binary [...] for troubleshooting tips.\n", - DMS(mem_limit << 20), mem_limit - 1, doc_path); - - } else { - - SAYF("\n" cLRD "[-] " cRST - "Oops, the program crashed with one of the test cases provided. There are\n" - " several possible explanations:\n\n" - - " - The test case causes known crashes under normal working conditions. If\n" - " so, please remove it. The fuzzer should be seeded with interesting\n" - " inputs - but not ones that cause an outright crash.\n\n" - - MSG_FORK_ON_APPLE - - " - Least likely, there is a horrible bug in the fuzzer. If other options\n" - " fail, poke for troubleshooting tips.\n"); - - } -#undef MSG_ULIMIT_USAGE -#undef MSG_FORK_ON_APPLE - - FATAL("Test case '%s' results in a crash", fn); - - case FAULT_ERROR: - - FATAL("Unable to execute target application ('%s')", argv[0]); - - case FAULT_NOINST: - - FATAL("No instrumentation detected"); - - case FAULT_NOBITS: - - ++useless_at_start; - - if (!in_bitmap && !shuffle_queue) - WARNF("No new instrumentation output, test case may be useless."); - - break; - - } - - if (q->var_behavior) WARNF("Instrumentation output varies across runs."); - - q = q->next; - - } - - if (cal_failures) { - - if (cal_failures == queued_paths) - FATAL("All test cases time out%s, giving up!", - skip_crashes ? " or crash" : ""); - - WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures, - ((double)cal_failures) * 100 / queued_paths, - skip_crashes ? 
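-          /* (AFL_SKIP_CRASHES is the crash-side counterpart of '-t nn+':
-              exporting it with any value makes the dry run demote crashing
-              seeds the same way rather than bail out.) */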
" or crashes" : ""); - - if (cal_failures * 5 > queued_paths) - WARNF(cLRD "High percentage of rejected test cases, check settings!"); - - } - - OKF("All test cases processed."); - -} - - -/* Helper function: link() if possible, copy otherwise. */ - -static void link_or_copy(u8* old_path, u8* new_path) { - - s32 i = link(old_path, new_path); - s32 sfd, dfd; - u8* tmp; - - if (!i) return; - - sfd = open(old_path, O_RDONLY); - if (sfd < 0) PFATAL("Unable to open '%s'", old_path); - - dfd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (dfd < 0) PFATAL("Unable to create '%s'", new_path); - - tmp = ck_alloc(64 * 1024); - - while ((i = read(sfd, tmp, 64 * 1024)) > 0) - ck_write(dfd, tmp, i, new_path); - - if (i < 0) PFATAL("read() failed"); - - ck_free(tmp); - close(sfd); - close(dfd); - -} - - -static void nuke_resume_dir(void); - -/* Create hard links for input test cases in the output directory, choosing - good names and pivoting accordingly. */ - -static void pivot_inputs(void) { - - struct queue_entry* q = queue; - u32 id = 0; - - ACTF("Creating hard links for all input files..."); - - while (q) { - - u8 *nfn, *rsl = strrchr(q->fname, '/'); - u32 orig_id; - - if (!rsl) rsl = q->fname; else ++rsl; - - /* If the original file name conforms to the syntax and the recorded - ID matches the one we'd assign, just use the original file name. - This is valuable for resuming fuzzing runs. */ - -#ifndef SIMPLE_FILES -# define CASE_PREFIX "id:" -#else -# define CASE_PREFIX "id_" -#endif /* ^!SIMPLE_FILES */ - - if (!strncmp(rsl, CASE_PREFIX, 3) && - sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) { - - u8* src_str; - u32 src_id; - - resuming_fuzz = 1; - nfn = alloc_printf("%s/queue/%s", out_dir, rsl); - - /* Since we're at it, let's also try to find parent and figure out the - appropriate depth for this entry. */ - - src_str = strchr(rsl + 3, ':'); - - if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) { - - struct queue_entry* s = queue; - while (src_id-- && s) s = s->next; - if (s) q->depth = s->depth + 1; - - if (max_depth < q->depth) max_depth = q->depth; - - } - - } else { - - /* No dice - invent a new name, capturing the original one as a - substring. */ - -#ifndef SIMPLE_FILES - - u8* use_name = strstr(rsl, ",orig:"); - - if (use_name) use_name += 6; else use_name = rsl; - nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name); - -#else - - nfn = alloc_printf("%s/queue/id_%06u", out_dir, id); - -#endif /* ^!SIMPLE_FILES */ - - } - - /* Pivot to the new queue entry. */ - - link_or_copy(q->fname, nfn); - ck_free(q->fname); - q->fname = nfn; - - /* Make sure that the passed_det value carries over, too. */ - - if (q->passed_det) mark_as_det_done(q); - - q = q->next; - ++id; - - } - - if (in_place_resume) nuke_resume_dir(); - -} - - -#ifndef SIMPLE_FILES - -/* Construct a file name for a new test case, capturing the operation - that led to its discovery. Uses a static buffer. 
*/ - -static u8* describe_op(u8 hnb) { - - static u8 ret[256]; - - if (syncing_party) { - - sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case); - - } else { - - sprintf(ret, "src:%06u", current_entry); - - sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time); - - if (splicing_with >= 0) - sprintf(ret + strlen(ret), "+%06d", splicing_with); - - sprintf(ret + strlen(ret), ",op:%s", stage_short); - - if (stage_cur_byte >= 0) { - - sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte); - - if (stage_val_type != STAGE_VAL_NONE) - sprintf(ret + strlen(ret), ",val:%s%+d", - (stage_val_type == STAGE_VAL_BE) ? "be:" : "", - stage_cur_val); - - } else sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val); - - } - - if (hnb == 2) strcat(ret, ",+cov"); - - return ret; - -} - -#endif /* !SIMPLE_FILES */ - - -/* Write a message accompanying the crash directory :-) */ - -static void write_crash_readme(void) { - - u8* fn = alloc_printf("%s/crashes/README.txt", out_dir); - s32 fd; - FILE* f; - - fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); - ck_free(fn); - - /* Do not die on errors here - that would be impolite. */ - - if (fd < 0) return; - - f = fdopen(fd, "w"); - - if (!f) { - close(fd); - return; - } - - fprintf(f, "Command line used to find this crash:\n\n" - - "%s\n\n" - - "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n" - "memory limit. The limit used for this fuzzing session was %s.\n\n" - - "Need a tool to minimize test cases before investigating the crashes or sending\n" - "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n" - - "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n" - "an mail at once the issues are fixed\n\n" - - " https://github.com/vanhauser-thc/AFLplusplus\n\n", - - orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */ - - fclose(f); - -} - - -/* Check if the result of an execve() during routine fuzzing is interesting, - save or queue the input test case for further analysis if so. Returns 1 if - entry is saved, 0 otherwise. */ - -static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { - - if (len == 0) return 0; - - u8 *fn = ""; - u8 hnb; - s32 fd; - u8 keeping = 0, res; - - /* Update path frequency. */ - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - - struct queue_entry* q = queue; - while (q) { - if (q->exec_cksum == cksum) - q->n_fuzz = q->n_fuzz + 1; - - q = q->next; - - } - - if (fault == crash_mode) { - - /* Keep only if there are new bits in the map, add to queue for - future fuzzing, etc. */ - - if (!(hnb = has_new_bits(virgin_bits))) { - if (crash_mode) ++total_crashes; - return 0; - } - -#ifndef SIMPLE_FILES - - fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths, - describe_op(hnb)); - -#else - - fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths); - -#endif /* ^!SIMPLE_FILES */ - - add_to_queue(fn, len, 0); - - if (hnb == 2) { - queue_top->has_new_cov = 1; - ++queued_with_cov; - } - - queue_top->exec_cksum = cksum; - - /* Try to calibrate inline; this also calls update_bitmap_score() when - successful. 
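-     (The queue_cycle - 1 argument becomes the new entry's handicap, a
-      rough count of how many queue cycles it missed; calculate_score()
-      later repays handicapped entries with a higher performance score.)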
*/ - - res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0); - - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); - - fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (fd < 0) PFATAL("Unable to create '%s'", fn); - ck_write(fd, mem, len, fn); - close(fd); - - keeping = 1; - - } - - switch (fault) { - - case FAULT_TMOUT: - - /* Timeouts are not very interesting, but we're still obliged to keep - a handful of samples. We use the presence of new bits in the - hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we - just keep everything. */ - - ++total_tmouts; - - if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping; - - if (!dumb_mode) { - -#ifdef __x86_64__ - simplify_trace((u64*)trace_bits); -#else - simplify_trace((u32*)trace_bits); -#endif /* ^__x86_64__ */ - - if (!has_new_bits(virgin_tmout)) return keeping; - - } - - ++unique_tmouts; - - /* Before saving, we make sure that it's a genuine hang by re-running - the target with a more generous timeout (unless the default timeout - is already generous). */ - - if (exec_tmout < hang_tmout) { - - u8 new_fault; - write_to_testcase(mem, len); - new_fault = run_target(argv, hang_tmout); - - /* A corner case that one user reported bumping into: increasing the - timeout actually uncovers a crash. Make sure we don't discard it if - so. */ - - if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash; - - if (stop_soon || new_fault != FAULT_TMOUT) return keeping; - - } - -#ifndef SIMPLE_FILES - - fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, - unique_hangs, describe_op(0)); - -#else - - fn = alloc_printf("%s/hangs/id_%06llu", out_dir, - unique_hangs); - -#endif /* ^!SIMPLE_FILES */ - - ++unique_hangs; - - last_hang_time = get_cur_time(); - - break; - - case FAULT_CRASH: - -keep_as_crash: - - /* This is handled in a manner roughly similar to timeouts, - except for slightly different limits and no need to re-run test - cases. */ - - ++total_crashes; - - if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping; - - if (!dumb_mode) { - -#ifdef __x86_64__ - simplify_trace((u64*)trace_bits); -#else - simplify_trace((u32*)trace_bits); -#endif /* ^__x86_64__ */ - - if (!has_new_bits(virgin_crash)) return keeping; - - } - - if (!unique_crashes) write_crash_readme(); - -#ifndef SIMPLE_FILES - - fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir, - unique_crashes, kill_signal, describe_op(0)); - -#else - - fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes, - kill_signal); - -#endif /* ^!SIMPLE_FILES */ - - ++unique_crashes; - - last_crash_time = get_cur_time(); - last_crash_execs = total_execs; - - break; - - case FAULT_ERROR: FATAL("Unable to execute target application"); - - default: return keeping; - - } - - /* If we're here, we apparently want to save the crash or hang - test case, too. */ - - fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (fd < 0) PFATAL("Unable to create '%s'", fn); - ck_write(fd, mem, len, fn); - close(fd); - - ck_free(fn); - - return keeping; - -} - - -/* When resuming, try to find the queue position to start from. This makes sense - only when resuming, and when we can find the original fuzzer_stats. */ - -static u32 find_start_position(void) { - - static u8 tmp[4096]; /* Ought to be enough for anybody. 
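-     The file parsed below is the fuzzer_stats snapshot emitted by
-     write_stats_file(); the line of interest looks like
-
-       cur_path          : 137
-
-     with the key padded so the number starts at a fixed offset, which is
-     what the atoi(off + 20) below relies on.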
*/ - - u8 *fn, *off; - s32 fd, i; - u32 ret; - - if (!resuming_fuzz) return 0; - - if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); - else fn = alloc_printf("%s/../fuzzer_stats", in_dir); - - fd = open(fn, O_RDONLY); - ck_free(fn); - - if (fd < 0) return 0; - - i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ - close(fd); - - off = strstr(tmp, "cur_path : "); - if (!off) return 0; - - ret = atoi(off + 20); - if (ret >= queued_paths) ret = 0; - return ret; - -} - - -/* The same, but for timeouts. The idea is that when resuming sessions without - -t given, we don't want to keep auto-scaling the timeout over and over - again to prevent it from growing due to random flukes. */ - -static void find_timeout(void) { - - static u8 tmp[4096]; /* Ought to be enough for anybody. */ - - u8 *fn, *off; - s32 fd, i; - u32 ret; - - if (!resuming_fuzz) return; - - if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); - else fn = alloc_printf("%s/../fuzzer_stats", in_dir); - - fd = open(fn, O_RDONLY); - ck_free(fn); - - if (fd < 0) return; - - i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ - close(fd); - - off = strstr(tmp, "exec_timeout : "); - if (!off) return; - - ret = atoi(off + 17); - if (ret <= 4) return; - - exec_tmout = ret; - timeout_given = 3; - -} - - -/* Update stats file for unattended monitoring. */ - -static void write_stats_file(double bitmap_cvg, double stability, double eps) { - - static double last_bcvg, last_stab, last_eps; - static struct rusage usage; - - u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); - s32 fd; - FILE* f; - - fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); - - if (fd < 0) PFATAL("Unable to create '%s'", fn); - - ck_free(fn); - - f = fdopen(fd, "w"); - - if (!f) PFATAL("fdopen() failed"); - - /* Keep last values in case we're called from another context - where exec/sec stats and such are not readily available. */ - - if (!bitmap_cvg && !stability && !eps) { - bitmap_cvg = last_bcvg; - stability = last_stab; - eps = last_eps; - } else { - last_bcvg = bitmap_cvg; - last_stab = stability; - last_eps = eps; - } - - fprintf(f, "start_time : %llu\n" - "last_update : %llu\n" - "fuzzer_pid : %d\n" - "cycles_done : %llu\n" - "execs_done : %llu\n" - "execs_per_sec : %0.02f\n" - "paths_total : %u\n" - "paths_favored : %u\n" - "paths_found : %u\n" - "paths_imported : %u\n" - "max_depth : %u\n" - "cur_path : %u\n" /* Must match find_start_position() */ - "pending_favs : %u\n" - "pending_total : %u\n" - "variable_paths : %u\n" - "stability : %0.02f%%\n" - "bitmap_cvg : %0.02f%%\n" - "unique_crashes : %llu\n" - "unique_hangs : %llu\n" - "last_path : %llu\n" - "last_crash : %llu\n" - "last_hang : %llu\n" - "execs_since_crash : %llu\n" - "exec_timeout : %u\n" - "slowest_exec_ms : %llu\n" - "peak_rss_mb : %lu\n" - "afl_banner : %s\n" - "afl_version : " VERSION "\n" - "target_mode : %s%s%s%s%s%s%s%s\n" - "command_line : %s\n", - start_time / 1000, get_cur_time() / 1000, getpid(), - queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, - queued_paths, queued_favored, queued_discovered, queued_imported, - max_depth, current_entry, pending_favored, pending_not_fuzzed, - queued_variable, stability, bitmap_cvg, unique_crashes, - unique_hangs, last_path_time / 1000, last_crash_time / 1000, - last_hang_time / 1000, total_execs - last_crash_execs, - exec_tmout, slowest_exec_ms, (unsigned long int)usage.ru_maxrss, use_banner, - unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? 
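-             /* (So a plain instrumented target reports
-                 "target_mode : default", while e.g. QEMU plus persistent
-                 mode shows up as "qemu persistent ".) */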
" dumb " : "", - no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "", - persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "", - (unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode || - persistent_mode || deferred_mode) ? "" : "default", - orig_cmdline); - /* ignore errors */ - - fclose(f); - -} - - -/* Update the plot file if there is a reason to. */ - -static void maybe_update_plot_file(double bitmap_cvg, double eps) { - - static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md; - static u64 prev_qc, prev_uc, prev_uh; - - if (prev_qp == queued_paths && prev_pf == pending_favored && - prev_pnf == pending_not_fuzzed && prev_ce == current_entry && - prev_qc == queue_cycle && prev_uc == unique_crashes && - prev_uh == unique_hangs && prev_md == max_depth) return; - - prev_qp = queued_paths; - prev_pf = pending_favored; - prev_pnf = pending_not_fuzzed; - prev_ce = current_entry; - prev_qc = queue_cycle; - prev_uc = unique_crashes; - prev_uh = unique_hangs; - prev_md = max_depth; - - /* Fields in the file: - - unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed, - favored_not_fuzzed, unique_crashes, unique_hangs, max_depth, - execs_per_sec */ - - fprintf(plot_file, - "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n", - get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths, - pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes, - unique_hangs, max_depth, eps); /* ignore errors */ - - fflush(plot_file); - -} - - - -/* A helper function for maybe_delete_out_dir(), deleting all prefixed - files in a directory. */ - -static u8 delete_files(u8* path, u8* prefix) { - - DIR* d; - struct dirent* d_ent; - - d = opendir(path); - - if (!d) return 0; - - while ((d_ent = readdir(d))) { - - if (d_ent->d_name[0] != '.' && (!prefix || - !strncmp(d_ent->d_name, prefix, strlen(prefix)))) { - - u8* fname = alloc_printf("%s/%s", path, d_ent->d_name); - if (unlink(fname)) PFATAL("Unable to delete '%s'", fname); - ck_free(fname); - - } - - } - - closedir(d); - - return !!rmdir(path); - -} - - -/* Get the number of runnable processes, with some simple smoothing. */ - -static double get_runnable_processes(void) { - - static double res; - -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) - - /* I don't see any portable sysctl or so that would quickly give us the - number of runnable processes; the 1-minute load average can be a - semi-decent approximation, though. */ - - if (getloadavg(&res, 1) != 1) return 0; - -#else - - /* On Linux, /proc/stat is probably the best way; load averages are - computed in funny ways and sometimes don't reflect extremely short-lived - processes well. */ - - FILE* f = fopen("/proc/stat", "r"); - u8 tmp[1024]; - u32 val = 0; - - if (!f) return 0; - - while (fgets(tmp, sizeof(tmp), f)) { - - if (!strncmp(tmp, "procs_running ", 14) || - !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14); - - } - - fclose(f); - - if (!res) { - - res = val; - - } else { - - res = res * (1.0 - 1.0 / AVG_SMOOTHING) + - ((double)val) * (1.0 / AVG_SMOOTHING); - - } - -#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ - - return res; - -} - - -/* Delete the temporary directory used for in-place session resume. 
*/ - -static void nuke_resume_dir(void) { - - u8* fn; - - fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir); - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir); - if (delete_files(fn, "auto_")) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir); - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir); - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/_resume/.state", out_dir); - if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/_resume", out_dir); - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - return; - -dir_cleanup_failed: - - FATAL("_resume directory cleanup failed"); - -} - - -/* Delete fuzzer output directory if we recognize it as ours, if the fuzzer - is not currently running, and if the last run time isn't too great. */ - -static void maybe_delete_out_dir(void) { - - FILE* f; - u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir); - - /* See if the output directory is locked. If yes, bail out. If not, - create a lock that will persist for the lifetime of the process - (this requires leaving the descriptor open).*/ - - out_dir_fd = open(out_dir, O_RDONLY); - if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir); - -#ifndef __sun - - if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) { - - SAYF("\n" cLRD "[-] " cRST - "Looks like the job output directory is being actively used by another\n" - " instance of afl-fuzz. You will need to choose a different %s\n" - " or stop the other process first.\n", - sync_id ? "fuzzer ID" : "output location"); - - FATAL("Directory '%s' is in use", out_dir); - - } - -#endif /* !__sun */ - - f = fopen(fn, "r"); - - if (f) { - - u64 start_time2, last_update; - - if (fscanf(f, "start_time : %llu\n" - "last_update : %llu\n", &start_time2, &last_update) != 2) - FATAL("Malformed data in '%s'", fn); - - fclose(f); - - /* Let's see how much work is at stake. */ - - if (!in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) { - - SAYF("\n" cLRD "[-] " cRST - "The job output directory already exists and contains the results of more\n" - " than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n" - " automatically delete this data for you.\n\n" - - " If you wish to start a new session, remove or rename the directory manually,\n" - " or specify a different output location for this job. To resume the old\n" - " session, put '-' as the input directory in the command line ('-i -') and\n" - " try again.\n", OUTPUT_GRACE); - - FATAL("At-risk data found in '%s'", out_dir); - - } - - } - - ck_free(fn); - - /* The idea for in-place resume is pretty simple: we temporarily move the old - queue/ to a new location that gets deleted once import to the new queue/ - is finished. If _resume/ already exists, the current queue/ may be - incomplete due to an earlier abort, so we want to use the old _resume/ - dir instead, and we let rename() fail silently. 
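-     (Concurrent reuse is not a worry at this point: a second afl-fuzz
-      instance aimed at the same -o directory trips the flock(LOCK_EX |
-      LOCK_NB) check above with EWOULDBLOCK and dies long before getting
-      here.)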
*/ - - if (in_place_resume) { - - u8* orig_q = alloc_printf("%s/queue", out_dir); - - in_dir = alloc_printf("%s/_resume", out_dir); - - rename(orig_q, in_dir); /* Ignore errors */ - - OKF("Output directory exists, will attempt session resume."); - - ck_free(orig_q); - - } else { - - OKF("Output directory exists but deemed OK to reuse."); - - } - - ACTF("Deleting old session data..."); - - /* Okay, let's get the ball rolling! First, we need to get rid of the entries - in /.synced/.../id:*, if any are present. */ - - if (!in_place_resume) { - - fn = alloc_printf("%s/.synced", out_dir); - if (delete_files(fn, NULL)) goto dir_cleanup_failed; - ck_free(fn); - - } - - /* Next, we need to clean up /queue/.state/ subdirectories: */ - - fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir); - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/queue/.state/auto_extras", out_dir); - if (delete_files(fn, "auto_")) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir); - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir); - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - /* Then, get rid of the .state subdirectory itself (should be empty by now) - and everything matching /queue/id:*. */ - - fn = alloc_printf("%s/queue/.state", out_dir); - if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/queue", out_dir); - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - /* All right, let's do /crashes/id:* and /hangs/id:*. */ - - if (!in_place_resume) { - - fn = alloc_printf("%s/crashes/README.txt", out_dir); - unlink(fn); /* Ignore errors */ - ck_free(fn); - - } - - fn = alloc_printf("%s/crashes", out_dir); - - /* Make backup of the crashes directory if it's not empty and if we're - doing in-place resume. */ - - if (in_place_resume && rmdir(fn)) { - - time_t cur_t = time(0); - struct tm* t = localtime(&cur_t); - -#ifndef SIMPLE_FILES - - u8* nfn = alloc_printf("%s.%04d-%02d-%02d-%02d:%02d:%02d", fn, - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); - -#else - - u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); - -#endif /* ^!SIMPLE_FILES */ - - rename(fn, nfn); /* Ignore errors. */ - ck_free(nfn); - - } - - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/hangs", out_dir); - - /* Backup hangs, too. */ - - if (in_place_resume && rmdir(fn)) { - - time_t cur_t = time(0); - struct tm* t = localtime(&cur_t); - -#ifndef SIMPLE_FILES - - u8* nfn = alloc_printf("%s.%04d-%02d-%02d-%02d:%02d:%02d", fn, - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); - -#else - - u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); - -#endif /* ^!SIMPLE_FILES */ - - rename(fn, nfn); /* Ignore errors. */ - ck_free(nfn); - - } - - if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; - ck_free(fn); - - /* And now, for some finishing touches. 
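-     (Note that a non-empty crashes/ or hangs/ dir was renamed out of the
-      way above using the timestamp pattern, e.g.
-      crashes.2019-07-29-16:09:28, rather than deleted - so an in-place
-      resume keeps old crash data around.)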
*/ - - if (file_extension) { - fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); - } else { - fn = alloc_printf("%s/.cur_input", out_dir); - } - - if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/fuzz_bitmap", out_dir); - if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; - ck_free(fn); - - if (!in_place_resume) { - fn = alloc_printf("%s/fuzzer_stats", out_dir); - if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; - ck_free(fn); - } - - fn = alloc_printf("%s/plot_data", out_dir); - if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; - ck_free(fn); - - fn = alloc_printf("%s/cmdline", out_dir); - if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; - ck_free(fn); - - OKF("Output dir cleanup successful."); - - /* Wow... is that all? If yes, celebrate! */ - - return; - -dir_cleanup_failed: - - SAYF("\n" cLRD "[-] " cRST - "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n" - " some files that shouldn't be there or that couldn't be removed - so it\n" - " decided to abort! This happened while processing this path:\n\n" - - " %s\n\n" - " Please examine and manually delete the files, or specify a different\n" - " output location for the tool.\n", fn); - - FATAL("Output directory cleanup failed"); - -} - - -static void check_term_size(void); - - -/* A spiffy retro stats screen! This is called every stats_update_freq - execve() calls, plus in several other circumstances. */ - -static void show_stats(void) { - - static u64 last_stats_ms, last_plot_ms, last_ms, last_execs; - static double avg_exec; - double t_byte_ratio, stab_ratio; - - u64 cur_ms; - u32 t_bytes, t_bits; - - u32 banner_len, banner_pad; - u8 tmp[256]; - - cur_ms = get_cur_time(); - - /* If not enough time has passed since last UI update, bail out. */ - - if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return; - - /* Check if we're past the 10 minute mark. */ - - if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1; - - /* Calculate smoothed exec speed stats. */ - - if (!last_execs) { - - avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time); - - } else { - - double cur_avg = ((double)(total_execs - last_execs)) * 1000 / - (cur_ms - last_ms); - - /* If there is a dramatic (5x+) jump in speed, reset the indicator - more quickly. */ - - if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) - avg_exec = cur_avg; - - avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) + - cur_avg * (1.0 / AVG_SMOOTHING); - - } - - last_ms = cur_ms; - last_execs = total_execs; - - /* Tell the callers when to contact us (as measured in execs). */ - - stats_update_freq = avg_exec / (UI_TARGET_HZ * 10); - if (!stats_update_freq) stats_update_freq = 1; - - /* Do some bitmap stats. */ - - t_bytes = count_non_255_bytes(virgin_bits); - t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE; - - if (t_bytes) - stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes; - else - stab_ratio = 100; - - /* Roughly every minute, update fuzzer stats and save auto tokens. */ - - if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) { - - last_stats_ms = cur_ms; - write_stats_file(t_byte_ratio, stab_ratio, avg_exec); - save_auto(); - write_bitmap(); - - } - - /* Every now and then, write plot data. */ - - if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) { - - last_plot_ms = cur_ms; - maybe_update_plot_file(t_byte_ratio, avg_exec); - - } - - /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. 
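-     Both are plain environment switches:
-
-       AFL_EXIT_WHEN_DONE=1     quit once the cycle counter would be shown
-                                in green and nothing is pending;
-       AFL_BENCH_UNTIL_CRASH=1  quit as soon as the first crash is seen.
-
-     Either one just sets stop_soon = 2, so shutdown takes the normal
-     path.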
*/ - - if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed && - getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2; - - if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2; - - /* If we're not on TTY, bail out. */ - - if (not_on_tty) return; - - /* Compute some mildly useful bitmap stats. */ - - t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits); - - /* Now, for the visuals... */ - - if (clear_screen) { - - SAYF(TERM_CLEAR CURSOR_HIDE); - clear_screen = 0; - - check_term_size(); - - } - - SAYF(TERM_HOME); - - if (term_too_small) { - - SAYF(cBRI "Your terminal is too small to display the UI.\n" - "Please resize terminal window to at least 79x24.\n" cRST); - - return; - - } - - /* Let's start by drawing a centered banner. */ - - banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + strlen(power_name) + 3 + 5; - banner_pad = (79 - banner_len) / 2; - memset(tmp, ' ', banner_pad); - -#ifdef HAVE_AFFINITY - sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN - " (%s) " cPIN "[%s]" cBLU " {%d}", crash_mode ? cPIN "peruvian were-rabbit" : - cYEL "american fuzzy lop", use_banner, power_name, cpu_aff); -#else - sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN - " (%s) " cPIN "[%s]", crash_mode ? cPIN "peruvian were-rabbit" : - cYEL "american fuzzy lop", use_banner, power_name); -#endif /* HAVE_AFFINITY */ - - SAYF("\n%s\n", tmp); - - /* "Handy" shortcuts for drawing boxes... */ - -#define bSTG bSTART cGRA -#define bH2 bH bH -#define bH5 bH2 bH2 bH -#define bH10 bH5 bH5 -#define bH20 bH10 bH10 -#define bH30 bH20 bH10 -#define SP5 " " -#define SP10 SP5 SP5 -#define SP20 SP10 SP10 - - /* Lord, forgive me this. */ - - SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB - bH bSTOP cCYA " overall results " bSTG bH2 bH2 bRT "\n"); - - if (dumb_mode) { - - strcpy(tmp, cRST); - - } else { - - u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60; - - /* First queue cycle: don't stop now! */ - if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else - - /* Subsequent cycles, but we're still making finds. */ - if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else - - /* No finds for a long time and no test cases to try. */ - if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120) - strcpy(tmp, cLGN); - - /* Default: cautiously OK to stop? */ - else strcpy(tmp, cLBL); - - } - - SAYF(bV bSTOP " run time : " cRST "%-33s " bSTG bV bSTOP - " cycles done : %s%-5s " bSTG bV "\n", - DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1)); - - /* We want to warn people about not seeing new paths after a full cycle, - except when resuming fuzzing or running in non-instrumented mode. */ - - if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 || - in_bitmap || crash_mode)) { - - SAYF(bV bSTOP " last new path : " cRST "%-33s ", - DTD(cur_ms, last_path_time)); - - } else { - - if (dumb_mode) - - SAYF(bV bSTOP " last new path : " cPIN "n/a" cRST - " (non-instrumented mode) "); - - else - - SAYF(bV bSTOP " last new path : " cRST "none yet " cLRD - "(odd, check syntax!) "); - - } - - SAYF(bSTG bV bSTOP " total paths : " cRST "%-5s " bSTG bV "\n", - DI(queued_paths)); - - /* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH - limit with a '+' appended to the count. */ - - sprintf(tmp, "%s%s", DI(unique_crashes), - (unique_crashes >= KEEP_UNIQUE_CRASH) ? 
"+" : ""); - - SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP - " uniq crashes : %s%-6s" bSTG bV "\n", - DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, - tmp); - - sprintf(tmp, "%s%s", DI(unique_hangs), - (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); - - SAYF(bV bSTOP " last uniq hang : " cRST "%-33s " bSTG bV bSTOP - " uniq hangs : " cRST "%-6s" bSTG bV "\n", - DTD(cur_ms, last_hang_time), tmp); - - SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA - " map coverage " bSTG bH bHT bH20 bH2 bVL "\n"); - - /* This gets funny because we want to print several variable-length variables - together, but then cram them into a fixed-width field - so we need to - put them in a temporary buffer first. */ - - sprintf(tmp, "%s%s%u (%0.02f%%)", DI(current_entry), - queue_cur->favored ? "." : "*", queue_cur->fuzz_level, - ((double)current_entry * 100) / queued_paths); - - SAYF(bV bSTOP " now processing : " cRST "%-16s " bSTG bV bSTOP, tmp); - - sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) * - 100 / MAP_SIZE, t_byte_ratio); - - SAYF(" map density : %s%-21s" bSTG bV "\n", t_byte_ratio > 70 ? cLRD : - ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp); - - sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths), - ((double)cur_skipped_paths * 100) / queued_paths); - - SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp); - - sprintf(tmp, "%0.02f bits/tuple", - t_bytes ? (((double)t_bits) / t_bytes) : 0); - - SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp); - - SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA - " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n"); - - sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored), - ((double)queued_favored) * 100 / queued_paths); - - /* Yeah... it's still going on... halp? */ - - SAYF(bV bSTOP " now trying : " cRST "%-20s " bSTG bV bSTOP - " favored paths : " cRST "%-22s" bSTG bV "\n", stage_name, tmp); - - if (!stage_max) { - - sprintf(tmp, "%s/-", DI(stage_cur)); - - } else { - - sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max), - ((double)stage_cur) * 100 / stage_max); - - } - - SAYF(bV bSTOP " stage execs : " cRST "%-20s " bSTG bV bSTOP, tmp); - - sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov), - ((double)queued_with_cov) * 100 / queued_paths); - - SAYF(" new edges on : " cRST "%-22s" bSTG bV "\n", tmp); - - sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes), - (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : ""); - - if (crash_mode) { - - SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP - " new crashes : %s%-22s" bSTG bV "\n", DI(total_execs), - unique_crashes ? cLRD : cRST, tmp); - - } else { - - SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP - " total crashes : %s%-22s" bSTG bV "\n", DI(total_execs), - unique_crashes ? cLRD : cRST, tmp); - - } - - /* Show a warning about slow execution. */ - - if (avg_exec < 100) { - - sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ? - "zzzz..." : "slow!"); - - SAYF(bV bSTOP " exec speed : " cLRD "%-20s ", tmp); - - } else { - - sprintf(tmp, "%s/sec", DF(avg_exec)); - SAYF(bV bSTOP " exec speed : " cRST "%-20s ", tmp); - - } - - sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts), - (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); - - SAYF (bSTG bV bSTOP " total tmouts : " cRST "%-22s" bSTG bV "\n", tmp); - - /* Aaaalmost there... hold on! 
*/ - - SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10 - bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n"); - - if (skip_deterministic) { - - strcpy(tmp, "n/a, n/a, n/a"); - - } else { - - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]), - DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]), - DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4])); - - } - - SAYF(bV bSTOP " bit flips : " cRST "%-36s " bSTG bV bSTOP " levels : " - cRST "%-10s" bSTG bV "\n", tmp, DI(max_depth)); - - if (!skip_deterministic) - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]), - DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]), - DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32])); - - SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP " pending : " - cRST "%-10s" bSTG bV "\n", tmp, DI(pending_not_fuzzed)); - - if (!skip_deterministic) - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]), - DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]), - DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32])); - - SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP " pend fav : " - cRST "%-10s" bSTG bV "\n", tmp, DI(pending_favored)); - - if (!skip_deterministic) - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]), - DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]), - DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32])); - - SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP " own finds : " - cRST "%-10s" bSTG bV "\n", tmp, DI(queued_discovered)); - - if (!skip_deterministic) - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]), - DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]), - DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO])); - - SAYF(bV bSTOP " dictionary : " cRST "%-36s " bSTG bV bSTOP - " imported : " cRST "%-10s" bSTG bV "\n", tmp, - sync_id ? DI(queued_imported) : (u8*)"n/a"); - - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]), - DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]), - DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON])); - - SAYF(bV bSTOP " havoc : " cRST "%-36s " bSTG bV bSTOP, tmp); - - if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio); - else strcpy(tmp, "n/a"); - - SAYF(" stability : %s%-10s" bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40) - ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20)) - ? cMGN : cRST), tmp); - - if (!bytes_trim_out) { - - sprintf(tmp, "n/a, "); - - } else { - - sprintf(tmp, "%0.02f%%/%s, ", - ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in, - DI(trim_execs)); - - } - - if (!blocks_eff_total) { - - u8 tmp2[128]; - - sprintf(tmp2, "n/a"); - strcat(tmp, tmp2); - - } else { - - u8 tmp2[128]; - - sprintf(tmp2, "%0.02f%%", - ((double)(blocks_eff_total - blocks_eff_select)) * 100 / - blocks_eff_total); - - strcat(tmp, tmp2); - - } - if (custom_mutator) { - sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), DI(stage_cycles[STAGE_CUSTOM_MUTATOR])); - SAYF(bV bSTOP " custom mut. 
: " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n" - bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp); - } else { - SAYF(bV bSTOP " trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n" - bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp); - } - - /* Provide some CPU utilization stats. */ - - if (cpu_core_count) { - - double cur_runnable = get_runnable_processes(); - u32 cur_utilization = cur_runnable * 100 / cpu_core_count; - - u8* cpu_color = cCYA; - - /* If we could still run one or more processes, use green. */ - - if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count) - cpu_color = cLGN; - - /* If we're clearly oversubscribed, use red. */ - - if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD; - -#ifdef HAVE_AFFINITY - - if (cpu_aff >= 0) { - - SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, - MIN(cpu_aff, 999), cpu_color, - MIN(cur_utilization, 999)); - - } else { - - SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, - cpu_color, MIN(cur_utilization, 999)); - - } - -#else - - SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, - cpu_color, MIN(cur_utilization, 999)); - -#endif /* ^HAVE_AFFINITY */ - - } else SAYF("\r"); - - /* Hallelujah! */ - - fflush(0); - -} - - -/* Display quick statistics at the end of processing the input directory, - plus a bunch of warnings. Some calibration stuff also ended up here, - along with several hardcoded constants. Maybe clean up eventually. */ - -static void show_init_stats(void) { - - struct queue_entry* q = queue; - u32 min_bits = 0, max_bits = 0; - u64 min_us = 0, max_us = 0; - u64 avg_us = 0; - u32 max_len = 0; - - if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles; - - while (q) { - - if (!min_us || q->exec_us < min_us) min_us = q->exec_us; - if (q->exec_us > max_us) max_us = q->exec_us; - - if (!min_bits || q->bitmap_size < min_bits) min_bits = q->bitmap_size; - if (q->bitmap_size > max_bits) max_bits = q->bitmap_size; - - if (q->len > max_len) max_len = q->len; - - q = q->next; - - } - - SAYF("\n"); - - if (avg_us > ((qemu_mode || unicorn_mode) ? 50000 : 10000)) - WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.", - doc_path); - - /* Let's keep things moving with slow binaries. */ - - if (avg_us > 50000) havoc_div = 10; /* 0-19 execs/sec */ - else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec */ - else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */ - - if (!resuming_fuzz) { - - if (max_len > 50 * 1024) - WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!", - DMS(max_len), doc_path); - else if (max_len > 10 * 1024) - WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.", - DMS(max_len), doc_path); - - if (useless_at_start && !in_bitmap) - WARNF(cLRD "Some test cases look useless. Consider using a smaller set."); - - if (queued_paths > 100) - WARNF(cLRD "You probably have far too many input files! Consider trimming down."); - else if (queued_paths > 20) - WARNF("You have lots of input files; try starting small."); - - } - - OKF("Here are some useful stats:\n\n" - - cGRA " Test case count : " cRST "%u favored, %u variable, %u total\n" - cGRA " Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n" - cGRA " Exec timing : " cRST "%s to %s us (average: %s us)\n", - queued_favored, queued_variable, queued_paths, min_bits, max_bits, - ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1), - DI(min_us), DI(max_us), DI(avg_us)); - - if (!timeout_given) { - - /* Figure out the appropriate timeout. 
The basic idea is: 5x average or - 1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second. - - If the program is slow, the multiplier is lowered to 2x or 3x, because - random scheduler jitter is less likely to have any impact, and because - our patience is wearing thin =) */ - - if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000; - else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000; - else exec_tmout = avg_us * 5 / 1000; - - exec_tmout = MAX(exec_tmout, max_us / 1000); - exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND; - - if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT; - - ACTF("No -t option specified, so I'll use exec timeout of %u ms.", - exec_tmout); - - timeout_given = 1; - - } else if (timeout_given == 3) { - - ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout); - - } - - /* In dumb mode, re-running every timing out test case with a generous time - limit is very expensive, so let's select a more conservative default. */ - - if (dumb_mode && !getenv("AFL_HANG_TMOUT")) - hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100); - - OKF("All set and ready to roll!"); - -} - - -#ifdef USE_PYTHON -static u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { - - static u8 tmp[64]; - static u8 clean_trace[MAP_SIZE]; - - u8 needs_write = 0, fault = 0; - u32 trim_exec = 0; - u32 orig_len = q->len; - - stage_name = tmp; - bytes_trim_in += q->len; - - /* Initialize trimming in the Python module */ - stage_cur = 0; - stage_max = init_trim_py(in_buf, q->len); - - if (not_on_tty && debug) - SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, q->len); - - while(stage_cur < stage_max) { - sprintf(tmp, "ptrim %s", DI(trim_exec)); - - u32 cksum; - - char* retbuf = NULL; - size_t retlen = 0; - - trim_py(&retbuf, &retlen); - - if (retlen > orig_len) - FATAL("Trimmed data returned by Python module is larger than original data"); - - write_to_testcase(retbuf, retlen); - - fault = run_target(argv, exec_tmout); - ++trim_execs; - - if (stop_soon || fault == FAULT_ERROR) goto abort_trimming; - - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - - if (cksum == q->exec_cksum) { - - q->len = retlen; - memcpy(in_buf, retbuf, retlen); - - /* Let's save a clean trace, which will be needed by - update_bitmap_score once we're done with the trimming stuff. */ - - if (!needs_write) { - - needs_write = 1; - memcpy(clean_trace, trace_bits, MAP_SIZE); - - } - - /* Tell the Python module that the trimming was successful */ - stage_cur = post_trim_py(1); - - if (not_on_tty && debug) - SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", stage_cur, stage_max, q->len); - } else { - /* Tell the Python module that the trimming was unsuccessful */ - stage_cur = post_trim_py(0); - if (not_on_tty && debug) - SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, stage_max); - } - - /* Since this can be slow, update the screen every now and then. */ - - if (!(trim_exec++ % stats_update_freq)) show_stats(); - } - - if (not_on_tty && debug) - SAYF("[Python Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len); - - /* If we have made changes to in_buf, we also need to update the on-disk - version of the test case. 
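
Looping back to the timeout heuristic from show_init_stats() above, the whole derivation fits in a few lines. A sketch, with the two constants assumed for illustration (the real values live in config.h):

  #define EXEC_TM_ROUND 20   /* assumed */
  #define EXEC_TIMEOUT  1000 /* assumed */
  #define MAX(a, b) ((a) > (b) ? (a) : (b))

  static unsigned pick_exec_tmout(unsigned long long avg_us,
                                  unsigned long long max_us) {

    unsigned long long t;

    /* Slower targets get a smaller multiplier over the average. */
    if (avg_us > 50000)      t = avg_us * 2 / 1000;
    else if (avg_us > 10000) t = avg_us * 3 / 1000;
    else                     t = avg_us * 5 / 1000;

    t = MAX(t, max_us / 1000);  /* never below the slowest test case */
    t = (t + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND; /* round up */
    if (t > EXEC_TIMEOUT) t = EXEC_TIMEOUT;                  /* hard cap */

    return (unsigned)t;

  }

So a target averaging 800 us with a 4 ms worst case ends up with a 20 ms timeout after rounding.
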
*/ - - if (needs_write) { - - s32 fd; - - unlink(q->fname); /* ignore errors */ - - fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); - - if (fd < 0) PFATAL("Unable to create '%s'", q->fname); - - ck_write(fd, in_buf, q->len, q->fname); - close(fd); - - memcpy(trace_bits, clean_trace, MAP_SIZE); - update_bitmap_score(q); - - } - - - -abort_trimming: - - bytes_trim_out += q->len; - return fault; - -} -#endif - -/* Trim all new test cases to save cycles when doing deterministic checks. The - trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of - file size, to keep the stage short and sweet. */ - -static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { - -#ifdef USE_PYTHON - if (py_functions[PY_FUNC_TRIM]) - return trim_case_python(argv, q, in_buf); -#endif - - static u8 tmp[64]; - static u8 clean_trace[MAP_SIZE]; - - u8 needs_write = 0, fault = 0; - u32 trim_exec = 0; - u32 remove_len; - u32 len_p2; - - /* Although the trimmer will be less useful when variable behavior is - detected, it will still work to some extent, so we don't check for - this. */ - - if (q->len < 5) return 0; - - stage_name = tmp; - bytes_trim_in += q->len; - - /* Select initial chunk len, starting with large steps. */ - - len_p2 = next_p2(q->len); - - remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES); - - /* Continue until the number of steps gets too high or the stepover - gets too small. */ - - while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) { - - u32 remove_pos = remove_len; - - sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len)); - - stage_cur = 0; - stage_max = q->len / remove_len; - - while (remove_pos < q->len) { - - u32 trim_avail = MIN(remove_len, q->len - remove_pos); - u32 cksum; - - write_with_gap(in_buf, q->len, remove_pos, trim_avail); - - fault = run_target(argv, exec_tmout); - ++trim_execs; - - if (stop_soon || fault == FAULT_ERROR) goto abort_trimming; - - /* Note that we don't keep track of crashes or hangs here; maybe TODO? */ - - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - - /* If the deletion had no impact on the trace, make it permanent. This - isn't perfect for variable-path inputs, but we're just making a - best-effort pass, so it's not a big deal if we end up with false - negatives every now and then. */ - - if (cksum == q->exec_cksum) { - - u32 move_tail = q->len - remove_pos - trim_avail; - - q->len -= trim_avail; - len_p2 = next_p2(q->len); - - memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, - move_tail); - - /* Let's save a clean trace, which will be needed by - update_bitmap_score once we're done with the trimming stuff. */ - - if (!needs_write) { - - needs_write = 1; - memcpy(clean_trace, trace_bits, MAP_SIZE); - - } - - } else remove_pos += remove_len; - - /* Since this can be slow, update the screen every now and then. */ - - if (!(trim_exec++ % stats_update_freq)) show_stats(); - ++stage_cur; - - } - - remove_len >>= 1; - - } - - /* If we have made changes to in_buf, we also need to update the on-disk - version of the test case. 
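
As background on the loop above: the chunk size starts near len/TRIM_START_STEPS and halves until it would fall under len/TRIM_END_STEPS, never dropping below TRIM_MIN_BYTES. A self-contained sketch of that schedule (constants assumed, as in config.h):

  #include <stdio.h>

  #define TRIM_START_STEPS 16   /* assumed */
  #define TRIM_END_STEPS   1024 /* assumed */
  #define TRIM_MIN_BYTES   4    /* assumed */
  #define MAX(a, b) ((a) > (b) ? (a) : (b))

  /* Smallest power of two >= val, as in afl-fuzz's next_p2(). */
  static unsigned next_p2(unsigned val) {

    unsigned ret = 1;
    while (val > ret) ret <<= 1;
    return ret;

  }

  int main(void) {

    unsigned len = 4096, len_p2 = next_p2(len);
    unsigned remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);

    /* Prints 256, 128, 64, 32, 16, 8, 4 for a 4 KB input. */
    while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {
      printf("chunk size: %u\n", remove_len);
      remove_len >>= 1;
    }

    return 0;

  }
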
*/ - - if (needs_write) { - - s32 fd; - - unlink(q->fname); /* ignore errors */ - - fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); - - if (fd < 0) PFATAL("Unable to create '%s'", q->fname); - - ck_write(fd, in_buf, q->len, q->fname); - close(fd); - - memcpy(trace_bits, clean_trace, MAP_SIZE); - update_bitmap_score(q); - - } - -abort_trimming: - - bytes_trim_out += q->len; - return fault; - -} - - -/* Write a modified test case, run program, process results. Handle - error conditions, returning 1 if it's time to bail out. This is - a helper function for fuzz_one(). */ - -u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) { - - u8 fault; - - if (post_handler) { - - out_buf = post_handler(out_buf, &len); - if (!out_buf || !len) return 0; - - } - - write_to_testcase(out_buf, len); - - fault = run_target(argv, exec_tmout); - - if (stop_soon) return 1; - - if (fault == FAULT_TMOUT) { - - if (subseq_tmouts++ > TMOUT_LIMIT) { - ++cur_skipped_paths; - return 1; - } - - } else subseq_tmouts = 0; - - /* Users can hit us with SIGUSR1 to request the current input - to be abandoned. */ - - if (skip_requested) { - - skip_requested = 0; - ++cur_skipped_paths; - return 1; - - } - - /* This handles FAULT_ERROR for us: */ - - queued_discovered += save_if_interesting(argv, out_buf, len, fault); - - if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max) - show_stats(); - - return 0; - -} - - -/* Helper to choose random block len for block operations in fuzz_one(). - Doesn't return zero, provided that max_len is > 0. */ - -static u32 choose_block_len(u32 limit) { - - u32 min_value, max_value; - u32 rlim = MIN(queue_cycle, 3); - - if (!run_over10m) rlim = 1; - - switch (UR(rlim)) { - - case 0: min_value = 1; - max_value = HAVOC_BLK_SMALL; - break; - - case 1: min_value = HAVOC_BLK_SMALL; - max_value = HAVOC_BLK_MEDIUM; - break; - - default: - - if (UR(10)) { - - min_value = HAVOC_BLK_MEDIUM; - max_value = HAVOC_BLK_LARGE; - - } else { - - min_value = HAVOC_BLK_LARGE; - max_value = HAVOC_BLK_XL; - - } - - } - - if (min_value >= limit) min_value = 1; - - return min_value + UR(MIN(max_value, limit) - min_value + 1); - -} - - -/* Calculate case desirability score to adjust the length of havoc fuzzing. - A helper function for fuzz_one(). Maybe some of these constants should - go into config.h. */ - -static u32 calculate_score(struct queue_entry* q) { - - u32 avg_exec_us = total_cal_us / total_cal_cycles; - u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries; - u32 perf_score = 100; - - /* Adjust score based on execution speed of this path, compared to the - global average. Multiplier ranges from 0.1x to 3x. Fast inputs are - less expensive to fuzz, so we're giving them more air time. */ - - // TODO BUG FIXME: is this really a good idea? - // This sounds like looking for lost keys under a street light just because - // the light is better there. - // Longer execution time means longer work on the input, the deeper in - // coverage, the better the fuzzing, right? -mh - - if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10; - else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25; - else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50; - else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75; - else if (q->exec_us * 4 < avg_exec_us) perf_score = 300; - else if (q->exec_us * 3 < avg_exec_us) perf_score = 200; - else if (q->exec_us * 2 < avg_exec_us) perf_score = 150; - - /* Adjust score based on bitmap size. 
The working theory is that better - coverage translates to better targets. Multiplier from 0.25x to 3x. */ - - if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3; - else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2; - else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5; - else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25; - else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5; - else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75; - - /* Adjust score based on handicap. Handicap is proportional to how late - in the game we learned about this path. Latecomers are allowed to run - for a bit longer until they catch up with the rest. */ - - if (q->handicap >= 4) { - perf_score *= 4; - q->handicap -= 4; - } else if (q->handicap) { - perf_score *= 2; - --q->handicap; - } - - /* Final adjustment based on input depth, under the assumption that fuzzing - deeper test cases is more likely to reveal stuff that can't be - discovered with traditional fuzzers. */ - - switch (q->depth) { - - case 0 ... 3: break; - case 4 ... 7: perf_score *= 2; break; - case 8 ... 13: perf_score *= 3; break; - case 14 ... 25: perf_score *= 4; break; - default: perf_score *= 5; - - } - - u64 fuzz = q->n_fuzz; - u64 fuzz_total; - - u32 n_paths, fuzz_mu; - u32 factor = 1; - - switch (schedule) { - - case EXPLORE: - break; - - case EXPLOIT: - factor = MAX_FACTOR; - break; - - case COE: - fuzz_total = 0; - n_paths = 0; - - struct queue_entry *queue_it = queue; - while (queue_it) { - fuzz_total += queue_it->n_fuzz; - n_paths ++; - queue_it = queue_it->next; - } - - fuzz_mu = fuzz_total / n_paths; - if (fuzz <= fuzz_mu) { - if (q->fuzz_level < 16) - factor = ((u32) (1 << q->fuzz_level)); - else - factor = MAX_FACTOR; - } else { - factor = 0; - } - break; - - case FAST: - if (q->fuzz_level < 16) { - factor = ((u32) (1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz); - } else - factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2 (fuzz)); - break; - - case LIN: - factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); - break; - - case QUAD: - factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz); - break; - - default: - PFATAL ("Unknown Power Schedule"); - } - if (factor > MAX_FACTOR) - factor = MAX_FACTOR; - - perf_score *= factor / POWER_BETA; - - // MOpt mode - if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2; - else if (perf_score < 1) perf_score = 1; // Add a lower bound to AFLFast's energy assignment strategies - - /* Make sure that we don't go over limit. */ - - if (perf_score > havoc_max_mult * 100) perf_score = havoc_max_mult * 100; - - return perf_score; - -} - - -/* Helper function to see if a particular change (xor_val = old ^ new) could - be a product of deterministic bit flips with the lengths and stepovers - attempted by afl-fuzz. This is used to avoid dupes in some of the - deterministic fuzzing operations that follow bit flips. We also - return 1 if xor_val is zero, which implies that the old and attempted new - values are identical and the exec would be a waste of time. */ - -static u8 could_be_bitflip(u32 xor_val) { - - u32 sh = 0; - - if (!xor_val) return 1; - - /* Shift left until first bit set. */ - - while (!(xor_val & 1)) { ++sh; xor_val >>= 1; } - - /* 1-, 2-, and 4-bit patterns are OK anywhere. */ - - if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1; - - /* 8-, 16-, and 32-bit patterns are OK only if shift factor is - divisible by 8, since that's the stepover for these ops. 
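
Stepping back to the schedules in calculate_score() for a moment: the FAST case gives rarely-exercised inputs exponentially more energy and heavily-fuzzed ones less. A sketch of just that factor (MAX_FACTOR is assumed; fast_factor and next_p2_u64 are invented names):

  #define MAX_FACTOR 32 /* assumed */

  static unsigned long long next_p2_u64(unsigned long long val) {

    unsigned long long ret = 1;
    while (val > ret) ret <<= 1;
    return ret;

  }

  static unsigned long long fast_factor(unsigned fuzz_level,
                                        unsigned long long n_fuzz) {

    unsigned long long f;

    if (fuzz_level < 16)
      f = (1ULL << fuzz_level) / (n_fuzz == 0 ? 1 : n_fuzz);
    else
      f = MAX_FACTOR / (n_fuzz == 0 ? 1 : next_p2_u64(n_fuzz));

    return f > MAX_FACTOR ? MAX_FACTOR : f;

  }

An entry with fuzz_level 3 and n_fuzz 1 gets (1 << 3) / 1 = 8, while a path hit hundreds of times decays toward zero; the caller later clamps the resulting perf_score to at least 1.
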
*/ - - if (sh & 7) return 0; - - if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) - return 1; - - return 0; - -} - - -/* Helper function to see if a particular value is reachable through - arithmetic operations. Used for similar purposes. */ - -static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { - - u32 i, ov = 0, nv = 0, diffs = 0; - - if (old_val == new_val) return 1; - - /* See if one-byte adjustments to any byte could produce this result. */ - - for (i = 0; i < blen; ++i) { - - u8 a = old_val >> (8 * i), - b = new_val >> (8 * i); - - if (a != b) { ++diffs; ov = a; nv = b; } - - } - - /* If only one byte differs and the values are within range, return 1. */ - - if (diffs == 1) { - - if ((u8)(ov - nv) <= ARITH_MAX || - (u8)(nv - ov) <= ARITH_MAX) return 1; - - } - - if (blen == 1) return 0; - - /* See if two-byte adjustments to any byte would produce this result. */ - - diffs = 0; - - for (i = 0; i < blen / 2; ++i) { - - u16 a = old_val >> (16 * i), - b = new_val >> (16 * i); - - if (a != b) { ++diffs; ov = a; nv = b; } - - } - - /* If only one word differs and the values are within range, return 1. */ - - if (diffs == 1) { - - if ((u16)(ov - nv) <= ARITH_MAX || - (u16)(nv - ov) <= ARITH_MAX) return 1; - - ov = SWAP16(ov); nv = SWAP16(nv); - - if ((u16)(ov - nv) <= ARITH_MAX || - (u16)(nv - ov) <= ARITH_MAX) return 1; - - } - - /* Finally, let's do the same thing for dwords. */ - - if (blen == 4) { - - if ((u32)(old_val - new_val) <= ARITH_MAX || - (u32)(new_val - old_val) <= ARITH_MAX) return 1; - - new_val = SWAP32(new_val); - old_val = SWAP32(old_val); - - if ((u32)(old_val - new_val) <= ARITH_MAX || - (u32)(new_val - old_val) <= ARITH_MAX) return 1; - - } - - return 0; - -} - - -/* Last but not least, a similar helper to see if insertion of an - interesting integer is redundant given the insertions done for - shorter blen. The last param (check_le) is set if the caller - already executed LE insertion for current blen and wants to see - if BE variant passed in new_val is unique. */ - -static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { - - u32 i, j; - - if (old_val == new_val) return 1; - - /* See if one-byte insertions from interesting_8 over old_val could - produce new_val. */ - - for (i = 0; i < blen; ++i) { - - for (j = 0; j < sizeof(interesting_8); ++j) { - - u32 tval = (old_val & ~(0xff << (i * 8))) | - (((u8)interesting_8[j]) << (i * 8)); - - if (new_val == tval) return 1; - - } - - } - - /* Bail out unless we're also asked to examine two-byte LE insertions - as a preparation for BE attempts. */ - - if (blen == 2 && !check_le) return 0; - - /* See if two-byte insertions over old_val could give us new_val. */ - - for (i = 0; i < blen - 1; ++i) { - - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - - u32 tval = (old_val & ~(0xffff << (i * 8))) | - (((u16)interesting_16[j]) << (i * 8)); - - if (new_val == tval) return 1; - - /* Continue here only if blen > 2. */ - - if (blen > 2) { - - tval = (old_val & ~(0xffff << (i * 8))) | - (SWAP16(interesting_16[j]) << (i * 8)); - - if (new_val == tval) return 1; - - } - - } - - } - - if (blen == 4 && check_le) { - - /* See if four-byte insertions could produce the same result - (LE only). */ - - for (j = 0; j < sizeof(interesting_32) / 4; ++j) - if (new_val == (u32)interesting_32[j]) return 1; - - } - - return 0; - -} - - -/* Take the current entry from the queue, fuzz it for a while. This - function is a tad too long... returns 0 if fuzzed successfully, 1 if - skipped or bailed out. 
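
One detail worth a worked example first: the could_be_bitflip() filter defined above, reproduced here verbatim with a couple of checks that exercise the shift rule:

  #include <assert.h>

  static unsigned char could_be_bitflip(unsigned xor_val) {

    unsigned sh = 0;

    if (!xor_val) return 1;

    while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }

    if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

    if (sh & 7) return 0;

    return (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff);

  }

  int main(void) {

    assert(could_be_bitflip(0x0000f000));  /* 4-bit flip, OK at any shift */
    assert(could_be_bitflip(0x00ff0000));  /* byte flip at shift 16       */
    assert(!could_be_bitflip(0x0ff00000)); /* byte pattern at shift 20:
                                              unreachable with stepover 8 */
    return 0;

  }
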
*/ - -static u8 fuzz_one_original(char** argv) { - - s32 len, fd, temp_len, i, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - - u8 ret_val = 1, doing_det = 0; - - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; - -#ifdef IGNORE_FINDS - - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ - - if (queue_cur->depth > 1) return 1; - -#else - - if (pending_favored) { - - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ - - if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; - - } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. */ - - if (queue_cycle > 1 && (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) { - - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - - } else { - - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - - } - - } - -#endif /* ^IGNORE_FINDS */ - - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } - - /* Map the test case into memory. */ - - fd = open(queue_cur->fname, O_RDONLY); - - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); - - len = queue_cur->len; - - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); - - close(fd); - - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ - - out_buf = ck_alloc_nozero(len); - - subseq_tmouts = 0; - - cur_depth = queue_cur->depth; - - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ - - if (queue_cur->cal_failed) { - - u8 res = FAULT_TMOUT; - - if (queue_cur->cal_failed < CAL_CHANCES) { - - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); - - } - - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } - - } - - /************ - * TRIMMING * - ************/ - - if (!dumb_mode && !queue_cur->trim_done && !custom_mutator) { - - u8 res = trim_case(argv, queue_cur, in_buf); - - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); - - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } - - /* Don't retry trimming, even if it failed. 
*/ - - queue_cur->trim_done = 1; - - len = queue_cur->len; - - } - - memcpy(out_buf, in_buf, len); - - /********************* - * PERFORMANCE SCORE * - *********************/ - - orig_perf = perf_score = calculate_score(queue_cur); - - if (perf_score == 0) goto abandon_entry; - - if (custom_mutator) { - stage_short = "custom"; - stage_name = "custom mutator"; - stage_max = len << 3; - stage_val_type = STAGE_VAL_NONE; - - const u32 max_seed_size = 4096*4096; - u8* mutated_buf = ck_alloc(max_seed_size); - - orig_hit_cnt = queued_paths + unique_crashes; - - for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) { - size_t orig_size = (size_t) len; - size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX)); - if (mutated_size > 0) { - out_buf = ck_realloc(out_buf, mutated_size); - memcpy(out_buf, mutated_buf, mutated_size); - if (common_fuzz_stuff(argv, out_buf, (u32) mutated_size)) { - goto abandon_entry; - } - } - } - - ck_free(mutated_buf); - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max; - goto abandon_entry; - } - - - /* Skip right away if -d is given, if it has not been chosen sufficiently - often to warrant the expensive deterministic stage (fuzz_level), or - if it has gone through deterministic testing in earlier, resumed runs - (passed_det). */ - - if (skip_deterministic - || ((!queue_cur->passed_det) - && perf_score < ( - queue_cur->depth * 30 <= havoc_max_mult * 100 - ? queue_cur->depth * 30 - : havoc_max_mult * 100)) - || queue_cur->passed_det) -#ifdef USE_PYTHON - goto python_stage; -#else - goto havoc_stage; -#endif - - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. */ - - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) -#ifdef USE_PYTHON - goto python_stage; -#else - goto havoc_stage; -#endif - - doing_det = 1; - - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ - -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ - } while (0) - - /* Single walking bit. */ - - stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = queued_paths + unique_crashes; - - prev_cksum = queue_cur->exec_cksum; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - - /* While flipping the least significant bit in every byte, pull of an extra - trick to detect possible syntax tokens. In essence, the idea is that if - you have a binary blob like this: - - xxxxxxxxIHDRxxxxxxxx - - ...and changing the leading and trailing bytes causes variable or no - changes in program flow, but touching any character in the "IHDR" string - always produces the same, distinctive path, it's highly likely that - "IHDR" is an atomically-checked magic value of special significance to - the fuzzed format. - - We do this here, rather than as a separate stage, because it's a nice - way to keep the operation approximately "free" (i.e., no extra execs). 
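
A quick sanity check on the FLIP_BIT() macro doing the work here: bit 0 is the most significant bit of byte 0 (hence the 128 >> ... term), so flipping bit 10 toggles 0x20 in byte 1, and flipping the same bit twice is a no-op. A standalone check:

  #include <assert.h>

  typedef unsigned char u8;
  typedef unsigned int u32;

  #define FLIP_BIT(_ar, _b) do { \
      u8* _arf = (u8*)(_ar); \
      u32 _bf = (_b); \
      _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
    } while (0)

  int main(void) {

    u8 buf[2] = { 0, 0 };

    FLIP_BIT(buf, 10);            /* byte 10 >> 3 = 1, bit 10 & 7 = 2 */
    assert(buf[1] == (128 >> 2)); /* 0x20 */

    FLIP_BIT(buf, 10);            /* second flip restores the buffer */
    assert(buf[1] == 0);

    return 0;

  }
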
- - Empirically, performing the check when flipping the least significant bit - is advantageous, compared to doing it at the time of more disruptive - changes, where the program flow may be affected in more violent ways. - - The caveat is that we won't generate dictionaries in the -d mode or -S - mode - but that's probably a fair trade-off. - - This won't work particularly well with paths that exhibit variable - behavior, but fails gracefully, so we'll carry out the checks anyway. - - */ - - if (!dumb_mode && (stage_cur & 7) == 7) { - - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { - - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ - - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; - - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); - - } else if (cksum != prev_cksum) { - - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ - - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); - - a_len = 0; - prev_cksum = cksum; - - } - - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. */ - - if (cksum != queue_cur->exec_cksum) { - - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; - - } - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; - - /* Two walking bits. */ - - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; - - /* Four walking bits. */ - - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; - - /* Effector map setup. These macros calculate: - - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. 
- - */ - -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) - - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ - - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; - - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } - - /* Walking byte. */ - - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur; - - out_buf[stage_cur] ^= 0xFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ - - if (!eff_map[EFF_APOS(stage_cur)]) { - - u32 cksum; - - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. */ - - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; - - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } - - } - - out_buf[stage_cur] ^= 0xFF; - - } - - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. */ - - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { - - memset(eff_map, 1, EFF_ALEN(len)); - - blocks_eff_select += EFF_ALEN(len); - - } else { - - blocks_eff_select += eff_cnt; - - } - - blocks_eff_total += EFF_ALEN(len); - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; - - /* Two walking bytes. */ - - if (len < 2) goto skip_bitflip; - - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } - - stage_cur_byte = i; - - *(u16*)(out_buf + i) ^= 0xFFFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - *(u16*)(out_buf + i) ^= 0xFFFF; - - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; - - if (len < 4) goto skip_bitflip; - - /* Four walking bytes. */ - - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; ++i) { - - /* Let's consult the effector map... 
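
Concretely, the effector-map macros defined earlier behave like this, assuming the usual EFF_MAP_SCALE2 of 3 from config.h (one map entry per 8-byte bucket):

  #include <assert.h>

  #define EFF_MAP_SCALE2 3 /* assumed */
  #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
  #define EFF_REM(_x)  ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
  #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))
  #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)

  int main(void) {

    assert(EFF_APOS(17) == 2);        /* byte 17 falls in bucket 2        */
    assert(EFF_ALEN(17) == 3);        /* a 17-byte file needs 3 buckets   */
    assert(EFF_SPAN_ALEN(6, 4) == 2); /* bytes 6..9 straddle buckets 0, 1 */

    return 0;

  }
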
*/ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } - - stage_cur_byte = i; - - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; - -skip_bitflip: - - if (no_arith) goto skip_arith; - - /********************** - * ARITHMETIC INC/DEC * - **********************/ - - /* 8-bit arithmetics. */ - - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; - - stage_val_type = STAGE_VAL_LE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u8 orig = out_buf[i]; - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u8 r = orig ^ (orig + j); - - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ - - if (!could_be_bitflip(r)) { - - stage_cur_val = j; - out_buf[i] = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - r = orig ^ (orig - j); - - if (!could_be_bitflip(r)) { - - stage_cur_val = -j; - out_buf[i] = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - out_buf[i] = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; - - /* 16-bit arithmetics, both endians. */ - - if (len < 2) goto skip_arith; - - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - u16 orig = *(u16*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); - - /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. */ - - stage_val_type = STAGE_VAL_LE; - - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { - - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { - - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - /* Big endian comes next. Same deal. 
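
To see why the & 0xff guard above matters: it admits only those 16-bit additions whose effect crosses a byte boundary, since anything contained within a single byte was already covered by the 8-bit pass. A small demonstration:

  #include <assert.h>

  typedef unsigned short u16;

  int main(void) {

    u16 orig = 0x10ff;

    /* Low byte 0xff + 1 carries into the high byte: worth executing. */
    assert((orig & 0xff) + 1 > 0xff);

    orig = 0x1080;

    /* Low byte 0x80 + 1 stays within one byte: redundant with arith 8/8. */
    assert(!((orig & 0xff) + 1 > 0xff));

    return 0;

  }
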
*/ - - stage_val_type = STAGE_VAL_BE; - - - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { - - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig >> 8) < j && !could_be_bitflip(r4)) { - - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - *(u16*)(out_buf + i) = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; - - /* 32-bit arithmetics, both endians. */ - - if (len < 4) goto skip_arith; - - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; ++i) { - - u32 orig = *(u32*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); - - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. */ - - stage_val_type = STAGE_VAL_LE; - - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { - - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - /* Big endian next. */ - - stage_val_type = STAGE_VAL_BE; - - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { - - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - *(u32*)(out_buf + i) = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; - -skip_arith: - - /********************** - * INTERESTING VALUES * - **********************/ - - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); - - stage_val_type = STAGE_VAL_LE; - - orig_hit_cnt = new_hit_cnt; - - /* Setting 8-bit integers. */ - - for (i = 0; i < len; ++i) { - - u8 orig = out_buf[i]; - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_8); ++j) { - - /* Skip if the value could be a product of bitflips or arithmetics. 
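
On the byte-swapping pattern used for these big-endian attempts: SWAP16(SWAP16(orig) + j) adds j to the byte-swapped view of the value, then swaps the result back into memory order. A minimal check (the SWAP16 definition below is an illustrative stand-in for the one in the real headers):

  #include <assert.h>

  typedef unsigned short u16;

  #define SWAP16(_x) ((u16)((((u16)(_x)) << 8) | (((u16)(_x)) >> 8)))

  int main(void) {

    u16 orig = 0x01ff;

    /* Swapped view is 0xff01; add one; swap back. */
    u16 out = SWAP16(SWAP16(orig) + 1);

    assert(SWAP16(orig) == 0xff01);
    assert(out == 0x02ff); /* 0xff01 + 1 = 0xff02, back in memory order */

    return 0;

  }
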
*/ - - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } - - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - out_buf[i] = orig; - ++stage_cur; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; - - /* Setting 16-bit integers, both endians. */ - - if (no_arith || len < 2) goto skip_interest; - - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - u16 orig = *(u16*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - - stage_cur_val = interesting_16[j]; - - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. */ - - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - - stage_val_type = STAGE_VAL_LE; - - *(u16*)(out_buf + i) = interesting_16[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { - - stage_val_type = STAGE_VAL_BE; - - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - } - - *(u16*)(out_buf + i) = orig; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; - - if (len < 4) goto skip_interest; - - /* Setting 32-bit integers, both endians. */ - - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; i++) { - - u32 orig = *(u32*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - - stage_cur_val = interesting_32[j]; - - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. 
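
A small illustration of the endianness test in the 16-bit pass above: the big-endian write is skipped whenever a value equals its own byte swap, because the BE form would be an identical, wasted execution. The array below is illustrative, not the real interesting_16[] table:

  #include <assert.h>

  typedef unsigned short u16;

  #define SWAP16(_x) ((u16)((((u16)(_x)) << 8) | (((u16)(_x)) >> 8)))

  int main(void) {

    short vals[] = { -1, 256 };

    /* -1 is 0xffff: byte-symmetric, so the BE variant is skipped. */
    assert((u16)vals[0] == SWAP16(vals[0]));

    /* 256 is 0x0100: its swap is 0x0001, so both endians get tried. */
    assert((u16)vals[1] != SWAP16(vals[1]));

    return 0;

  }
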
*/ - - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { - - stage_val_type = STAGE_VAL_LE; - - *(u32*)(out_buf + i) = interesting_32[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - - stage_val_type = STAGE_VAL_BE; - - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - } - - *(u32*)(out_buf + i) = orig; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; - -skip_interest: - - /******************** - * DICTIONARY STUFF * - ********************/ - - if (!extras_cnt) goto skip_user_extras; - - /* Overwrite with user-supplied extras. */ - - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u32 last_len = 0; - - stage_cur_byte = i; - - /* Extras are sorted by size, from smallest to largest. This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. */ - - for (j = 0; j < extras_cnt; ++j) { - - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ - - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - - --stage_max; - continue; - - } - - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - ++stage_cur; - - } - - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; - - /* Insertion of user-supplied extras. 
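
The overwrite stage above applies three cheap rejection tests before spending an execution on an extra: it must fit in the remaining buffer, it must actually change something, and its span must touch at least one byte flagged in the effector map. Pulled out into a sketch (the function name is invented):

  #include <string.h>

  typedef unsigned char u8;
  typedef unsigned int u32;

  static int extra_worth_trying(const u8* buf, u32 len, u32 i,
                                const u8* extra, u32 extra_len,
                                const u8* eff_span, u32 span_len) {

    if (extra_len > len - i) return 0;                /* no room             */
    if (!memcmp(extra, buf + i, extra_len)) return 0; /* token already there */
    if (!memchr(eff_span, 1, span_len)) return 0;     /* span has no effect  */

    return 1;

  }

The probabilistic MAX_DET_EXTRAS throttle seen above sits in front of these tests and is omitted here.
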
*/ - - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; - - orig_hit_cnt = new_hit_cnt; - - ex_tmp = ck_alloc(len + MAX_DICT_FILE); - - for (i = 0; i <= len; ++i) { - - stage_cur_byte = i; - - for (j = 0; j < extras_cnt; ++j) { - - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } - - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); - - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); - - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } - - ++stage_cur; - - } - - /* Copy head */ - ex_tmp[i] = out_buf[i]; - - } - - ck_free(ex_tmp); - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UI] += stage_max; - -skip_user_extras: - - if (!a_extras_cnt) goto skip_extras; - - stage_name = "auto extras (over)"; - stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u32 last_len = 0; - - stage_cur_byte = i; - - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { - - /* See the comment in the earlier code; extras are sorted by size. */ - - if (a_extras[j].len > len - i || - !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { - - --stage_max; - continue; - - } - - last_len = a_extras[j].len; - memcpy(out_buf + i, a_extras[j].data, last_len); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - ++stage_cur; - - } - - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; - -skip_extras: - - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ - - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - -#ifdef USE_PYTHON -python_stage: - /********************************** - * EXTERNAL MUTATORS (Python API) * - **********************************/ - - if (!py_module) goto havoc_stage; - - stage_name = "python"; - stage_short = "python"; - stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; - - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - - orig_hit_cnt = queued_paths + unique_crashes; - - char* retbuf = NULL; - size_t retlen = 0; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - struct queue_entry* target; - u32 tid; - u8* new_buf; - -retry_external_pick: - /* Pick a random other queue entry for passing to external API */ - do { tid = UR(queued_paths); } while (tid == current_entry && queued_paths > 1); - - target = queue; - - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; - - /* Make sure that the target has a reasonable length. */ - - while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) { - target = target->next; - ++splicing_with; - } - - if (!target) goto retry_external_pick; - - /* Read the additional testcase into a new buffer. 
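
For reference, the head/token/tail copy in the insertion stage earlier works out to the following, written as a self-contained function that builds one mutant per call (insert_token is an invented name; the real loop reuses a single ex_tmp buffer and fills the head incrementally):

  #include <stdlib.h>
  #include <string.h>

  typedef unsigned char u8;
  typedef unsigned int u32;

  static u8* insert_token(const u8* buf, u32 len, u32 at,
                          const u8* tok, u32 tok_len, u32* out_len) {

    u8* out = malloc(len + tok_len);
    if (!out) return NULL;

    memcpy(out, buf, at);                           /* head  */
    memcpy(out + at, tok, tok_len);                 /* token */
    memcpy(out + at + tok_len, buf + at, len - at); /* tail  */

    *out_len = len + tok_len;
    return out;

  }

Inserting "IHDR" at offset 3 of the 7-byte buffer "hdrdata" yields the 11-byte mutant "hdrIHDRdata".
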
*/ - fd = open(target->fname, O_RDONLY); - if (fd < 0) PFATAL("Unable to open '%s'", target->fname); - new_buf = ck_alloc_nozero(target->len); - ck_read(fd, new_buf, target->len, target->fname); - close(fd); - - fuzz_py(out_buf, len, new_buf, target->len, &retbuf, &retlen); - - ck_free(new_buf); - - if (retbuf) { - if (!retlen) - goto abandon_entry; - - if (common_fuzz_stuff(argv, retbuf, retlen)) { - free(retbuf); - goto abandon_entry; - } - - /* Reset retbuf/retlen */ - free(retbuf); - retbuf = NULL; - retlen = 0; - - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ - - if (queued_paths != havoc_queued) { - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } - - havoc_queued = queued_paths; - } - } - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_PYTHON] += stage_max; - - if (python_only) { - /* Skip other stages */ - ret_val = 0; - goto abandon_entry; - } -#endif - - /**************** - * RANDOM HAVOC * - ****************/ - -havoc_stage: - - stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ - - if (!splice_cycle) { - - stage_name = "havoc"; - stage_short = "havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; - - } else { - - static u8 tmp[32]; - - perf_score = orig_perf; - - sprintf(tmp, "splice %u", splice_cycle); - stage_name = tmp; - stage_short = "splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - - } - - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - - temp_len = len; - - orig_hit_cnt = queued_paths + unique_crashes; - - havoc_queued = queued_paths; - - /* We essentially just do several thousand runs (depending on perf_score) - where we take the input file and make random stacked tweaks. */ - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - - stage_cur_val = use_stacking; - - for (i = 0; i < use_stacking; ++i) { - - switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) { - - case 0: - - /* Flip a single bit somewhere. Spooky! */ - - FLIP_BIT(out_buf, UR(temp_len << 3)); - break; - - case 1: - - /* Set byte to interesting value. */ - - out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; - break; - - case 2: - - /* Set word to interesting value, randomly choosing endian. */ - - if (temp_len < 2) break; - - if (UR(2)) { - - *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; - - } else { - - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); - - } - - break; - - case 3: - - /* Set dword to interesting value, randomly choosing endian. */ - - if (temp_len < 4) break; - - if (UR(2)) { - - *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; - - } else { - - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); - - } - - break; - - case 4: - - /* Randomly subtract from byte. */ - - out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); - break; - - case 5: - - /* Randomly add to byte. */ - - out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); - break; - - case 6: - - /* Randomly subtract from word, random endian. 
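
One non-obvious choice in the havoc loop above: use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)) draws the number of stacked tweaks from {2, 4, ..., 128} with a uniform draw on the exponent (assuming the stock HAVOC_STACK_POW2 of 7 from config.h), so lightly mutated candidates dominate in count while a minority get heavily mangled. A quick simulation of that distribution, modeling UR() with rand():

    #include <stdio.h>
    #include <stdlib.h>

    #define HAVOC_STACK_POW2 7          /* assumed stock value from config.h */

    /* Stand-in for AFL's UR(limit): uniform value in [0, limit). */
    static unsigned UR(unsigned limit) { return (unsigned)rand() % limit; }

    int main(void) {

      unsigned counts[HAVOC_STACK_POW2 + 1] = {0};

      for (int n = 0; n < 700000; n++)
        counts[1 + UR(HAVOC_STACK_POW2)]++;

      /* Every depth 2, 4, ..., 128 comes up roughly 1/7 of the time. */
      for (unsigned sh = 1; sh <= HAVOC_STACK_POW2; sh++)
        printf("use_stacking = %3u: %u draws\n", 1u << sh, counts[sh]);

      return 0;
    }
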
*/ - - if (temp_len < 2) break; - - if (UR(2)) { - - u32 pos = UR(temp_len - 1); - - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - - } else { - - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - - } - - break; - - case 7: - - /* Randomly add to word, random endian. */ - - if (temp_len < 2) break; - - if (UR(2)) { - - u32 pos = UR(temp_len - 1); - - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - - } else { - - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - - } - - break; - - case 8: - - /* Randomly subtract from dword, random endian. */ - - if (temp_len < 4) break; - - if (UR(2)) { - - u32 pos = UR(temp_len - 3); - - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - - } else { - - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - - } - - break; - - case 9: - - /* Randomly add to dword, random endian. */ - - if (temp_len < 4) break; - - if (UR(2)) { - - u32 pos = UR(temp_len - 3); - - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - - } else { - - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - - } - - break; - - case 10: - - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ - - out_buf[UR(temp_len)] ^= 1 + UR(255); - break; - - case 11 ... 12: { - - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ - - u32 del_from, del_len; - - if (temp_len < 2) break; - - /* Don't delete too much. */ - - del_len = choose_block_len(temp_len - 1); - - del_from = UR(temp_len - del_len + 1); - - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); - - temp_len -= del_len; - - break; - - } - - case 13: - - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - - /* Clone bytes (75%) or insert a block of constant bytes (25%). */ - - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; - - if (actually_clone) { - - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); - - } else { - - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; - - } - - clone_to = UR(temp_len); - - new_buf = ck_alloc_nozero(temp_len + clone_len); - - /* Head */ - - memcpy(new_buf, out_buf, clone_to); - - /* Inserted part */ - - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); - - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); - - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - - } - - break; - - case 14: { - - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ - - u32 copy_from, copy_to, copy_len; - - if (temp_len < 2) break; - - copy_len = choose_block_len(temp_len - 1); - - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); - - if (UR(4)) { - - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - - } else memset(out_buf + copy_to, - UR(2) ? 
UR(256) : out_buf[UR(temp_len)], copy_len); - - break; - - } - - /* Values 15 and 16 can be selected only if there are any extras - present in the dictionaries. */ - - case 15: { - - /* Overwrite bytes with an extra. */ - - if (!extras_cnt || (a_extras_cnt && UR(2))) { - - /* No user-specified extras or odds in our favor. Let's use an - auto-detected one. */ - - u32 use_extra = UR(a_extras_cnt); - u32 extra_len = a_extras[use_extra].len; - u32 insert_at; - - if (extra_len > temp_len) break; - - insert_at = UR(temp_len - extra_len + 1); - memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); - - } else { - - /* No auto extras or odds in our favor. Use the dictionary. */ - - u32 use_extra = UR(extras_cnt); - u32 extra_len = extras[use_extra].len; - u32 insert_at; - - if (extra_len > temp_len) break; - - insert_at = UR(temp_len - extra_len + 1); - memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); - - } - - break; - - } - - case 16: { - - u32 use_extra, extra_len, insert_at = UR(temp_len + 1); - u8* new_buf; - - /* Insert an extra. Do the same dice-rolling stuff as for the - previous case. */ - - if (!extras_cnt || (a_extras_cnt && UR(2))) { - - use_extra = UR(a_extras_cnt); - extra_len = a_extras[use_extra].len; - - if (temp_len + extra_len >= MAX_FILE) break; - - new_buf = ck_alloc_nozero(temp_len + extra_len); - - /* Head */ - memcpy(new_buf, out_buf, insert_at); - - /* Inserted part */ - memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); - - } else { - - use_extra = UR(extras_cnt); - extra_len = extras[use_extra].len; - - if (temp_len + extra_len >= MAX_FILE) break; - - new_buf = ck_alloc_nozero(temp_len + extra_len); - - /* Head */ - memcpy(new_buf, out_buf, insert_at); - - /* Inserted part */ - memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); - - } - - /* Tail */ - memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, - temp_len - insert_at); - - ck_free(out_buf); - out_buf = new_buf; - temp_len += extra_len; - - break; - - } - - } - - } - - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry; - - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ - - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); - - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ - - if (queued_paths != havoc_queued) { - - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } - - havoc_queued = queued_paths; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - if (!splice_cycle) { - stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_HAVOC] += stage_max; - } else { - stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_SPLICE] += stage_max; - } - -#ifndef IGNORE_FINDS - - /************ - * SPLICING * - ************/ - - /* This is a last-resort strategy triggered by a full round with no findings. - It takes the current input file, randomly selects another input, and - splices them together at some offset, then relies on the havoc - code to mutate that blob. */ - -retry_splicing: - - if (use_splicing && splice_cycle++ < SPLICE_CYCLES && - queued_paths > 1 && queue_cur->len > 1) { - - struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; - - /* First of all, if we've modified in_buf for havoc, let's clean that - up... 
*/ - - if (in_buf != orig_in) { - ck_free(in_buf); - in_buf = orig_in; - len = queue_cur->len; - } - - /* Pick a random queue entry and seek to it. Don't splice with yourself. */ - - do { tid = UR(queued_paths); } while (tid == current_entry); - - splicing_with = tid; - target = queue; - - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; - - /* Make sure that the target has a reasonable length. */ - - while (target && (target->len < 2 || target == queue_cur)) { - target = target->next; - ++splicing_with; - } - - if (!target) goto retry_splicing; - - /* Read the testcase into a new buffer. */ - - fd = open(target->fname, O_RDONLY); - - if (fd < 0) PFATAL("Unable to open '%s'", target->fname); - - new_buf = ck_alloc_nozero(target->len); - - ck_read(fd, new_buf, target->len, target->fname); - - close(fd); - - /* Find a suitable splicing location, somewhere between the first and - the last differing byte. Bail out if the difference is just a single - byte or so. */ - - locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); - - if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - ck_free(new_buf); - goto retry_splicing; - } - - /* Split somewhere between the first and last differing byte. */ - - split_at = f_diff + UR(l_diff - f_diff); - - /* Do the thing. */ - - len = target->len; - memcpy(new_buf, in_buf, split_at); - in_buf = new_buf; - - ck_free(out_buf); - out_buf = ck_alloc_nozero(len); - memcpy(out_buf, in_buf, len); - -#ifdef USE_PYTHON - goto python_stage; -#else - goto havoc_stage; -#endif - - } - -#endif /* !IGNORE_FINDS */ - - ret_val = 0; - -abandon_entry: - - splicing_with = -1; - - /* Update pending_not_fuzzed count if we made it through the calibration - cycle and have not seen this entry before. */ - - if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { - --pending_not_fuzzed; - queue_cur->was_fuzzed = 1; - if (queue_cur->favored) --pending_favored; - } - - ++queue_cur->fuzz_level; - - munmap(orig_in, queue_cur->len); - - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); - - return ret_val; - -#undef FLIP_BIT - -} - -/* MOpt mode */ -static u8 pilot_fuzzing(char** argv) { - - s32 len, fd, temp_len, i, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - - u8 ret_val = 1, doing_det = 0; - - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; - -#ifdef IGNORE_FINDS - - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ - - if (queue_cur->depth > 1) return 1; - -#else - - if (pending_favored) { - - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ - - if ((queue_cur->was_fuzzed || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; - - } - else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. 
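
The splicing stage above only needs two facts about the parents: the first and the last byte offset where they disagree. A sketch of the scan behind locate_diffs(), written to match its contract (names local to the sketch):

    #include <stdint.h>

    /* Find the first and last differing byte of two buffers; both outputs
       stay -1 if the buffers are identical over len bytes. */
    static void locate_diffs_sketch(const uint8_t* p1, const uint8_t* p2,
                                    uint32_t len, int32_t* first,
                                    int32_t* last) {

      int32_t f_loc = -1, l_loc = -1;

      for (uint32_t pos = 0; pos < len; pos++)
        if (p1[pos] != p2[pos]) {
          if (f_loc == -1) f_loc = (int32_t)pos;
          l_loc = (int32_t)pos;
        }

      *first = f_loc;
      *last  = l_loc;
    }

With f_diff and l_diff in hand, split_at = f_diff + UR(l_diff - f_diff) lands in [f_diff, l_diff), so the spliced child always differs from the current input and, in all but the boundary draw, from the splice partner as well.
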
*/ - - if (queue_cycle > 1 && !queue_cur->was_fuzzed) { - - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - - } - else { - - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - - } - - } - -#endif /* ^IGNORE_FINDS */ - - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } - - /* Map the test case into memory. */ - - fd = open(queue_cur->fname, O_RDONLY); - - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); - - len = queue_cur->len; - - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); - - close(fd); - - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ - - out_buf = ck_alloc_nozero(len); - - subseq_tmouts = 0; - - cur_depth = queue_cur->depth; - - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ - - if (queue_cur->cal_failed) { - - u8 res = FAULT_TMOUT; - - if (queue_cur->cal_failed < CAL_CHANCES) { - - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); - - } - - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } - - } - - /************ - * TRIMMING * - ************/ - - if (!dumb_mode && !queue_cur->trim_done) { - - u8 res = trim_case(argv, queue_cur, in_buf); - - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); - - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } - - /* Don't retry trimming, even if it failed. */ - - queue_cur->trim_done = 1; - - len = queue_cur->len; - - } - - memcpy(out_buf, in_buf, len); - - /********************* - * PERFORMANCE SCORE * - *********************/ - - orig_perf = perf_score = calculate_score(queue_cur); - - /* Skip right away if -d is given, if we have done deterministic fuzzing on - this entry ourselves (was_fuzzed), or if it has gone through deterministic - testing in earlier, resumed runs (passed_det). */ - - if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) - goto havoc_stage; - - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. */ - - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) - goto havoc_stage; - - - cur_ms_lv = get_cur_time(); - if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || - (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) - { - key_puppet = 1; - goto pacemaker_fuzzing; - } - - doing_det = 1; - - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ - -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ - } while (0) - - /* Single walking bit. 
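
The FLIP_BIT macro defined just above addresses bit _b as byte _b >> 3 and flips the most significant bit of that byte first (mask 128 >> (_b & 7)); flipping the same bit twice restores the original, which is what lets the stage undo every mutation in place. A self-contained check of that indexing:

    #include <assert.h>
    #include <stdint.h>

    #define FLIP_BIT(_ar, _b) do { \
        uint8_t* _arf = (uint8_t*)(_ar); \
        uint32_t _bf = (_b); \
        _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
      } while (0)

    int main(void) {

      uint8_t buf[2] = {0, 0};

      FLIP_BIT(buf, 0);            /* stage bit 0 -> MSB of byte 0 */
      assert(buf[0] == 0x80);

      FLIP_BIT(buf, 7);            /* stage bit 7 -> LSB of byte 0 */
      assert(buf[0] == 0x81);

      FLIP_BIT(buf, 8);            /* stage bit 8 -> MSB of byte 1 */
      assert(buf[1] == 0x80);

      FLIP_BIT(buf, 8);            /* second flip undoes the first */
      assert(buf[1] == 0x00);

      return 0;
    }
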
*/ - - stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; - - - - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = queued_paths + unique_crashes; - - prev_cksum = queue_cur->exec_cksum; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - - /* While flipping the least significant bit in every byte, pull of an extra - trick to detect possible syntax tokens. In essence, the idea is that if - you have a binary blob like this: - - xxxxxxxxIHDRxxxxxxxx - - ...and changing the leading and trailing bytes causes variable or no - changes in program flow, but touching any character in the "IHDR" string - always produces the same, distinctive path, it's highly likely that - "IHDR" is an atomically-checked magic value of special significance to - the fuzzed format. - - We do this here, rather than as a separate stage, because it's a nice - way to keep the operation approximately "free" (i.e., no extra execs). - - Empirically, performing the check when flipping the least significant bit - is advantageous, compared to doing it at the time of more disruptive - changes, where the program flow may be affected in more violent ways. - - The caveat is that we won't generate dictionaries in the -d mode or -S - mode - but that's probably a fair trade-off. - - This won't work particularly well with paths that exhibit variable - behavior, but fails gracefully, so we'll carry out the checks anyway. - - */ - - if (!dumb_mode && (stage_cur & 7) == 7) { - - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { - - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ - - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; - - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); - - } - else if (cksum != prev_cksum) { - - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ - - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); - - a_len = 0; - prev_cksum = cksum; - - } - - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. */ - - if (cksum != queue_cur->exec_cksum) { - - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; - - } - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; - - /* Two walking bits. */ - - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; - - - - /* Four walking bits. 
*/ - - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; - - - - - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; - - - - - /* Effector map setup. These macros calculate: - - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. - - */ - -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) - - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ - - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; - - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } - - /* Walking byte. */ - - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; - - - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur; - - out_buf[stage_cur] ^= 0xFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ - - if (!eff_map[EFF_APOS(stage_cur)]) { - - u32 cksum; - - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. */ - - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; - - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } - - } - - out_buf[stage_cur] ^= 0xFF; - - } - - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. */ - - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { - - memset(eff_map, 1, EFF_ALEN(len)); - - blocks_eff_select += EFF_ALEN(len); - - } - else { - - blocks_eff_select += eff_cnt; - - } - - blocks_eff_total += EFF_ALEN(len); - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; - - - - - - /* Two walking bytes. */ - - if (len < 2) goto skip_bitflip; - - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; - - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - /* Let's consult the effector map... 
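
The effector-map macros above are easiest to sanity-check with concrete numbers. Assuming the stock EFF_MAP_SCALE2 of 3 (one map byte per 8 input bytes, per config.h): EFF_APOS maps an offset to its bucket, EFF_ALEN rounds a length up to whole buckets, and EFF_SPAN_ALEN counts the buckets a span touches.

    #include <assert.h>

    #define EFF_MAP_SCALE2 3    /* assumed stock value from config.h */

    #define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
    #define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
    #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)

    int main(void) {

      assert(EFF_APOS(0)  == 0);         /* offsets 0..7 share bucket 0   */
      assert(EFF_APOS(7)  == 0);
      assert(EFF_APOS(8)  == 1);         /* offset 8 opens bucket 1       */

      assert(EFF_ALEN(16) == 2);         /* 16 bytes -> 2 full buckets    */
      assert(EFF_ALEN(17) == 3);         /* 17 bytes -> partial third     */

      assert(EFF_SPAN_ALEN(6, 4) == 2);  /* bytes 6..9 straddle 2 buckets */
      assert(EFF_SPAN_ALEN(8, 8) == 1);  /* bytes 8..15 fit in bucket 1   */

      return 0;
    }
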
*/ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } - - stage_cur_byte = i; - - *(u16*)(out_buf + i) ^= 0xFFFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - *(u16*)(out_buf + i) ^= 0xFFFF; - - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; - - - - - if (len < 4) goto skip_bitflip; - - /* Four walking bytes. */ - - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; - - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; ++i) { - - /* Let's consult the effector map... */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } - - stage_cur_byte = i; - - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; - - - - - - - skip_bitflip: - - if (no_arith) goto skip_arith; - - /********************** - * ARITHMETIC INC/DEC * - **********************/ - - /* 8-bit arithmetics. */ - - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; - - - - - stage_val_type = STAGE_VAL_LE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u8 orig = out_buf[i]; - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u8 r = orig ^ (orig + j); - - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ - - if (!could_be_bitflip(r)) { - - stage_cur_val = j; - out_buf[i] = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - r = orig ^ (orig - j); - - if (!could_be_bitflip(r)) { - - stage_cur_val = -j; - out_buf[i] = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - out_buf[i] = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; - - - - - - /* 16-bit arithmetics, both endians. */ - - if (len < 2) goto skip_arith; - - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; - - - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - u16 orig = *(u16*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); - - /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. 
*/ - - stage_val_type = STAGE_VAL_LE; - - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { - - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { - - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - /* Big endian comes next. Same deal. */ - - stage_val_type = STAGE_VAL_BE; - - - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { - - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig >> 8) < j && !could_be_bitflip(r4)) { - - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - *(u16*)(out_buf + i) = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; - - - - - /* 32-bit arithmetics, both endians. */ - - if (len < 4) goto skip_arith; - - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; - - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; ++i) { - - u32 orig = *(u32*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); - - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. */ - - stage_val_type = STAGE_VAL_LE; - - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { - - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; - - } else --stage_max; - - /* Big endian next. 
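
Two details in the arithmetic stages above are easy to misread. The guard (orig & 0xff) + j > 0xff keeps only additions whose carry crosses into the second byte; anything confined to one byte was already tried by the 8-bit pass. And on a little-endian host, big-endian arithmetic is done by swapping bytes, operating, and swapping back. Both in one compact check (SWAP16 spelled out locally):

    #include <assert.h>
    #include <stdint.h>

    #define SWAP16(x) ((uint16_t)((((uint16_t)(x)) << 8) | \
                                  (((uint16_t)(x)) >> 8)))

    int main(void) {

      uint16_t orig = 0x01ff;
      uint16_t j = 1;

      /* Carry gate: 0xff + 1 overflows the low byte, so orig + j touches
         two bytes and is not reachable by the 8-bit arithmetic stage. */
      assert((orig & 0xff) + j > 0xff);
      assert((uint16_t)(orig + j) == 0x0200);

      /* Big-endian add: view the bytes in the opposite order, add there,
         swap back. SWAP16(0x01ff) = 0xff01; + 1 = 0xff02; -> 0x02ff. */
      assert(SWAP16((uint16_t)(SWAP16(orig) + j)) == 0x02ff);

      return 0;
    }
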
*/ - - stage_val_type = STAGE_VAL_BE; - - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { - - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - *(u32*)(out_buf + i) = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; - - - - - skip_arith: - - /********************** - * INTERESTING VALUES * - **********************/ - - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); - - - - stage_val_type = STAGE_VAL_LE; - - orig_hit_cnt = new_hit_cnt; - - /* Setting 8-bit integers. */ - - for (i = 0; i < len; ++i) { - - u8 orig = out_buf[i]; - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_8); ++j) { - - /* Skip if the value could be a product of bitflips or arithmetics. */ - - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } - - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - out_buf[i] = orig; - ++stage_cur; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; - - - - - /* Setting 16-bit integers, both endians. */ - - if (no_arith || len < 2) goto skip_interest; - - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - u16 orig = *(u16*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - - stage_cur_val = interesting_16[j]; - - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. 
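
The interesting_8/16/32 tables that drive these stages hold classic boundary values: signed and unsigned extremes, neighbors of powers of two, and common size limits. The stock definitions are along these lines (consult config.h for the authoritative lists; in afl-fuzz.c each wider array is the concatenation of the narrower lists with the per-width additions shown here):

    #include <stdint.h>
    #include <stdio.h>

    static const int8_t  interesting_8[] =
        { -128, -1, 0, 1, 16, 32, 64, 100, 127 };

    /* Values added on top of the 8-bit list for the 16-bit stage. */
    static const int16_t interesting_16_extra[] =
        { -32768, -129, 128, 255, 256, 512, 1000, 1024, 4096, 32767 };

    /* Values added on top of the 8/16-bit lists for the 32-bit stage. */
    static const int32_t interesting_32_extra[] =
        { INT32_MIN, -100663046, -32769, 32768, 65535, 65536,
          100663045, INT32_MAX };

    int main(void) {

      printf("8-bit: %zu entries, 16-bit extras: %zu, 32-bit extras: %zu\n",
             sizeof interesting_8,
             sizeof interesting_16_extra / sizeof interesting_16_extra[0],
             sizeof interesting_32_extra / sizeof interesting_32_extra[0]);
      return 0;
    }
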
*/ - - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - - stage_val_type = STAGE_VAL_LE; - - *(u16*)(out_buf + i) = interesting_16[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { - - stage_val_type = STAGE_VAL_BE; - - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - } - - *(u16*)(out_buf + i) = orig; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; - - - - - - if (len < 4) goto skip_interest; - - /* Setting 32-bit integers, both endians. */ - - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; ++i) { - - u32 orig = *(u32*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - - stage_cur_val = interesting_32[j]; - - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. */ - - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { - - stage_val_type = STAGE_VAL_LE; - - *(u32*)(out_buf + i) = interesting_32[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - - stage_val_type = STAGE_VAL_BE; - - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - } - - *(u32*)(out_buf + i) = orig; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; - - - - - - skip_interest: - - /******************** - * DICTIONARY STUFF * - ********************/ - - if (!extras_cnt) goto skip_user_extras; - - /* Overwrite with user-supplied extras. */ - - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; - - - - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u32 last_len = 0; - - stage_cur_byte = i; - - /* Extras are sorted by size, from smallest to largest. This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. */ - - for (j = 0; j < extras_cnt; ++j) { - - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. 
Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ - - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - - --stage_max; - continue; - - } - - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - ++stage_cur; - - } - - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; - - /* Insertion of user-supplied extras. */ - - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; - - - - - orig_hit_cnt = new_hit_cnt; - - ex_tmp = ck_alloc(len + MAX_DICT_FILE); - - for (i = 0; i <= len; ++i) { - - stage_cur_byte = i; - - for (j = 0; j < extras_cnt; ++j) { - - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } - - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); - - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); - - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } - - ++stage_cur; - - } - - /* Copy head */ - ex_tmp[i] = out_buf[i]; - - } - - ck_free(ex_tmp); - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UI] += stage_max; - - skip_user_extras: - - if (!a_extras_cnt) goto skip_extras; - - stage_name = "auto extras (over)"; - stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u32 last_len = 0; - - stage_cur_byte = i; - - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { - - /* See the comment in the earlier code; extras are sorted by size. */ - - if (a_extras[j].len > len - i || - !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { - - --stage_max; - continue; - - } - - last_len = a_extras[j].len; - memcpy(out_buf + i, a_extras[j].data, last_len); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - ++stage_cur; - - } - - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; - - skip_extras: - - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ - - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - - /**************** - * RANDOM HAVOC * - ****************/ - - havoc_stage: - pacemaker_fuzzing: - - - stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. 
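
The UR(extras_cnt) >= MAX_DET_EXTRAS roll above is a cheap cost cap: each extra survives with probability MAX_DET_EXTRAS / extras_cnt, so the expected number of tokens tried per position stays near MAX_DET_EXTRAS however large the dictionary grows. A quick demonstration (assuming the stock MAX_DET_EXTRAS of 200 from config.h, with UR() modeled by rand()):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_DET_EXTRAS 200   /* assumed stock cap from config.h */

    static unsigned UR(unsigned limit) { return (unsigned)rand() % limit; }

    int main(void) {

      unsigned extras_cnt = 5000;  /* a deliberately oversized dictionary */
      unsigned tried = 0;

      for (unsigned j = 0; j < extras_cnt; j++)
        /* Same test as the stage: skip probabilistically whenever the
           dictionary exceeds the cap. */
        if (!(extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS))
          tried++;

      /* Expected value: extras_cnt * (200 / 5000) = 200 attempts. */
      printf("tried %u of %u extras\n", tried, extras_cnt);
      return 0;
    }
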
*/
-
-  if (!splice_cycle) {
-
-    stage_name = "MOpt-havoc";
-    stage_short = "MOpt_havoc";
-    stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                perf_score / havoc_div / 100;
-
-  }
-  else {
-
-    static u8 tmp[32];
-
-    perf_score = orig_perf;
-
-    sprintf(tmp, "MOpt-splice %u", splice_cycle);
-    stage_name = tmp;
-    stage_short = "MOpt_splice";
-    stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-
-  }
-
-  s32 temp_len_puppet;
-  cur_ms_lv = get_cur_time();
-
-  {
-
-    if (key_puppet == 1)
-    {
-      if (unlikely(orig_hit_cnt_puppet == 0))
-      {
-        orig_hit_cnt_puppet = queued_paths + unique_crashes;
-        last_limit_time_start = get_cur_time();
-        SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-      }
-    }
-
-    {
-#ifndef IGNORE_FINDS
-    havoc_stage_puppet:
-#endif
-
-      stage_cur_byte = -1;
-
-      /* The havoc stage mutation code is also invoked when splicing files; if the
-         splice_cycle variable is set, generate different descriptions and such. */
-
-      if (!splice_cycle) {
-
-        stage_name = "MOpt-havoc";
-        stage_short = "MOpt_havoc";
-        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
-                    perf_score / havoc_div / 100;
-
-      }
-      else {
-        static u8 tmp[32];
-        perf_score = orig_perf;
-        sprintf(tmp, "MOpt-splice %u", splice_cycle);
-        stage_name = tmp;
-        stage_short = "MOpt_splice";
-        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;
-      }
-
-      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
-
-      temp_len = len;
-
-      orig_hit_cnt = queued_paths + unique_crashes;
-
-      havoc_queued = queued_paths;
-
-      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
-
-        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
-
-        stage_cur_val = use_stacking;
-
-        for (i = 0; i < operator_num; ++i)
-        {
-          stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i];
-        }
-
-        for (i = 0; i < use_stacking; ++i) {
-
-          switch (select_algorithm()) {
-
-            case 0:
-              /* Flip a single bit somewhere. Spooky! */
-              FLIP_BIT(out_buf, UR(temp_len << 3));
-              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1;
-              break;
-
-            case 1:
-              if (temp_len < 2) break;
-              temp_len_puppet = UR(temp_len << 3);
-              FLIP_BIT(out_buf, temp_len_puppet);
-              FLIP_BIT(out_buf, temp_len_puppet + 1);
-              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1;
-              break;
-
-            case 2:
-              if (temp_len < 2) break;
-              temp_len_puppet = UR(temp_len << 3);
-              FLIP_BIT(out_buf, temp_len_puppet);
-              FLIP_BIT(out_buf, temp_len_puppet + 1);
-              FLIP_BIT(out_buf, temp_len_puppet + 2);
-              FLIP_BIT(out_buf, temp_len_puppet + 3);
-              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1;
-              break;
-
-            case 3:
-              if (temp_len < 4) break;
-              out_buf[UR(temp_len)] ^= 0xFF;
-              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1;
-              break;
-
-            case 4:
-              if (temp_len < 8) break;
-              *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF;
-              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1;
-              break;
-
-            case 5:
-              if (temp_len < 8) break;
-              *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF;
-              stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1;
-              break;
-
-            case 6:
-              out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX);
-              out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX);
-              stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1;
-              break;
-
-            case 7:
-              /* Randomly subtract from word, random endian.
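
select_algorithm() is where MOpt departs from stock havoc: instead of a uniform UR() draw over the cases, the operator index comes from a per-swarm probability vector that the particle-swarm update keeps adjusting. The sketch below shows the general shape of such a weighted draw; the names are hypothetical, and the real function reads MOpt's own probability tables:

    #include <stdio.h>
    #include <stdlib.h>

    #define OPERATOR_NUM 16

    /* Cumulative selection probabilities for the current swarm; in MOpt
       these are recomputed by the PSO step, here they start uniform. */
    static double cumulative_prob[OPERATOR_NUM];

    static void init_uniform(void) {
      for (int i = 0; i < OPERATOR_NUM; i++)
        cumulative_prob[i] = (double)(i + 1) / OPERATOR_NUM;
    }

    /* Weighted draw: pick a uniform point in [0,1) and return the first
       operator whose cumulative probability covers it. */
    static int select_algorithm_sketch(void) {
      double roll = (double)rand() / ((double)RAND_MAX + 1.0);
      for (int i = 0; i < OPERATOR_NUM; i++)
        if (roll < cumulative_prob[i]) return i;
      return OPERATOR_NUM - 1;   /* guards against rounding at the top */
    }

    int main(void) {
      init_uniform();
      printf("picked operator %d\n", select_algorithm_sketch());
      return 0;
    }

Operators that keep producing new coverage earn a larger probability slice in the next period, which is what the stage_*_puppet counters below are feeding.
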
*/ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } - /* Randomly add to word, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1; - break; - - - case 8: - /* Randomly subtract from dword, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - } - /* Randomly add to dword, random endian. */ - //if (temp_len < 4) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - } - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1; - break; - - - case 9: - /* Set byte to interesting value. */ - if (temp_len < 4) break; - out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1; - break; - - case 10: - /* Set word to interesting value, randomly choosing endian. */ - if (temp_len < 8) break; - if (UR(2)) { - *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; - } - else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); - } - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1; - break; - - - case 11: - /* Set dword to interesting value, randomly choosing endian. */ - - if (temp_len < 8) break; - - if (UR(2)) { - *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; - } - else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); - } - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1; - break; - - - case 12: - - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ - - out_buf[UR(temp_len)] ^= 1 + UR(255); - stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1; - break; - - - - case 13: { - - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ - - u32 del_from, del_len; - - if (temp_len < 2) break; - - /* Don't delete too much. */ - - del_len = choose_block_len(temp_len - 1); - - del_from = UR(temp_len - del_len + 1); - - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); - - temp_len -= del_len; - stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1; - break; - - } - - case 14: - - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - - /* Clone bytes (75%) or insert a block of constant bytes (25%). 
*/ - - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; - - if (actually_clone) { - - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); - - } - else { - - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; - - } - - clone_to = UR(temp_len); - - new_buf = ck_alloc_nozero(temp_len + clone_len); - - /* Head */ - - memcpy(new_buf, out_buf, clone_to); - - /* Inserted part */ - - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); - - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); - - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1; - } - - break; - - case 15: { - - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ - - u32 copy_from, copy_to, copy_len; - - if (temp_len < 2) break; - - copy_len = choose_block_len(temp_len - 1); - - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); - - if (UR(4)) { - - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - - } - else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); - stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1; - break; - - } - - - } - - } - - - tmp_pilot_time += 1; - - - - - u64 temp_total_found = queued_paths + unique_crashes; - - - - - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry_puppet; - - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ - - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); - - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ - - if (queued_paths != havoc_queued) { - - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } - - havoc_queued = queued_paths; - - } - - if (unlikely(queued_paths + unique_crashes > temp_total_found)) - { - u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; - total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; ++i) - { - if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i]) - stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet; - } - } - - } - new_hit_cnt = queued_paths + unique_crashes; - - if (!splice_cycle) { - stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_HAVOC] += stage_max; - } else { - stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_SPLICE] += stage_max; - } - -#ifndef IGNORE_FINDS - - /************ - * SPLICING * - ************/ - - - retry_splicing_puppet: - - if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && - queued_paths > 1 && queue_cur->len > 1) { - - struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; - - /* First of all, if we've modified in_buf for havoc, let's clean that - up... */ - - if (in_buf != orig_in) { - ck_free(in_buf); - in_buf = orig_in; - len = queue_cur->len; - } - - /* Pick a random queue entry and seek to it. Don't splice with yourself. 
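
The v2/v3 counter pair above implements MOpt's credit assignment: v3 snapshots every operator's cycle counter before the stacked round, so when the round yields new paths or crashes, exactly those operators whose live counter moved past the snapshot share the credit. A minimal restatement of that bookkeeping (names local to the sketch):

    #include <stdio.h>

    #define OPERATOR_NUM 16

    static unsigned long long cycles_v2[OPERATOR_NUM]; /* live counters  */
    static unsigned long long cycles_v3[OPERATOR_NUM]; /* round snapshot */
    static unsigned long long finds_v2[OPERATOR_NUM];  /* credited finds */

    static void snapshot(void) {
      for (int i = 0; i < OPERATOR_NUM; i++) cycles_v3[i] = cycles_v2[i];
    }

    /* Credit every operator that actually ran since the snapshot. */
    static void credit(unsigned long long new_finds) {
      for (int i = 0; i < OPERATOR_NUM; i++)
        if (cycles_v2[i] > cycles_v3[i]) finds_v2[i] += new_finds;
    }

    int main(void) {
      snapshot();
      cycles_v2[3]++;   /* pretend operators 3 and 7 were stacked...  */
      cycles_v2[7]++;
      credit(2);        /* ...and the round found two new results     */
      printf("op3=%llu op7=%llu op0=%llu\n",
             finds_v2[3], finds_v2[7], finds_v2[0]);   /* 2 2 0 */
      return 0;
    }
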
*/
-
-      do { tid = UR(queued_paths); } while (tid == current_entry);
-
-      splicing_with = tid;
-      target = queue;
-
-      while (tid >= 100) { target = target->next_100; tid -= 100; }
-      while (tid--) target = target->next;
-
-      /* Make sure that the target has a reasonable length. */
-
-      while (target && (target->len < 2 || target == queue_cur)) {
-        target = target->next;
-        ++splicing_with;
-      }
-
-      if (!target) goto retry_splicing_puppet;
-
-      /* Read the testcase into a new buffer. */
-
-      fd = open(target->fname, O_RDONLY);
-
-      if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
-
-      new_buf = ck_alloc_nozero(target->len);
-
-      ck_read(fd, new_buf, target->len, target->fname);
-
-      close(fd);
-
-      /* Find a suitable splicing location, somewhere between the first and
-         the last differing byte. Bail out if the difference is just a single
-         byte or so. */
-
-      locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
-
-      if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-        ck_free(new_buf);
-        goto retry_splicing_puppet;
-      }
-
-      /* Split somewhere between the first and last differing byte. */
-
-      split_at = f_diff + UR(l_diff - f_diff);
-
-      /* Do the thing. */
-
-      len = target->len;
-      memcpy(new_buf, in_buf, split_at);
-      in_buf = new_buf;
-      ck_free(out_buf);
-      out_buf = ck_alloc_nozero(len);
-      memcpy(out_buf, in_buf, len);
-      goto havoc_stage_puppet;
-
-    }
-
-#endif /* !IGNORE_FINDS */
-
-    ret_val = 0;
-
-  abandon_entry:
-  abandon_entry_puppet:
-
-    if (splice_cycle >= SPLICE_CYCLES_puppet)
-      SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low);
-
-    splicing_with = -1;
-
-    /* Update pending_not_fuzzed count if we made it through the calibration
-       cycle and have not seen this entry before.
*/ - - // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { - // queue_cur->was_fuzzed = 1; - // --pending_not_fuzzed; - // if (queue_cur->favored) --pending_favored; - // } - - munmap(orig_in, queue_cur->len); - - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); - - - if (key_puppet == 1) { - if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) { - key_puppet = 0; - cur_ms_lv = get_cur_time(); - new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt_puppet = 0; - last_limit_time_start = 0; - } - } - - - if (unlikely(tmp_pilot_time > period_pilot)) { - total_pacemaker_time += tmp_pilot_time; - new_hit_cnt = queued_paths + unique_crashes; - swarm_fitness[swarm_now] = (double)(total_puppet_find - temp_puppet_find) / ((double)(tmp_pilot_time)/ period_pilot_tmp); - tmp_pilot_time = 0; - temp_puppet_find = total_puppet_find; - - u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) { - double temp_eff = 0.0; - - if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i]) - temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - stage_finds_puppet[swarm_now][i]) / - (double)(stage_cycles_puppet_v2[swarm_now][i] - stage_cycles_puppet[swarm_now][i]); - - if (eff_best[swarm_now][i] < temp_eff) { - eff_best[swarm_now][i] = temp_eff; - L_best[swarm_now][i] = x_now[swarm_now][i]; - } - - stage_finds_puppet[swarm_now][i] = stage_finds_puppet_v2[swarm_now][i]; - stage_cycles_puppet[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; - temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i]; - } - - swarm_now = swarm_now + 1; - if (swarm_now == swarm_num) { - key_module = 1; - for (i = 0; i < operator_num; ++i) { - core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; - core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; - core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; - } - - double swarm_eff = 0.0; - swarm_now = 0; - for (i = 0; i < swarm_num; ++i) { - if (swarm_fitness[i] > swarm_eff) { - swarm_eff = swarm_fitness[i]; - swarm_now = i; - } - } - if (swarm_now <0 || swarm_now > swarm_num - 1) - PFATAL("swarm_now error number %d", swarm_now); - - } - } - return ret_val; - } - } - - -#undef FLIP_BIT - -} - - -static u8 core_fuzzing(char** argv) { - int i; - - if (swarm_num == 1) { - key_module = 2; - return 0; - } - - - s32 len, fd, temp_len, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - - u8 ret_val = 1, doing_det = 0; - - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; - -#ifdef IGNORE_FINDS - - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ - - if (queue_cur->depth > 1) return 1; - -#else - - if (pending_favored) { - - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ - - if ((queue_cur->was_fuzzed || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; - - } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. 
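
The period bookkeeping above boils down to two scores. A swarm is rated as finds per period-normalized amount of work, fitness = delta_finds / (execs / period), and each operator as finds per cycle it actually consumed; the best-scoring swarm is the one selected to run next. Restated compactly (counter names local to the sketch):

    #include <stdio.h>

    /* Swarm score: new results per period-normalized execution count. */
    static double swarm_fitness_of(unsigned long long delta_finds,
                                   unsigned long long execs_this_period,
                                   unsigned long long period_len) {
      return (double)delta_finds /
             ((double)execs_this_period / (double)period_len);
    }

    /* Operator score: new results per cycle, zero if it never ran. */
    static double operator_eff(unsigned long long delta_finds,
                               unsigned long long delta_cycles) {
      return delta_cycles ? (double)delta_finds / (double)delta_cycles : 0.0;
    }

    int main(void) {
      printf("fitness=%.3f eff=%.6f\n",
             swarm_fitness_of(12, 50000, 50000),
             operator_eff(3, 40000));
      return 0;
    }
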
*/ - - if (queue_cycle > 1 && !queue_cur->was_fuzzed) { - - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - - } else { - - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - - } - - } - -#endif /* ^IGNORE_FINDS */ - - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } - - /* Map the test case into memory. */ - - fd = open(queue_cur->fname, O_RDONLY); - - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); - - len = queue_cur->len; - - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); - - close(fd); - - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ - - out_buf = ck_alloc_nozero(len); - - subseq_tmouts = 0; - - cur_depth = queue_cur->depth; - - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ - - if (queue_cur->cal_failed) { - - u8 res = FAULT_TMOUT; - - if (queue_cur->cal_failed < CAL_CHANCES) { - - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); - - } - - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } - - } - - /************ - * TRIMMING * - ************/ - - if (!dumb_mode && !queue_cur->trim_done) { - - u8 res = trim_case(argv, queue_cur, in_buf); - - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); - - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } - - /* Don't retry trimming, even if it failed. */ - - queue_cur->trim_done = 1; - - len = queue_cur->len; - - } - - memcpy(out_buf, in_buf, len); - - /********************* - * PERFORMANCE SCORE * - *********************/ - - orig_perf = perf_score = calculate_score(queue_cur); - - /* Skip right away if -d is given, if we have done deterministic fuzzing on - this entry ourselves (was_fuzzed), or if it has gone through deterministic - testing in earlier, resumed runs (passed_det). */ - - if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) - goto havoc_stage; - - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. */ - - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) - goto havoc_stage; - - - cur_ms_lv = get_cur_time(); - if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || - (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) - { - key_puppet = 1; - goto pacemaker_fuzzing; - } - - doing_det = 1; - - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ - -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ - } while (0) - - /* Single walking bit. 
*/ - - stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = queued_paths + unique_crashes; - - prev_cksum = queue_cur->exec_cksum; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - - /* While flipping the least significant bit in every byte, pull of an extra - trick to detect possible syntax tokens. In essence, the idea is that if - you have a binary blob like this: - - xxxxxxxxIHDRxxxxxxxx - - ...and changing the leading and trailing bytes causes variable or no - changes in program flow, but touching any character in the "IHDR" string - always produces the same, distinctive path, it's highly likely that - "IHDR" is an atomically-checked magic value of special significance to - the fuzzed format. - - We do this here, rather than as a separate stage, because it's a nice - way to keep the operation approximately "free" (i.e., no extra execs). - - Empirically, performing the check when flipping the least significant bit - is advantageous, compared to doing it at the time of more disruptive - changes, where the program flow may be affected in more violent ways. - - The caveat is that we won't generate dictionaries in the -d mode or -S - mode - but that's probably a fair trade-off. - - This won't work particularly well with paths that exhibit variable - behavior, but fails gracefully, so we'll carry out the checks anyway. - - */ - - if (!dumb_mode && (stage_cur & 7) == 7) { - - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { - - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ - - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; - - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); - - } - else if (cksum != prev_cksum) { - - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ - - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); - - a_len = 0; - prev_cksum = cksum; - - } - - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. */ - - if (cksum != queue_cur->exec_cksum) { - - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; - - } - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; - - - - /* Two walking bits. */ - - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; - - - /* Four walking bits. 
*/ - - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; - - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur >> 3; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; - - - /* Effector map setup. These macros calculate: - - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. - - */ - -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) - - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ - - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; - - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } - - /* Walking byte. */ - - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; - - - orig_hit_cnt = new_hit_cnt; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - stage_cur_byte = stage_cur; - - out_buf[stage_cur] ^= 0xFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ - - if (!eff_map[EFF_APOS(stage_cur)]) { - - u32 cksum; - - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. */ - - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; - - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } - - } - - out_buf[stage_cur] ^= 0xFF; - - } - - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. */ - - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { - - memset(eff_map, 1, EFF_ALEN(len)); - - blocks_eff_select += EFF_ALEN(len); - - } - else { - - blocks_eff_select += eff_cnt; - - } - - blocks_eff_total += EFF_ALEN(len); - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; - - - - /* Two walking bytes. */ - - if (len < 2) goto skip_bitflip; - - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - /* Let's consult the effector map... 
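
A condensed sketch of the effector-map arithmetic, assuming EFF_MAP_SCALE2 = 3 as in the stock config.h, i.e. one map byte per eight input bytes; the later stages consult it to skip buckets that never changed the execution path:

    #include <stdint.h>

    #define EFF_MAP_SCALE2 3
    #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)
    #define EFF_REM(_x)  ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))

    /* A file offset is worth mutating in the expensive deterministic
       stages only if its 8-byte bucket was flagged during flip8. */
    static int worth_mutating(const uint8_t* eff_map, uint32_t pos) {
      return eff_map[EFF_APOS(pos)];
    }
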
*/ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } - - stage_cur_byte = i; - - *(u16*)(out_buf + i) ^= 0xFFFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - *(u16*)(out_buf + i) ^= 0xFFFF; - - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; - - - - if (len < 4) goto skip_bitflip; - - /* Four walking bytes. */ - - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; ++i) { - - /* Let's consult the effector map... */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } - - stage_cur_byte = i; - - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; - - - - - skip_bitflip: - - if (no_arith) goto skip_arith; - - /********************** - * ARITHMETIC INC/DEC * - **********************/ - - /* 8-bit arithmetics. */ - - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; - - - stage_val_type = STAGE_VAL_LE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u8 orig = out_buf[i]; - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u8 r = orig ^ (orig + j); - - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ - - if (!could_be_bitflip(r)) { - - stage_cur_val = j; - out_buf[i] = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - r = orig ^ (orig - j); - - if (!could_be_bitflip(r)) { - - stage_cur_val = -j; - out_buf[i] = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - out_buf[i] = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; - - - - - /* 16-bit arithmetics, both endians. */ - - if (len < 2) goto skip_arith; - - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - u16 orig = *(u16*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); - - /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. 
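
A worked check of that "& 0xff" test: adding j to a word touches the high byte only when the low byte overflows, which is exactly when (orig & 0xff) + j > 0xff; single-byte effects are already covered by the 8-bit pass, so those cases are skipped:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      uint16_t orig = 0x10ff, j = 1;

      assert(((orig & 0xff) + j) > 0xff);      /* carry into high byte */
      assert((uint16_t)(orig + j) == 0x1100);  /* both bytes change */

      orig = 0x1080;                           /* no carry this time */
      assert(((orig & 0xff) + j) <= 0xff);
      assert((uint16_t)(orig + j) == 0x1081);  /* only low byte moves */
      return 0;
    }
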
*/ - - stage_val_type = STAGE_VAL_LE; - - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { - - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { - - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - /* Big endian comes next. Same deal. */ - - stage_val_type = STAGE_VAL_BE; - - - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { - - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig >> 8) < j && !could_be_bitflip(r4)) { - - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - *(u16*)(out_buf + i) = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; - - - - /* 32-bit arithmetics, both endians. */ - - if (len < 4) goto skip_arith; - - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; ++i) { - - u32 orig = *(u32*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } - - stage_cur_byte = i; - - for (j = 1; j <= ARITH_MAX; ++j) { - - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); - - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. */ - - stage_val_type = STAGE_VAL_LE; - - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { - - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - /* Big endian next. 
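
The big-endian variants all follow one pattern: byte-swap, do the arithmetic in the swapped domain, swap back. A self-contained sketch, with SWAP32 assumed to reverse byte order as in types.h:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t swap32(uint32_t x) {
      return (x << 24) | (x >> 24) | ((x << 8) & 0x00ff0000) |
             ((x >> 8) & 0x0000ff00);
    }

    int main(void) {
      uint32_t orig = 0x11223344, j = 1;

      /* "+ j" as a big-endian target would read the dword, written
         back in the buffer's native byte order. */
      uint32_t be_plus = swap32(swap32(orig) + j);

      assert(swap32(orig) == 0x44332211);
      assert(be_plus == 0x12223344);
      return 0;
    }
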
*/ - - stage_val_type = STAGE_VAL_BE; - - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { - - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - *(u32*)(out_buf + i) = orig; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; - - - - skip_arith: - - /********************** - * INTERESTING VALUES * - **********************/ - - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); - - - - stage_val_type = STAGE_VAL_LE; - - orig_hit_cnt = new_hit_cnt; - - /* Setting 8-bit integers. */ - - for (i = 0; i < len; ++i) { - - u8 orig = out_buf[i]; - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_8); ++j) { - - /* Skip if the value could be a product of bitflips or arithmetics. */ - - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } - - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - out_buf[i] = orig; - ++stage_cur; - - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; - - - - /* Setting 16-bit integers, both endians. */ - - if (no_arith || len < 2) goto skip_interest; - - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 1; ++i) { - - u16 orig = *(u16*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - - stage_cur_val = interesting_16[j]; - - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. 
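
The dedup predicate assumed here can be stated compactly: a candidate value is reachable by the earlier bitflip stages iff the XOR of the old and new values reduces to a 1-, 2- or 4-bit run, or to a byte-aligned 8/16/32-bit run. A sketch matching the behavior of could_be_bitflip():

    #include <stdint.h>

    static int could_be_bitflip(uint32_t xor_val) {
      uint32_t sh = 0;

      if (!xor_val) return 1;            /* no-op is trivially covered */

      /* Shift right until the first set bit. */
      while (!(xor_val & 1)) { sh++; xor_val >>= 1; }

      /* 1-, 2-, and 4-bit patterns are OK anywhere. */
      if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

      /* Wider runs only match the walking-byte stages when aligned. */
      if (sh & 7) return 0;
      return xor_val == 0xff || xor_val == 0xffff ||
             xor_val == 0xffffffff;
    }
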
*/ - - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - - stage_val_type = STAGE_VAL_LE; - - *(u16*)(out_buf + i) = interesting_16[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { - - stage_val_type = STAGE_VAL_BE; - - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - } - - *(u16*)(out_buf + i) = orig; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; - - - - - if (len < 4) goto skip_interest; - - /* Setting 32-bit integers, both endians. */ - - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); - - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len - 3; ++i) { - - u32 orig = *(u32*)(out_buf + i); - - /* Let's consult the effector map... */ - - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } - - stage_cur_byte = i; - - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - - stage_cur_val = interesting_32[j]; - - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. */ - - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { - - stage_val_type = STAGE_VAL_LE; - - *(u32*)(out_buf + i) = interesting_32[j]; - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - - stage_val_type = STAGE_VAL_BE; - - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; - - } else --stage_max; - - } - - *(u32*)(out_buf + i) = orig; - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; - - - - skip_interest: - - /******************** - * DICTIONARY STUFF * - ********************/ - - if (!extras_cnt) goto skip_user_extras; - - /* Overwrite with user-supplied extras. */ - - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; - - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u32 last_len = 0; - - stage_cur_byte = i; - - /* Extras are sorted by size, from smallest to largest. This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. */ - - for (j = 0; j < extras_cnt; ++j) { - - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. 
Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ - - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - - --stage_max; - continue; - - } - - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - ++stage_cur; - - } - - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; - - /* Insertion of user-supplied extras. */ - - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; - - - - - orig_hit_cnt = new_hit_cnt; - - ex_tmp = ck_alloc(len + MAX_DICT_FILE); - - for (i = 0; i <= len; ++i) { - - stage_cur_byte = i; - - for (j = 0; j < extras_cnt; ++j) { - - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } - - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); - - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); - - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } - - ++stage_cur; - - } - - /* Copy head */ - ex_tmp[i] = out_buf[i]; - - } - - ck_free(ex_tmp); - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UI] += stage_max; - - skip_user_extras: - - if (!a_extras_cnt) goto skip_extras; - - stage_name = "auto extras (over)"; - stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - - - stage_val_type = STAGE_VAL_NONE; - - orig_hit_cnt = new_hit_cnt; - - for (i = 0; i < len; ++i) { - - u32 last_len = 0; - - stage_cur_byte = i; - - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { - - /* See the comment in the earlier code; extras are sorted by size. */ - - if (a_extras[j].len > len - i || - !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { - - --stage_max; - continue; - - } - - last_len = a_extras[j].len; - memcpy(out_buf + i, a_extras[j].data, last_len); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - ++stage_cur; - - } - - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; - - skip_extras: - - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ - - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - - /**************** - * RANDOM HAVOC * - ****************/ - - havoc_stage: - pacemaker_fuzzing: - - - stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. 
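
For a sense of scale, the stage budget computed below works out as follows under the stock config.h values (HAVOC_CYCLES = 256, SPLICE_HAVOC = 32) and an average perf_score of 100 with havoc_div = 1:

    #include <stdint.h>
    #include <stdio.h>

    #define HAVOC_CYCLES 256
    #define SPLICE_HAVOC 32

    int main(void) {
      uint32_t perf_score = 100, havoc_div = 1;

      uint32_t havoc_max  = HAVOC_CYCLES * perf_score / havoc_div / 100;
      uint32_t splice_max = SPLICE_HAVOC * perf_score / havoc_div / 100;

      /* Prints "havoc: 256, splice: 32": an average-scored input gets
         256 havoc rounds, and 32 more per splice cycle. */
      printf("havoc: %u, splice: %u\n", havoc_max, splice_max);
      return 0;
    }
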
*/ - - if (!splice_cycle) { - - stage_name = "MOpt-havoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; - - } else { - - static u8 tmp[32]; - - perf_score = orig_perf; - - sprintf(tmp, "MOpt-core-splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_core_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - - } - - s32 temp_len_puppet; - cur_ms_lv = get_cur_time(); - - //for (; swarm_now < swarm_num; ++swarm_now) - { - if (key_puppet == 1) { - if (unlikely(orig_hit_cnt_puppet == 0)) { - orig_hit_cnt_puppet = queued_paths + unique_crashes; - last_limit_time_start = get_cur_time(); - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); - } - } - { -#ifndef IGNORE_FINDS - havoc_stage_puppet: -#endif - - stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ - - if (!splice_cycle) { - stage_name = "MOpt core avoc"; - stage_short = "MOpt_core_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; - } else { - static u8 tmp[32]; - perf_score = orig_perf; - sprintf(tmp, "MOpt core splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_core_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - } - - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - temp_len = len; - orig_hit_cnt = queued_paths + unique_crashes; - havoc_queued = queued_paths; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - stage_cur_val = use_stacking; - - for (i = 0; i < operator_num; ++i) { - core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i]; - } - - for (i = 0; i < use_stacking; ++i) { - - switch (select_algorithm()) { - - case 0: - /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, UR(temp_len << 3)); - core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1; - break; - - - case 1: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1; - break; - - case 2: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - FLIP_BIT(out_buf, temp_len_puppet + 2); - FLIP_BIT(out_buf, temp_len_puppet + 3); - core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1; - break; - - case 3: - if (temp_len < 4) break; - out_buf[UR(temp_len)] ^= 0xFF; - core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1; - break; - - case 4: - if (temp_len < 8) break; - *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; - core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1; - break; - - case 5: - if (temp_len < 8) break; - *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; - core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1; - break; - - case 6: - out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); - out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); - core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1; - break; - - case 7: - /* Randomly subtract from word, random endian. 
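
A minimal sketch of this word mutation, assuming SWAP16 reverses byte order: with probability 1/2 the subtraction is applied as the target would see it on a big-endian read. memcpy() is used here to sidestep the unaligned-access concerns that the in-tree code accepts:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    static uint16_t swap16(uint16_t x) { return (x << 8) | (x >> 8); }

    static void sub_word_random_endian(uint8_t* buf, uint32_t pos,
                                       uint16_t num) {
      uint16_t v;
      memcpy(&v, buf + pos, 2);
      if (rand() & 1) v -= num;                     /* little-endian view */
      else            v = swap16(swap16(v) - num);  /* big-endian view */
      memcpy(buf + pos, &v, 2);
    }
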
*/ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } - /* Randomly add to word, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } - core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1; - break; - - - case 8: - /* Randomly subtract from dword, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - } - /* Randomly add to dword, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - } - core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1; - break; - - - case 9: - /* Set byte to interesting value. */ - if (temp_len < 4) break; - out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; - core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1; - break; - - case 10: - /* Set word to interesting value, randomly choosing endian. */ - if (temp_len < 8) break; - if (UR(2)) { - *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; - } else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); - } - core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1; - break; - - - case 11: - /* Set dword to interesting value, randomly choosing endian. */ - - if (temp_len < 8) break; - - if (UR(2)) { - *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; - } else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); - } - core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1; - break; - - - case 12: - - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ - - out_buf[UR(temp_len)] ^= 1 + UR(255); - core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1; - break; - - - case 13: { - - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ - - u32 del_from, del_len; - - if (temp_len < 2) break; - - /* Don't delete too much. */ - - del_len = choose_block_len(temp_len - 1); - - del_from = UR(temp_len - del_len + 1); - - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); - - temp_len -= del_len; - core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1; - break; - - } - - case 14: - - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - - /* Clone bytes (75%) or insert a block of constant bytes (25%). 
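
The 75/25 split comes from drawing actually_clone = UR(4) and cloning whenever the draw is nonzero; a quick simulation, assuming UR(4) is uniform on {0,1,2,3}:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      unsigned clone = 0, trials = 1000000, i;

      for (i = 0; i < trials; i++)
        if (rand() % 4) clone++;   /* mirrors: u8 actually_clone = UR(4); */

      printf("clone fraction: %.3f\n", (double)clone / trials); /* ~0.750 */
      return 0;
    }
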
*/ - - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; - - if (actually_clone) { - - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); - - } else { - - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; - - } - - clone_to = UR(temp_len); - - new_buf = ck_alloc_nozero(temp_len + clone_len); - - /* Head */ - - memcpy(new_buf, out_buf, clone_to); - - /* Inserted part */ - - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); - - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); - - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - core_operator_cycles_puppet_v2[STAGE_Clone75] += 1; - } - - break; - - case 15: { - - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ - - u32 copy_from, copy_to, copy_len; - - if (temp_len < 2) break; - - copy_len = choose_block_len(temp_len - 1); - - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); - - if (UR(4)) { - - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - - } - else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); - core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1; - break; - - } - - - } - - } - - tmp_core_time += 1; - - u64 temp_total_found = queued_paths + unique_crashes; - - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry_puppet; - - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ - - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); - - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ - - if (queued_paths != havoc_queued) { - - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } - - havoc_queued = queued_paths; - - } - - if (unlikely(queued_paths + unique_crashes > temp_total_found)) - { - u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; - total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; ++i) - { - if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i]) - core_operator_finds_puppet_v2[i] += temp_temp_puppet; - } - } - - } - - new_hit_cnt = queued_paths + unique_crashes; - - -#ifndef IGNORE_FINDS - - /************ - * SPLICING * - ************/ - - - retry_splicing_puppet: - - - - if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && - queued_paths > 1 && queue_cur->len > 1) { - - struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; - - /* First of all, if we've modified in_buf for havoc, let's clean that - up... */ - - if (in_buf != orig_in) { - ck_free(in_buf); - in_buf = orig_in; - len = queue_cur->len; - } - - /* Pick a random queue entry and seek to it. Don't splice with yourself. */ - - do { tid = UR(queued_paths); } while (tid == current_entry); - - splicing_with = tid; - target = queue; - - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; - - /* Make sure that the target has a reasonable length. 
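
The seek just above relies on the queue's skip-list shortcut: entries form a singly linked list with an extra next_100 pointer chained every hundred nodes, so reaching a random index costs O(tid/100 + 100) steps rather than O(tid). A trimmed-down sketch keeping only the two link fields:

    #include <stddef.h>

    struct queue_entry {
      struct queue_entry *next, *next_100;
    };

    static struct queue_entry* seek(struct queue_entry* q, unsigned tid) {
      while (tid >= 100) { q = q->next_100; tid -= 100; }
      while (tid--) q = q->next;
      return q;
    }

The length validation then walks forward from the landing point, which is what the loop right after this comment does.
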
*/ - - while (target && (target->len < 2 || target == queue_cur)) { - target = target->next; - ++splicing_with; - } - - if (!target) goto retry_splicing_puppet; - - /* Read the testcase into a new buffer. */ - - fd = open(target->fname, O_RDONLY); - - if (fd < 0) PFATAL("Unable to open '%s'", target->fname); - - new_buf = ck_alloc_nozero(target->len); - - ck_read(fd, new_buf, target->len, target->fname); - - close(fd); - - /* Find a suitable splicin g location, somewhere between the first and - the last differing byte. Bail out if the difference is just a single - byte or so. */ - - locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); - - if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - ck_free(new_buf); - goto retry_splicing_puppet; - } - - /* Split somewhere between the first and last differing byte. */ - - split_at = f_diff + UR(l_diff - f_diff); - - /* Do the thing. */ - - len = target->len; - memcpy(new_buf, in_buf, split_at); - in_buf = new_buf; - ck_free(out_buf); - out_buf = ck_alloc_nozero(len); - memcpy(out_buf, in_buf, len); - - goto havoc_stage_puppet; - - } - -#endif /* !IGNORE_FINDS */ - - ret_val = 0; - abandon_entry: - abandon_entry_puppet: - - if (splice_cycle >= SPLICE_CYCLES_puppet) - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); - - - splicing_with = -1; - - - munmap(orig_in, queue_cur->len); - - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); - - - if (key_puppet == 1) - { - if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) - { - key_puppet = 0; - cur_ms_lv = get_cur_time(); - new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt_puppet = 0; - last_limit_time_start = 0; - } - } - - - if (unlikely(tmp_core_time > period_core)) - { - total_pacemaker_time += tmp_core_time; - tmp_core_time = 0; - temp_puppet_find = total_puppet_find; - new_hit_cnt = queued_paths + unique_crashes; - - u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) - { - - core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; - core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i]; - temp_stage_finds_puppet += core_operator_finds_puppet[i]; - } - - key_module = 2; - - old_hit_count = new_hit_cnt; - } - return ret_val; - } - } - - -#undef FLIP_BIT - -} - - -void pso_updating(void) { - - g_now += 1; - if (g_now > g_max) g_now = 0; - w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end; - int tmp_swarm, i, j; - u64 temp_operator_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) - { - operator_finds_puppet[i] = core_operator_finds_puppet[i]; - - for (j = 0; j < swarm_num; ++j) - { - operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i]; - } - temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i]; - } - - for (i = 0; i < operator_num; ++i) - { - if (operator_finds_puppet[i]) - G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet)); - } - - for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) - { - double x_temp = 0.0; - for (i = 0; i < operator_num; ++i) - { - probability_now[tmp_swarm][i] = 0.0; - v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); - x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; - if (x_now[tmp_swarm][i] > v_max) - x_now[tmp_swarm][i] = v_max; - else if 
(x_now[tmp_swarm][i] < v_min) - x_now[tmp_swarm][i] = v_min; - x_temp += x_now[tmp_swarm][i]; - } - - for (i = 0; i < operator_num; ++i) - { - x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; - if (likely(i != 0)) - probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; - else - probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; - } - if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) FATAL("ERROR probability"); - } - swarm_now = 0; - key_module = 0; -} - - -/* larger change for MOpt implementation: the original fuzz_one was renamed - to fuzz_one_original. All documentation references to fuzz_one therefore - mean fuzz_one_original */ -static u8 fuzz_one(char** argv) { - int key_val_lv = 0; - if (limit_time_sig == 0) { - key_val_lv = fuzz_one_original(argv); - } else { - if (key_module == 0) - key_val_lv = pilot_fuzzing(argv); - else if (key_module == 1) - key_val_lv = core_fuzzing(argv); - else if (key_module == 2) - pso_updating(); - } - - return key_val_lv; -} - - -/* Grab interesting test cases from other fuzzers. */ - -static void sync_fuzzers(char** argv) { - - DIR* sd; - struct dirent* sd_ent; - u32 sync_cnt = 0; - - sd = opendir(sync_dir); - if (!sd) PFATAL("Unable to open '%s'", sync_dir); - - stage_max = stage_cur = 0; - cur_depth = 0; - - /* Look at the entries created for every other fuzzer in the sync directory. */ - - while ((sd_ent = readdir(sd))) { - - static u8 stage_tmp[128]; - - DIR* qd; - struct dirent* qd_ent; - u8 *qd_path, *qd_synced_path; - u32 min_accept = 0, next_min_accept; - - s32 id_fd; - - /* Skip dot files and our own output directory. */ - - if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue; - - /* Skip anything that doesn't have a queue/ subdirectory. */ - - qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name); - - if (!(qd = opendir(qd_path))) { - ck_free(qd_path); - continue; - } - - /* Retrieve the ID of the last seen test case. */ - - qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name); - - id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600); - - if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path); - - if (read(id_fd, &min_accept, sizeof(u32)) > 0) - lseek(id_fd, 0, SEEK_SET); - - next_min_accept = min_accept; - - /* Show stats */ - - sprintf(stage_tmp, "sync %u", ++sync_cnt); - stage_name = stage_tmp; - stage_cur = 0; - stage_max = 0; - - /* For every file queued by this fuzzer, parse ID and see if we have looked at - it before; exec a test case if not. */ - - while ((qd_ent = readdir(qd))) { - - u8* path; - s32 fd; - struct stat st; - - if (qd_ent->d_name[0] == '.' || - sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || - syncing_case < min_accept) continue; - - /* OK, sounds like a new one. Let's give it a try. */ - - if (syncing_case >= next_min_accept) - next_min_accept = syncing_case + 1; - - path = alloc_printf("%s/%s", qd_path, qd_ent->d_name); - - /* Allow this to fail in case the other fuzzer is resuming or so... */ - - fd = open(path, O_RDONLY); - - if (fd < 0) { - ck_free(path); - continue; - } - - if (fstat(fd, &st)) PFATAL("fstat() failed"); - - /* Ignore zero-sized or oversized files. */ - - if (st.st_size && st.st_size <= MAX_FILE) { - - u8 fault; - u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); - - if (mem == MAP_FAILED) PFATAL("Unable to mmap '%s'", path); - - /* See what happens. 
We rely on save_if_interesting() to catch major - errors and save the test case. */ - - write_to_testcase(mem, st.st_size); - - fault = run_target(argv, exec_tmout); - - if (stop_soon) return; - - syncing_party = sd_ent->d_name; - queued_imported += save_if_interesting(argv, mem, st.st_size, fault); - syncing_party = 0; - - munmap(mem, st.st_size); - - if (!(stage_cur++ % stats_update_freq)) show_stats(); - - } - - ck_free(path); - close(fd); - - } - - ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path); - - close(id_fd); - closedir(qd); - ck_free(qd_path); - ck_free(qd_synced_path); - - } - - closedir(sd); - -} - - -/* Handle stop signal (Ctrl-C, etc). */ - -static void handle_stop_sig(int sig) { - - stop_soon = 1; - - if (child_pid > 0) kill(child_pid, SIGKILL); - if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL); - -} - - -/* Handle skip request (SIGUSR1). */ - -static void handle_skipreq(int sig) { - - skip_requested = 1; - -} - - -/* Do a PATH search and find target binary to see that it exists and - isn't a shell script - a common and painful mistake. We also check for - a valid ELF header and for evidence of AFL instrumentation. */ - -void check_binary(u8* fname) { - - u8* env_path = 0; - struct stat st; - - s32 fd; - u8* f_data; - u32 f_len = 0; - - ACTF("Validating target binary..."); - - if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { - - target_path = ck_strdup(fname); - if (stat(target_path, &st) || !S_ISREG(st.st_mode) || - !(st.st_mode & 0111) || (f_len = st.st_size) < 4) - FATAL("Program '%s' not found or not executable", fname); - - } else { - - while (env_path) { - - u8 *cur_elem, *delim = strchr(env_path, ':'); - - if (delim) { - - cur_elem = ck_alloc(delim - env_path + 1); - memcpy(cur_elem, env_path, delim - env_path); - ++delim; - - } else cur_elem = ck_strdup(env_path); - - env_path = delim; - - if (cur_elem[0]) - target_path = alloc_printf("%s/%s", cur_elem, fname); - else - target_path = ck_strdup(fname); - - ck_free(cur_elem); - - if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break; - - ck_free(target_path); - target_path = 0; - - } - - if (!target_path) FATAL("Program '%s' not found or not executable", fname); - - } - - if (getenv("AFL_SKIP_BIN_CHECK")) return; - - /* Check for blatant user errors. */ - - if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) || - (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/'))) - FATAL("Please don't keep binaries in /tmp or /var/tmp"); - - fd = open(target_path, O_RDONLY); - - if (fd < 0) PFATAL("Unable to open '%s'", target_path); - - f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0); - - if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", target_path); - - close(fd); - - if (f_data[0] == '#' && f_data[1] == '!') { - - SAYF("\n" cLRD "[-] " cRST - "Oops, the target binary looks like a shell script. Some build systems will\n" - " sometimes generate shell stubs for dynamically linked programs; try static\n" - " library mode (./configure --disable-shared) if that's the case.\n\n" - - " Another possible cause is that you are actually trying to use a shell\n" - " wrapper around the fuzzed component. 
Invoking shell can slow down the\n" - " fuzzing process by a factor of 20x or more; it's best to write the wrapper\n" - " in a compiled language instead.\n"); - - FATAL("Program '%s' is a shell script", target_path); - - } - -#ifndef __APPLE__ - - if (f_data[0] != 0x7f || memcmp(f_data + 1, "ELF", 3)) - FATAL("Program '%s' is not an ELF binary", target_path); - -#else - -#if !defined(__arm__) && !defined(__arm64__) - if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED) - FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path); -#endif - -#endif /* ^!__APPLE__ */ - - if (!qemu_mode && !unicorn_mode && !dumb_mode && - !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { - - SAYF("\n" cLRD "[-] " cRST - "Looks like the target binary is not instrumented! The fuzzer depends on\n" - " compile-time instrumentation to isolate interesting test cases while\n" - " mutating the input data. For more information, and for tips on how to\n" - " instrument binaries, please see %s/README.\n\n" - - " When source code is not available, you may be able to leverage QEMU\n" - " mode support. Consult the README for tips on how to enable this.\n" - - " (It is also possible to use afl-fuzz as a traditional, \"dumb\" fuzzer.\n" - " For that, you can use the -n option - but expect much worse results.)\n", - doc_path); - - FATAL("No instrumentation detected"); - - } - - if ((qemu_mode || unicorn_mode) && - memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { - - SAYF("\n" cLRD "[-] " cRST - "This program appears to be instrumented with afl-gcc, but is being run in\n" - " QEMU or Unicorn mode (-Q or -U). This is probably not what you want -\n" - " this setup will be slow and offer no practical benefits.\n"); - - FATAL("Instrumentation found in -Q or -U mode"); - - } - - if (memmem(f_data, f_len, "libasan.so", 10) || - memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1; - - /* Detect persistent & deferred init signatures in the binary. */ - - if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) { - - OKF(cPIN "Persistent mode binary detected."); - setenv(PERSIST_ENV_VAR, "1", 1); - persistent_mode = 1; - - } else if (getenv("AFL_PERSISTENT")) { - - WARNF("AFL_PERSISTENT is no longer supported and may misbehave!"); - - } - - if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) { - - OKF(cPIN "Deferred forkserver binary detected."); - setenv(DEFER_ENV_VAR, "1", 1); - deferred_mode = 1; - - } else if (getenv("AFL_DEFER_FORKSRV")) { - - WARNF("AFL_DEFER_FORKSRV is no longer supported and may misbehave!"); - - } - - if (munmap(f_data, f_len)) PFATAL("unmap() failed"); - -} - - -/* Trim and possibly create a banner for the run. */ - -static void fix_up_banner(u8* name) { - - if (!use_banner) { - - if (sync_id) { - - use_banner = sync_id; - - } else { - - u8* trim = strrchr(name, '/'); - if (!trim) use_banner = name; else use_banner = trim + 1; - - } - - } - - if (strlen(use_banner) > 32) { - - u8* tmp = ck_alloc(36); - sprintf(tmp, "%.32s...", use_banner); - use_banner = tmp; - - } - -} - - -/* Check if we're on TTY. */ - -static void check_if_tty(void) { - - struct winsize ws; - - if (getenv("AFL_NO_UI")) { - OKF("Disabling the UI because AFL_NO_UI is set."); - not_on_tty = 1; - return; - } - - if (ioctl(1, TIOCGWINSZ, &ws)) { - - if (errno == ENOTTY) { - OKF("Looks like we're not running on a tty, so I'll be a bit less verbose."); - not_on_tty = 1; - } - - return; - } - -} - - -/* Check terminal dimensions after resize. 
*/ - -static void check_term_size(void) { - - struct winsize ws; - - term_too_small = 0; - - if (ioctl(1, TIOCGWINSZ, &ws)) return; - - if (ws.ws_row == 0 || ws.ws_col == 0) return; - if (ws.ws_row < 24 || ws.ws_col < 79) term_too_small = 1; - -} - - - /* Display usage hints. */ static void usage(u8* argv0) { @@ -9116,622 +80,9 @@ static void usage(u8* argv0) { } +#ifndef AFL_LIB -/* Prepare output directories and fds. */ - -void setup_dirs_fds(void) { - - u8* tmp; - s32 fd; - - ACTF("Setting up output directories..."); - - if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST) - PFATAL("Unable to create '%s'", sync_dir); - - if (mkdir(out_dir, 0700)) { - - if (errno != EEXIST) PFATAL("Unable to create '%s'", out_dir); - - maybe_delete_out_dir(); - - } else { - - if (in_place_resume) - FATAL("Resume attempted but old output directory not found"); - - out_dir_fd = open(out_dir, O_RDONLY); - -#ifndef __sun - - if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB)) - PFATAL("Unable to flock() output directory."); - -#endif /* !__sun */ - - } - - /* Queue directory for any starting & discovered paths. */ - - tmp = alloc_printf("%s/queue", out_dir); - if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - /* Top-level directory for queue metadata used for session - resume and related tasks. */ - - tmp = alloc_printf("%s/queue/.state/", out_dir); - if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - /* Directory for flagging queue entries that went through - deterministic fuzzing in the past. */ - - tmp = alloc_printf("%s/queue/.state/deterministic_done/", out_dir); - if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - /* Directory with the auto-selected dictionary entries. */ - - tmp = alloc_printf("%s/queue/.state/auto_extras/", out_dir); - if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - /* The set of paths currently deemed redundant. */ - - tmp = alloc_printf("%s/queue/.state/redundant_edges/", out_dir); - if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - /* The set of paths showing variable behavior. */ - - tmp = alloc_printf("%s/queue/.state/variable_behavior/", out_dir); - if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - /* Sync directory for keeping track of cooperating fuzzers. */ - - if (sync_id) { - - tmp = alloc_printf("%s/.synced/", out_dir); - - if (mkdir(tmp, 0700) && (!in_place_resume || errno != EEXIST)) - PFATAL("Unable to create '%s'", tmp); - - ck_free(tmp); - - } - - /* All recorded crashes. */ - - tmp = alloc_printf("%s/crashes", out_dir); - if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - /* All recorded hangs. */ - - tmp = alloc_printf("%s/hangs", out_dir); - if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - /* Generally useful file descriptors. */ - - dev_null_fd = open("/dev/null", O_RDWR); - if (dev_null_fd < 0) PFATAL("Unable to open /dev/null"); - -#ifndef HAVE_ARC4RANDOM - dev_urandom_fd = open("/dev/urandom", O_RDONLY); - if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom"); -#endif - - /* Gnuplot output file. 
*/ - - tmp = alloc_printf("%s/plot_data", out_dir); - fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (fd < 0) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - plot_file = fdopen(fd, "w"); - if (!plot_file) PFATAL("fdopen() failed"); - - fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, " - "pending_total, pending_favs, map_size, unique_crashes, " - "unique_hangs, max_depth, execs_per_sec\n"); - /* ignore errors */ - -} - -static void setup_cmdline_file(char** argv) { - u8* tmp; - s32 fd; - u32 i = 0; - - FILE* cmdline_file = NULL; - - /* Store the command line to reproduce our findings */ - tmp = alloc_printf("%s/cmdline", out_dir); - fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600); - if (fd < 0) PFATAL("Unable to create '%s'", tmp); - ck_free(tmp); - - cmdline_file = fdopen(fd, "w"); - if (!cmdline_file) PFATAL("fdopen() failed"); - - while (argv[i]) { - fprintf(cmdline_file, "%s\n", argv[i]); - ++i; - } - - fclose(cmdline_file); -} - - -/* Setup the output file for fuzzed data, if not using -f. */ - -void setup_stdio_file(void) { - - u8* fn; - if (file_extension) { - fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); - } else { - fn = alloc_printf("%s/.cur_input", out_dir); - } - - unlink(fn); /* Ignore errors */ - - out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600); - - if (out_fd < 0) PFATAL("Unable to create '%s'", fn); - - ck_free(fn); - -} - - -/* Make sure that core dumps don't go to a program. */ - -static void check_crash_handling(void) { - -#ifdef __APPLE__ - - /* Yuck! There appears to be no simple C API to query for the state of - loaded daemons on MacOS X, and I'm a bit hesitant to do something - more sophisticated, such as disabling crash reporting via Mach ports, - until I get a box to test the code. So, for now, we check for crash - reporting the awful way. */ - - if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return; - - SAYF("\n" cLRD "[-] " cRST - "Whoops, your system is configured to forward crash notifications to an\n" - " external crash reporting utility. This will cause issues due to the\n" - " extended delay between the fuzzed binary malfunctioning and this fact\n" - " being relayed to the fuzzer via the standard waitpid() API.\n\n" - " To avoid having crashes misinterpreted as timeouts, please run the\n" - " following commands:\n\n" - - " SL=/System/Library; PL=com.apple.ReportCrash\n" - " launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n" - " sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n"); - - if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) - FATAL("Crash reporter detected"); - -#else - - /* This is Linux specific, but I don't think there's anything equivalent on - *BSD, so we can just let it slide for now. */ - - s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY); - u8 fchar; - - if (fd < 0) return; - - ACTF("Checking core_pattern..."); - - if (read(fd, &fchar, 1) == 1 && fchar == '|') { - - SAYF("\n" cLRD "[-] " cRST - "Hmm, your system is configured to send core dump notifications to an\n" - " external utility. 
This will cause issues: there will be an extended delay\n" - " between stumbling upon a crash and having this information relayed to the\n" - " fuzzer via the standard waitpid() API.\n\n" - - " To avoid having crashes misinterpreted as timeouts, please log in as root\n" - " and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n" - - " echo core >/proc/sys/kernel/core_pattern\n"); - - if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) - FATAL("Pipe at the beginning of 'core_pattern'"); - - } - - close(fd); - -#endif /* ^__APPLE__ */ - -} - - -/* Check CPU governor. */ - -static void check_cpu_governor(void) { -#ifdef __linux__ - FILE* f; - u8 tmp[128]; - u64 min = 0, max = 0; - - if (getenv("AFL_SKIP_CPUFREQ")) return; - - if (cpu_aff > 0) - snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff, "/cpufreq/scaling_governor"); - else - snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"); - f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r"); - if (!f) { - if (cpu_aff > 0) - snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpufreq/policy", cpu_aff, "/scaling_governor"); - else - snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor"); - f = fopen(tmp, "r"); - } - if (!f) { - WARNF("Could not check CPU scaling governor"); - return; - } - - ACTF("Checking CPU scaling governor..."); - - if (!fgets(tmp, 128, f)) PFATAL("fgets() failed"); - - fclose(f); - - if (!strncmp(tmp, "perf", 4)) return; - - f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r"); - - if (f) { - if (fscanf(f, "%llu", &min) != 1) min = 0; - fclose(f); - } - - f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r"); - - if (f) { - if (fscanf(f, "%llu", &max) != 1) max = 0; - fclose(f); - } - - if (min == max) return; - - SAYF("\n" cLRD "[-] " cRST - "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n" - " between %llu and %llu MHz. Unfortunately, the scaling algorithm in the\n" - " kernel is imperfect and can miss the short-lived processes spawned by\n" - " afl-fuzz. To keep things moving, run these commands as root:\n\n" - - " cd /sys/devices/system/cpu\n" - " echo performance | tee cpu*/cpufreq/scaling_governor\n\n" - - " You can later go back to the original state by replacing 'performance' with\n" - " 'ondemand'. If you don't want to change the settings, set AFL_SKIP_CPUFREQ\n" - " to make afl-fuzz skip this check - but expect some performance drop.\n", - min / 1024, max / 1024); - - FATAL("Suboptimal CPU scaling governor"); -#endif -} - - -/* Count the number of logical CPU cores. */ - -static void get_core_count(void) { - -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) - - size_t s = sizeof(cpu_core_count); - - /* On *BSD systems, we can just use a sysctl to get the number of CPUs. 
*/ - -#ifdef __APPLE__ - - if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) - return; - -#else - - int s_name[2] = { CTL_HW, HW_NCPU }; - - if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; - -#endif /* ^__APPLE__ */ - -#else - -#ifdef HAVE_AFFINITY - - cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN); - -#else - - FILE* f = fopen("/proc/stat", "r"); - u8 tmp[1024]; - - if (!f) return; - - while (fgets(tmp, sizeof(tmp), f)) - if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) ++cpu_core_count; - - fclose(f); - -#endif /* ^HAVE_AFFINITY */ - -#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ - - if (cpu_core_count > 0) { - - u32 cur_runnable = 0; - - cur_runnable = (u32)get_runnable_processes(); - -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) - - /* Add ourselves, since the 1-minute average doesn't include that yet. */ - - ++cur_runnable; - -#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ - - OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).", - cpu_core_count, cpu_core_count > 1 ? "s" : "", - cur_runnable, cur_runnable * 100.0 / cpu_core_count); - - if (cpu_core_count > 1) { - - if (cur_runnable > cpu_core_count * 1.5) { - - WARNF("System under apparent load, performance may be spotty."); - - } else if (cur_runnable + 1 <= cpu_core_count) { - - OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path); - - } - - } - - } else { - - cpu_core_count = 0; - WARNF("Unable to figure out the number of CPU cores."); - - } - -} - - -/* Validate and fix up out_dir and sync_dir when using -S. */ - -static void fix_up_sync(void) { - - u8* x = sync_id; - - if (dumb_mode) - FATAL("-S / -M and -n are mutually exclusive"); - - if (skip_deterministic) { - - if (force_deterministic) - FATAL("use -S instead of -M -d"); - //else - // FATAL("-S already implies -d"); - - } - - while (*x) { - - if (!isalnum(*x) && *x != '_' && *x != '-') - FATAL("Non-alphanumeric fuzzer ID specified via -S or -M"); - - ++x; - - } - - if (strlen(sync_id) > 32) FATAL("Fuzzer ID too long"); - - x = alloc_printf("%s/%s", out_dir, sync_id); - - sync_dir = out_dir; - out_dir = x; - - if (!force_deterministic) { - skip_deterministic = 1; - use_splicing = 1; - } - -} - - -/* Handle screen resize (SIGWINCH). */ - -static void handle_resize(int sig) { - clear_screen = 1; -} - - -/* Check ASAN options. */ - -static void check_asan_opts(void) { - u8* x = getenv("ASAN_OPTIONS"); - - if (x) { - - if (!strstr(x, "abort_on_error=1")) - FATAL("Custom ASAN_OPTIONS set without abort_on_error=1 - please fix!"); - - if (!strstr(x, "symbolize=0")) - FATAL("Custom ASAN_OPTIONS set without symbolize=0 - please fix!"); - - } - - x = getenv("MSAN_OPTIONS"); - - if (x) { - - if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) - FATAL("Custom MSAN_OPTIONS set without exit_code=" - STRINGIFY(MSAN_ERROR) " - please fix!"); - - if (!strstr(x, "symbolize=0")) - FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); - - } - -} - - -/* Set up signal handlers. More complicated that needs to be, because libc on - Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you call - siginterrupt(), and does other stupid things. */ - -void setup_signal_handlers(void) { - - struct sigaction sa; - - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; - sa.sa_sigaction = NULL; - - sigemptyset(&sa.sa_mask); - - /* Various ways of saying "stop". 
*/ - - sa.sa_handler = handle_stop_sig; - sigaction(SIGHUP, &sa, NULL); - sigaction(SIGINT, &sa, NULL); - sigaction(SIGTERM, &sa, NULL); - - /* Exec timeout notifications. */ - - sa.sa_handler = handle_timeout; - sigaction(SIGALRM, &sa, NULL); - - /* Window resize */ - - sa.sa_handler = handle_resize; - sigaction(SIGWINCH, &sa, NULL); - - /* SIGUSR1: skip entry */ - - sa.sa_handler = handle_skipreq; - sigaction(SIGUSR1, &sa, NULL); - - /* Things we don't care about. */ - - sa.sa_handler = SIG_IGN; - sigaction(SIGTSTP, &sa, NULL); - sigaction(SIGPIPE, &sa, NULL); - -} - - -/* Rewrite argv for QEMU. */ - -static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { - - char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; - - memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); - - new_argv[2] = target_path; - new_argv[1] = "--"; - - /* Now we need to actually find the QEMU binary to put in argv[0]. */ - - tmp = getenv("AFL_PATH"); - - if (tmp) { - - cp = alloc_printf("%s/afl-qemu-trace", tmp); - - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); - - target_path = new_argv[0] = cp; - return new_argv; - - } - - own_copy = ck_strdup(own_loc); - rsl = strrchr(own_copy, '/'); - - if (rsl) { - - *rsl = 0; - - cp = alloc_printf("%s/afl-qemu-trace", own_copy); - ck_free(own_copy); - - if (!access(cp, X_OK)) { - - target_path = new_argv[0] = cp; - return new_argv; - - } - - } else ck_free(own_copy); - - if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { - - target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace"); - return new_argv; - - } - - SAYF("\n" cLRD "[-] " cRST - "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n" - " separately by following the instructions in qemu_mode/README.qemu. If you\n" - " already have the binary installed, you may need to specify AFL_PATH in the\n" - " environment.\n\n" - - " Of course, even without QEMU, afl-fuzz can still work with binaries that are\n" - " instrumented at compile time with afl-gcc. It is also possible to use it as a\n" - " traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n"); - - FATAL("Failed to locate 'afl-qemu-trace'."); - -} - -/* Make a copy of the current command line. 
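*/

/* Shape of the command line built by get_qemu_argv() above: argv[0] becomes
   the QEMU tracer, "--" ends QEMU's own option parsing, then the target and
   its original arguments follow. Illustrative sketch only - the real code
   additionally resolves the tracer path via AFL_PATH, the afl-fuzz binary
   location, and BIN_PATH. Assumes a main()-style argv (argv[argc] == NULL): */

#include <stdlib.h>
#include <string.h>

static char** wrap_with_qemu(char* qemu, char* target, char** argv, int argc) {

  char** new_argv = calloc(argc + 4, sizeof(char*));

  if (!new_argv) return NULL;

  new_argv[0] = qemu;
  new_argv[1] = "--";
  new_argv[2] = target;
  memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc);   /* incl. NULL */

  return new_argv;

}

/*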
*/ - -static void save_cmdline(u32 argc, char** argv) { - - u32 len = 1, i; - u8* buf; - - for (i = 0; i < argc; ++i) - len += strlen(argv[i]) + 1; - - buf = orig_cmdline = ck_alloc(len); - - for (i = 0; i < argc; ++i) { - - u32 l = strlen(argv[i]); - - memcpy(buf, argv[i], l); - buf += l; - - if (i != argc - 1) *(buf++) = ' '; - - } - - *buf = 0; - -} - -int stricmp(char const *a, char const *b) { +static int stricmp(char const *a, char const *b) { for (;; ++a, ++b) { int d; d = tolower(*a) - tolower(*b); @@ -9740,8 +91,6 @@ int stricmp(char const *a, char const *b) { } } -#ifndef AFL_LIB - /* Main entry point */ int main(int argc, char** argv) { From b24639d0113e15933e749ea0f96abe3f25a134a0 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Mon, 2 Sep 2019 18:49:43 +0200 Subject: [PATCH 68/83] run code formatter --- Makefile | 4 +- include/afl-as.h | 95 +- include/afl-fuzz.h | 499 +- include/alloc-inl.h | 225 +- include/android-ashmem.h | 87 +- include/common.h | 1 + include/config.h | 214 +- include/debug.h | 180 +- include/forkserver.h | 18 +- include/hash.h | 25 +- include/sharedmem.h | 1 + include/types.h | 43 +- libdislocator/libdislocator.so.c | 63 +- libtokencap/libtokencap.so.c | 66 +- llvm_mode/LLVMInsTrim.so.cc | 552 +- llvm_mode/MarkNodes.cc | 428 +- llvm_mode/MarkNodes.h | 13 +- llvm_mode/afl-clang-fast.c | 180 +- llvm_mode/afl-llvm-pass.so.cc | 286 +- llvm_mode/afl-llvm-rt.o.c | 57 +- llvm_mode/compare-transform-pass.so.cc | 288 +- llvm_mode/split-compares-pass.so.cc | 342 +- llvm_mode/split-switches-pass.so.cc | 290 +- qemu_mode/libcompcov/compcovtest.cc | 32 +- qemu_mode/libcompcov/libcompcov.so.c | 209 +- qemu_mode/libcompcov/pmparser.h | 434 +- qemu_mode/patches/afl-qemu-common.h | 20 +- qemu_mode/patches/afl-qemu-cpu-inl.h | 164 +- .../patches/afl-qemu-cpu-translate-inl.h | 108 +- qemu_mode/patches/afl-qemu-tcg-inl.h | 534 +- qemu_mode/patches/afl-qemu-translate-inl.h | 13 +- src/afl-analyze.c | 386 +- src/afl-as.c | 164 +- src/afl-common.c | 30 +- src/afl-forkserver.c | 143 +- src/afl-fuzz-bitmap.c | 153 +- src/afl-fuzz-extras.c | 129 +- src/afl-fuzz-globals.c | 326 +- src/afl-fuzz-init.c | 637 +- src/afl-fuzz-misc.c | 27 +- src/afl-fuzz-one.c | 5707 +++++++++-------- src/afl-fuzz-python.c | 117 +- src/afl-fuzz-queue.c | 198 +- src/afl-fuzz-run.c | 178 +- src/afl-fuzz-stats.c | 446 +- src/afl-fuzz.c | 629 +- src/afl-gcc.c | 128 +- src/afl-gotcpu.c | 23 +- src/afl-sharedmem.c | 42 +- src/afl-showmap.c | 277 +- src/afl-tmin.c | 374 +- test-instr.c | 7 +- unicorn_mode/patches/afl-unicorn-common.h | 20 +- unicorn_mode/patches/afl-unicorn-cpu-inl.h | 80 +- .../patches/afl-unicorn-cpu-translate-inl.h | 31 +- unicorn_mode/patches/afl-unicorn-tcg-op-inl.h | 33 +- .../patches/afl-unicorn-tcg-runtime-inl.h | 43 +- 57 files changed, 8674 insertions(+), 7125 deletions(-) diff --git a/Makefile b/Makefile index 6eb6f871..edf3d99b 100644 --- a/Makefile +++ b/Makefile @@ -159,8 +159,8 @@ afl-gotcpu: src/afl-gotcpu.c $(COMM_HDR) | test_x86 code-format: - ./.custom-format.py -i src/* - ./.custom-format.py -i include/* + ./.custom-format.py -i src/*.c + ./.custom-format.py -i include/*.h ./.custom-format.py -i libdislocator/*.c ./.custom-format.py -i libtokencap/*.c ./.custom-format.py -i llvm_mode/*.c diff --git a/include/afl-as.h b/include/afl-as.h index 4748eda7..4f8fb640 100644 --- a/include/afl-as.h +++ b/include/afl-as.h @@ -37,7 +37,7 @@ #include "config.h" #include "types.h" -/* +/* ------------------ Performances notes ------------------ @@ -106,47 +106,47 @@ static const u8* 
trampoline_fmt_32 = - "\n" - "/* --- AFL TRAMPOLINE (32-BIT) --- */\n" - "\n" - ".align 4\n" - "\n" - "leal -16(%%esp), %%esp\n" - "movl %%edi, 0(%%esp)\n" - "movl %%edx, 4(%%esp)\n" - "movl %%ecx, 8(%%esp)\n" - "movl %%eax, 12(%%esp)\n" - "movl $0x%08x, %%ecx\n" - "call __afl_maybe_log\n" - "movl 12(%%esp), %%eax\n" - "movl 8(%%esp), %%ecx\n" - "movl 4(%%esp), %%edx\n" - "movl 0(%%esp), %%edi\n" - "leal 16(%%esp), %%esp\n" - "\n" - "/* --- END --- */\n" - "\n"; + "\n" + "/* --- AFL TRAMPOLINE (32-BIT) --- */\n" + "\n" + ".align 4\n" + "\n" + "leal -16(%%esp), %%esp\n" + "movl %%edi, 0(%%esp)\n" + "movl %%edx, 4(%%esp)\n" + "movl %%ecx, 8(%%esp)\n" + "movl %%eax, 12(%%esp)\n" + "movl $0x%08x, %%ecx\n" + "call __afl_maybe_log\n" + "movl 12(%%esp), %%eax\n" + "movl 8(%%esp), %%ecx\n" + "movl 4(%%esp), %%edx\n" + "movl 0(%%esp), %%edi\n" + "leal 16(%%esp), %%esp\n" + "\n" + "/* --- END --- */\n" + "\n"; static const u8* trampoline_fmt_64 = - "\n" - "/* --- AFL TRAMPOLINE (64-BIT) --- */\n" - "\n" - ".align 4\n" - "\n" - "leaq -(128+24)(%%rsp), %%rsp\n" - "movq %%rdx, 0(%%rsp)\n" - "movq %%rcx, 8(%%rsp)\n" - "movq %%rax, 16(%%rsp)\n" - "movq $0x%08x, %%rcx\n" - "call __afl_maybe_log\n" - "movq 16(%%rsp), %%rax\n" - "movq 8(%%rsp), %%rcx\n" - "movq 0(%%rsp), %%rdx\n" - "leaq (128+24)(%%rsp), %%rsp\n" - "\n" - "/* --- END --- */\n" - "\n"; + "\n" + "/* --- AFL TRAMPOLINE (64-BIT) --- */\n" + "\n" + ".align 4\n" + "\n" + "leaq -(128+24)(%%rsp), %%rsp\n" + "movq %%rdx, 0(%%rsp)\n" + "movq %%rcx, 8(%%rsp)\n" + "movq %%rax, 16(%%rsp)\n" + "movq $0x%08x, %%rcx\n" + "call __afl_maybe_log\n" + "movq 16(%%rsp), %%rax\n" + "movq 8(%%rsp), %%rcx\n" + "movq 0(%%rsp), %%rdx\n" + "leaq (128+24)(%%rsp), %%rsp\n" + "\n" + "/* --- END --- */\n" + "\n"; static const u8* main_payload_32 = @@ -398,9 +398,9 @@ static const u8* main_payload_32 = recognize .string. 
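*/

/* Context for the trampoline strings reformatted above: afl-as fills the
   single %08x in either format with a random compile-time location in the
   shared coverage map, roughly like this (R() inlined for self-containment;
   the caller picks trampoline_fmt_32 or trampoline_fmt_64): */

#include <stdio.h>
#include <stdlib.h>

#define MAP_SIZE (1 << 16)
#define R(x) (random() % (x))

static void emit_trampoline(FILE* outf, const char* fmt) {

  fprintf(outf, fmt, (unsigned)R(MAP_SIZE));   /* %08x -> random map slot */

}

/*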
*/ #ifdef __APPLE__ -# define CALL_L64(str) "call _" str "\n" +# define CALL_L64(str) "call _" str "\n" #else -# define CALL_L64(str) "call " str "@PLT\n" +# define CALL_L64(str) "call " str "@PLT\n" #endif /* ^__APPLE__ */ static const u8* main_payload_64 = @@ -415,7 +415,7 @@ static const u8* main_payload_64 = "\n" "__afl_maybe_log:\n" "\n" -#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9)) +#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9)) " .byte 0x9f /* lahf */\n" #else " lahf\n" @@ -448,7 +448,7 @@ static const u8* main_payload_64 = "__afl_return:\n" "\n" " addb $127, %al\n" -#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9)) +#if defined(__OpenBSD__) || (defined(__FreeBSD__) && (__FreeBSD__ < 9)) " .byte 0x9e /* sahf */\n" #else " sahf\n" @@ -737,9 +737,9 @@ static const u8* main_payload_64 = #ifdef __APPLE__ " .comm __afl_area_ptr, 8\n" -#ifndef COVERAGE_ONLY +# ifndef COVERAGE_ONLY " .comm __afl_prev_loc, 8\n" -#endif /* !COVERAGE_ONLY */ +# endif /* !COVERAGE_ONLY */ " .comm __afl_fork_pid, 4\n" " .comm __afl_temp, 4\n" " .comm __afl_setup_failure, 1\n" @@ -747,9 +747,9 @@ static const u8* main_payload_64 = #else " .lcomm __afl_area_ptr, 8\n" -#ifndef COVERAGE_ONLY +# ifndef COVERAGE_ONLY " .lcomm __afl_prev_loc, 8\n" -#endif /* !COVERAGE_ONLY */ +# endif /* !COVERAGE_ONLY */ " .lcomm __afl_fork_pid, 4\n" " .lcomm __afl_temp, 4\n" " .lcomm __afl_setup_failure, 1\n" @@ -765,3 +765,4 @@ static const u8* main_payload_64 = "\n"; #endif /* !_HAVE_AFL_AS_H */ + diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index ca22ef75..3e121851 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -27,12 +27,12 @@ #define MESSAGES_TO_STDOUT #ifndef _GNU_SOURCE -#define _GNU_SOURCE +# define _GNU_SOURCE #endif #define _FILE_OFFSET_BITS 64 #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" @@ -68,7 +68,7 @@ #include #include -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) # include # define HAVE_ARC4RANDOM 1 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ @@ -88,45 +88,47 @@ struct queue_entry { - u8* fname; /* File name for the test case */ - u32 len; /* Input length */ + u8* fname; /* File name for the test case */ + u32 len; /* Input length */ - u8 cal_failed, /* Calibration failed? */ - trim_done, /* Trimmed? */ - was_fuzzed, /* historical, but needed for MOpt */ - passed_det, /* Deterministic stages passed? */ - has_new_cov, /* Triggers new coverage? */ - var_behavior, /* Variable behavior? */ - favored, /* Currently favored? */ - fs_redundant; /* Marked as redundant in the fs? */ + u8 cal_failed, /* Calibration failed? */ + trim_done, /* Trimmed? */ + was_fuzzed, /* historical, but needed for MOpt */ + passed_det, /* Deterministic stages passed? */ + has_new_cov, /* Triggers new coverage? */ + var_behavior, /* Variable behavior? */ + favored, /* Currently favored? */ + fs_redundant; /* Marked as redundant in the fs? 
*/ - u32 bitmap_size, /* Number of bits set in bitmap */ - fuzz_level, /* Number of fuzzing iterations */ - exec_cksum; /* Checksum of the execution trace */ + u32 bitmap_size, /* Number of bits set in bitmap */ + fuzz_level, /* Number of fuzzing iterations */ + exec_cksum; /* Checksum of the execution trace */ - u64 exec_us, /* Execution time (us) */ - handicap, /* Number of queue cycles behind */ - n_fuzz, /* Number of fuzz, does not overflow */ - depth; /* Path depth */ + u64 exec_us, /* Execution time (us) */ + handicap, /* Number of queue cycles behind */ + n_fuzz, /* Number of fuzz, does not overflow */ + depth; /* Path depth */ - u8* trace_mini; /* Trace bytes, if kept */ - u32 tc_ref; /* Trace bytes ref count */ + u8* trace_mini; /* Trace bytes, if kept */ + u32 tc_ref; /* Trace bytes ref count */ - struct queue_entry *next, /* Next element, if any */ - *next_100; /* 100 elements ahead */ + struct queue_entry *next, /* Next element, if any */ + *next_100; /* 100 elements ahead */ }; struct extra_data { - u8* data; /* Dictionary token data */ - u32 len; /* Dictionary token length */ - u32 hit_cnt; /* Use count in the corpus */ -}; + u8* data; /* Dictionary token data */ + u32 len; /* Dictionary token length */ + u32 hit_cnt; /* Use count in the corpus */ + +}; /* Fuzzing stages */ enum { + /* 00 */ STAGE_FLIP1, /* 01 */ STAGE_FLIP2, /* 02 */ STAGE_FLIP4, @@ -146,72 +148,60 @@ enum { /* 16 */ STAGE_SPLICE, /* 17 */ STAGE_PYTHON, /* 18 */ STAGE_CUSTOM_MUTATOR + }; /* Stage value types */ enum { + /* 00 */ STAGE_VAL_NONE, /* 01 */ STAGE_VAL_LE, /* 02 */ STAGE_VAL_BE + }; /* Execution status fault codes */ enum { + /* 00 */ FAULT_NONE, /* 01 */ FAULT_TMOUT, /* 02 */ FAULT_CRASH, /* 03 */ FAULT_ERROR, /* 04 */ FAULT_NOINST, /* 05 */ FAULT_NOBITS -}; +}; /* MOpt: Lots of globals, but mostly for the status UI and other things where it really makes no sense to haul them around as function parameters. 
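*/

/* The next/next_100 pair in struct queue_entry above exists to make seeks
   through the linked list cheap: hop forward in hundreds first, then
   single-step. Sketch, with afl-fuzz.h included (u32 per types.h; q assumed
   non-NULL and idx within bounds): */

static struct queue_entry* seek_entry(struct queue_entry* q, u32 idx) {

  while (idx >= 100 && q->next_100) { q = q->next_100; idx -= 100; }
  while (idx-- && q->next) q = q->next;

  return q;

}

/*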
*/ -extern u64 limit_time_puppet, - orig_hit_cnt_puppet, - last_limit_time_start, - tmp_pilot_time, - total_pacemaker_time, - total_puppet_find, - temp_puppet_find, - most_time_key, - most_time, - most_execs_key, - most_execs, - old_hit_count; +extern u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start, + tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find, + most_time_key, most_time, most_execs_key, most_execs, old_hit_count; -extern s32 SPLICE_CYCLES_puppet, - limit_time_sig, - key_puppet, - key_module; +extern s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module; -extern double w_init, - w_end, - w_now; +extern double w_init, w_end, w_now; extern s32 g_now; extern s32 g_max; #define operator_num 16 #define swarm_num 5 -#define period_core 500000 +#define period_core 500000 extern u64 tmp_core_time; extern s32 swarm_now; -extern double x_now[swarm_num][operator_num], - L_best[swarm_num][operator_num], - eff_best[swarm_num][operator_num], - G_best[operator_num], - v_now[swarm_num][operator_num], - probability_now[swarm_num][operator_num], - swarm_fitness[swarm_num]; +extern double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num], + eff_best[swarm_num][operator_num], G_best[operator_num], + v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num], + swarm_fitness[swarm_num]; -extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */ +extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per + fuzz stage */ stage_finds_puppet_v2[swarm_num][operator_num], stage_cycles_puppet_v2[swarm_num][operator_num], stage_cycles_puppet_v3[swarm_num][operator_num], @@ -221,9 +211,9 @@ extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns fo core_operator_finds_puppet_v2[operator_num], core_operator_cycles_puppet[operator_num], core_operator_cycles_puppet_v2[operator_num], - core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ + core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ -#define RAND_C (rand()%1000*0.001) +#define RAND_C (rand() % 1000 * 0.001) #define v_max 1 #define v_min 0.05 #define limit_time_bound 1.1 @@ -236,225 +226,228 @@ extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns fo #define period_pilot 50000 extern double period_pilot_tmp; -extern s32 key_lv; +extern s32 key_lv; -extern u8 *in_dir, /* Input directory with test cases */ - *out_dir, /* Working & output directory */ - *tmp_dir , /* Temporary directory for input */ - *sync_dir, /* Synchronization directory */ - *sync_id, /* Fuzzer ID */ - *power_name, /* Power schedule name */ - *use_banner, /* Display banner */ - *in_bitmap, /* Input bitmap */ - *file_extension, /* File extension */ - *orig_cmdline; /* Original command line */ -extern u8 *doc_path, /* Path to documentation dir */ - *target_path, /* Path to target binary */ - *out_file; /* File to fuzz, if any */ +extern u8 *in_dir, /* Input directory with test cases */ + *out_dir, /* Working & output directory */ + *tmp_dir, /* Temporary directory for input */ + *sync_dir, /* Synchronization directory */ + *sync_id, /* Fuzzer ID */ + *power_name, /* Power schedule name */ + *use_banner, /* Display banner */ + *in_bitmap, /* Input bitmap */ + *file_extension, /* File extension */ + *orig_cmdline; /* Original command line */ +extern u8 *doc_path, /* Path to documentation dir */ + *target_path, /* Path to target binary */ + *out_file; /* File to fuzz, if any */ -extern u32 
exec_tmout; /* Configurable exec timeout (ms) */ -extern u32 hang_tmout; /* Timeout used for hang det (ms) */ +extern u32 exec_tmout; /* Configurable exec timeout (ms) */ +extern u32 hang_tmout; /* Timeout used for hang det (ms) */ -extern u64 mem_limit; /* Memory cap for child (MB) */ +extern u64 mem_limit; /* Memory cap for child (MB) */ -extern u8 cal_cycles, /* Calibration cycles defaults */ - cal_cycles_long, - debug, /* Debug mode */ - python_only; /* Python-only mode */ +extern u8 cal_cycles, /* Calibration cycles defaults */ + cal_cycles_long, debug, /* Debug mode */ + python_only; /* Python-only mode */ -extern u32 stats_update_freq; /* Stats update frequency (execs) */ +extern u32 stats_update_freq; /* Stats update frequency (execs) */ enum { - /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */ - /* 01 */ FAST, /* Exponential schedule */ - /* 02 */ COE, /* Cut-Off Exponential schedule */ - /* 03 */ LIN, /* Linear schedule */ - /* 04 */ QUAD, /* Quadratic schedule */ - /* 05 */ EXPLOIT, /* AFL's exploitation-based const. */ - + + /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */ + /* 01 */ FAST, /* Exponential schedule */ + /* 02 */ COE, /* Cut-Off Exponential schedule */ + /* 03 */ LIN, /* Linear schedule */ + /* 04 */ QUAD, /* Quadratic schedule */ + /* 05 */ EXPLOIT, /* AFL's exploitation-based const. */ + POWER_SCHEDULES_NUM + }; -extern char *power_names[POWER_SCHEDULES_NUM]; +extern char* power_names[POWER_SCHEDULES_NUM]; -extern u8 schedule; /* Power schedule (default: EXPLORE)*/ +extern u8 schedule; /* Power schedule (default: EXPLORE)*/ extern u8 havoc_max_mult; -extern u8 skip_deterministic, /* Skip deterministic stages? */ - force_deterministic, /* Force deterministic stages? */ - use_splicing, /* Recombine input files? */ - dumb_mode, /* Run in non-instrumented mode? */ - score_changed, /* Scoring for favorites changed? */ - kill_signal, /* Signal that killed the child */ - resuming_fuzz, /* Resuming an older fuzzing job? */ - timeout_given, /* Specific timeout given? */ - not_on_tty, /* stdout is not a tty */ - term_too_small, /* terminal dimensions too small */ - no_forkserver, /* Disable forkserver? */ - crash_mode, /* Crash mode! Yeah! */ - in_place_resume, /* Attempt in-place resume? */ - auto_changed, /* Auto-generated tokens changed? */ - no_cpu_meter_red, /* Feng shui on the status screen */ - no_arith, /* Skip most arithmetic ops */ - shuffle_queue, /* Shuffle input queue? */ - bitmap_changed, /* Time to update bitmap? */ - qemu_mode, /* Running in QEMU mode? */ - unicorn_mode, /* Running in Unicorn mode? */ - skip_requested, /* Skip request, via SIGUSR1 */ - run_over10m, /* Run time over 10 minutes? */ - persistent_mode, /* Running in persistent mode? */ - deferred_mode, /* Deferred forkserver mode? */ - fixed_seed, /* do not reseed */ - fast_cal, /* Try to calibrate faster? */ - uses_asan; /* Target uses ASAN? */ +extern u8 skip_deterministic, /* Skip deterministic stages? */ + force_deterministic, /* Force deterministic stages? */ + use_splicing, /* Recombine input files? */ + dumb_mode, /* Run in non-instrumented mode? */ + score_changed, /* Scoring for favorites changed? */ + kill_signal, /* Signal that killed the child */ + resuming_fuzz, /* Resuming an older fuzzing job? */ + timeout_given, /* Specific timeout given? */ + not_on_tty, /* stdout is not a tty */ + term_too_small, /* terminal dimensions too small */ + no_forkserver, /* Disable forkserver? */ + crash_mode, /* Crash mode! Yeah! 
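*/

/* One plausible way to map a user-supplied name onto the schedule enum
   above, assuming power_names[] lists the names in enum order as declared
   in this header (sketch only - not necessarily how afl-fuzz parses -p): */

#include <string.h>

extern char* power_names[];

static int parse_schedule(const char* arg, int num) {

  int i;

  for (i = 0; i < num; ++i)
    if (!strcmp(arg, power_names[i])) return i;   /* EXPLORE == 0, ... */

  return -1;                                      /* unknown name      */

}

/*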
*/ + in_place_resume, /* Attempt in-place resume? */ + auto_changed, /* Auto-generated tokens changed? */ + no_cpu_meter_red, /* Feng shui on the status screen */ + no_arith, /* Skip most arithmetic ops */ + shuffle_queue, /* Shuffle input queue? */ + bitmap_changed, /* Time to update bitmap? */ + qemu_mode, /* Running in QEMU mode? */ + unicorn_mode, /* Running in Unicorn mode? */ + skip_requested, /* Skip request, via SIGUSR1 */ + run_over10m, /* Run time over 10 minutes? */ + persistent_mode, /* Running in persistent mode? */ + deferred_mode, /* Deferred forkserver mode? */ + fixed_seed, /* do not reseed */ + fast_cal, /* Try to calibrate faster? */ + uses_asan; /* Target uses ASAN? */ -extern s32 out_fd, /* Persistent fd for out_file */ +extern s32 out_fd, /* Persistent fd for out_file */ #ifndef HAVE_ARC4RANDOM - dev_urandom_fd, /* Persistent fd for /dev/urandom */ + dev_urandom_fd, /* Persistent fd for /dev/urandom */ #endif - dev_null_fd, /* Persistent fd for /dev/null */ - fsrv_ctl_fd, /* Fork server control pipe (write) */ - fsrv_st_fd; /* Fork server status pipe (read) */ + dev_null_fd, /* Persistent fd for /dev/null */ + fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ -extern s32 forksrv_pid, /* PID of the fork server */ - child_pid, /* PID of the fuzzed program */ - out_dir_fd; /* FD of the lock file */ +extern s32 forksrv_pid, /* PID of the fork server */ + child_pid, /* PID of the fuzzed program */ + out_dir_fd; /* FD of the lock file */ -extern u8* trace_bits; /* SHM with instrumentation bitmap */ +extern u8* trace_bits; /* SHM with instrumentation bitmap */ -extern u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ - virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ - virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ +extern u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ + virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ + virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ -extern u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ +extern u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ -extern volatile u8 stop_soon, /* Ctrl-C pressed? */ - clear_screen, /* Window resized? */ - child_timed_out; /* Traced process timed out? */ +extern volatile u8 stop_soon, /* Ctrl-C pressed? */ + clear_screen, /* Window resized? */ + child_timed_out; /* Traced process timed out? 
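*/

/* The virgin_* maps above implement "report each tuple once": any bit set
   both in the current trace and in the matching virgin map is new coverage,
   and is cleared so later runs stay quiet. A byte-wise simplification of
   the word-sized logic in has_new_bits(): */

#define MAP_SIZE (1 << 16)                        /* per config.h */

static int saw_new_bits(unsigned char* trace, unsigned char* virgin) {

  int i, ret = 0;

  for (i = 0; i < MAP_SIZE; ++i)
    if (trace[i] & virgin[i]) {

      virgin[i] &= (unsigned char)~trace[i];
      ret = 1;

    }

  return ret;

}

/*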
*/ -extern u32 queued_paths, /* Total number of queued testcases */ - queued_variable, /* Testcases with variable behavior */ - queued_at_start, /* Total number of initial inputs */ - queued_discovered, /* Items discovered during this run */ - queued_imported, /* Items imported via -S */ - queued_favored, /* Paths deemed favorable */ - queued_with_cov, /* Paths with new coverage bytes */ - pending_not_fuzzed, /* Queued but not done yet */ - pending_favored, /* Pending favored paths */ - cur_skipped_paths, /* Abandoned inputs in cur cycle */ - cur_depth, /* Current path depth */ - max_depth, /* Max path depth */ - useless_at_start, /* Number of useless starting paths */ - var_byte_count, /* Bitmap bytes with var behavior */ - current_entry, /* Current queue entry ID */ - havoc_div; /* Cycle count divisor for havoc */ +extern u32 queued_paths, /* Total number of queued testcases */ + queued_variable, /* Testcases with variable behavior */ + queued_at_start, /* Total number of initial inputs */ + queued_discovered, /* Items discovered during this run */ + queued_imported, /* Items imported via -S */ + queued_favored, /* Paths deemed favorable */ + queued_with_cov, /* Paths with new coverage bytes */ + pending_not_fuzzed, /* Queued but not done yet */ + pending_favored, /* Pending favored paths */ + cur_skipped_paths, /* Abandoned inputs in cur cycle */ + cur_depth, /* Current path depth */ + max_depth, /* Max path depth */ + useless_at_start, /* Number of useless starting paths */ + var_byte_count, /* Bitmap bytes with var behavior */ + current_entry, /* Current queue entry ID */ + havoc_div; /* Cycle count divisor for havoc */ -extern u64 total_crashes, /* Total number of crashes */ - unique_crashes, /* Crashes with unique signatures */ - total_tmouts, /* Total number of timeouts */ - unique_tmouts, /* Timeouts with unique signatures */ - unique_hangs, /* Hangs with unique signatures */ - total_execs, /* Total execve() calls */ - slowest_exec_ms, /* Slowest testcase non hang in ms */ - start_time, /* Unix start time (ms) */ - last_path_time, /* Time for most recent path (ms) */ - last_crash_time, /* Time for most recent crash (ms) */ - last_hang_time, /* Time for most recent hang (ms) */ - last_crash_execs, /* Exec counter at last crash */ - queue_cycle, /* Queue round counter */ - cycles_wo_finds, /* Cycles without any new paths */ - trim_execs, /* Execs done to trim input files */ - bytes_trim_in, /* Bytes coming into the trimmer */ - bytes_trim_out, /* Bytes coming outa the trimmer */ - blocks_eff_total, /* Blocks subject to effector maps */ - blocks_eff_select; /* Blocks selected as fuzzable */ +extern u64 total_crashes, /* Total number of crashes */ + unique_crashes, /* Crashes with unique signatures */ + total_tmouts, /* Total number of timeouts */ + unique_tmouts, /* Timeouts with unique signatures */ + unique_hangs, /* Hangs with unique signatures */ + total_execs, /* Total execve() calls */ + slowest_exec_ms, /* Slowest testcase non hang in ms */ + start_time, /* Unix start time (ms) */ + last_path_time, /* Time for most recent path (ms) */ + last_crash_time, /* Time for most recent crash (ms) */ + last_hang_time, /* Time for most recent hang (ms) */ + last_crash_execs, /* Exec counter at last crash */ + queue_cycle, /* Queue round counter */ + cycles_wo_finds, /* Cycles without any new paths */ + trim_execs, /* Execs done to trim input files */ + bytes_trim_in, /* Bytes coming into the trimmer */ + bytes_trim_out, /* Bytes coming outa the trimmer */ + blocks_eff_total, /* Blocks 
subject to effector maps */ + blocks_eff_select; /* Blocks selected as fuzzable */ -extern u32 subseq_tmouts; /* Number of timeouts in a row */ +extern u32 subseq_tmouts; /* Number of timeouts in a row */ -extern u8 *stage_name, /* Name of the current fuzz stage */ - *stage_short, /* Short stage name */ - *syncing_party; /* Currently syncing with... */ +extern u8 *stage_name, /* Name of the current fuzz stage */ + *stage_short, /* Short stage name */ + *syncing_party; /* Currently syncing with... */ -extern s32 stage_cur, stage_max; /* Stage progression */ -extern s32 splicing_with; /* Splicing with which test case? */ +extern s32 stage_cur, stage_max; /* Stage progression */ +extern s32 splicing_with; /* Splicing with which test case? */ -extern u32 master_id, master_max; /* Master instance job splitting */ +extern u32 master_id, master_max; /* Master instance job splitting */ -extern u32 syncing_case; /* Syncing with case #... */ +extern u32 syncing_case; /* Syncing with case #... */ -extern s32 stage_cur_byte, /* Byte offset of current stage op */ - stage_cur_val; /* Value used for stage op */ +extern s32 stage_cur_byte, /* Byte offset of current stage op */ + stage_cur_val; /* Value used for stage op */ -extern u8 stage_val_type; /* Value type (STAGE_VAL_*) */ +extern u8 stage_val_type; /* Value type (STAGE_VAL_*) */ -extern u64 stage_finds[32], /* Patterns found per fuzz stage */ - stage_cycles[32]; /* Execs per fuzz stage */ +extern u64 stage_finds[32], /* Patterns found per fuzz stage */ + stage_cycles[32]; /* Execs per fuzz stage */ #ifndef HAVE_ARC4RANDOM -extern u32 rand_cnt; /* Random number counter */ +extern u32 rand_cnt; /* Random number counter */ #endif -extern u64 total_cal_us, /* Total calibration time (us) */ - total_cal_cycles; /* Total calibration cycles */ +extern u64 total_cal_us, /* Total calibration time (us) */ + total_cal_cycles; /* Total calibration cycles */ -extern u64 total_bitmap_size, /* Total bit count for all bitmaps */ - total_bitmap_entries; /* Number of bitmaps counted */ +extern u64 total_bitmap_size, /* Total bit count for all bitmaps */ + total_bitmap_entries; /* Number of bitmaps counted */ -extern s32 cpu_core_count; /* CPU core count */ +extern s32 cpu_core_count; /* CPU core count */ #ifdef HAVE_AFFINITY -extern s32 cpu_aff; /* Selected CPU core */ +extern s32 cpu_aff; /* Selected CPU core */ #endif /* HAVE_AFFINITY */ -extern FILE* plot_file; /* Gnuplot output file */ +extern FILE* plot_file; /* Gnuplot output file */ - - -extern struct queue_entry *queue, /* Fuzzing queue (linked list) */ - *queue_cur, /* Current offset within the queue */ - *queue_top, /* Top of the list */ - *q_prev100; /* Previous 100 marker */ +extern struct queue_entry *queue, /* Fuzzing queue (linked list) */ + *queue_cur, /* Current offset within the queue */ + *queue_top, /* Top of the list */ + *q_prev100; /* Previous 100 marker */ extern struct queue_entry* - top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ + top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ -extern struct extra_data* extras; /* Extra tokens to fuzz with */ -extern u32 extras_cnt; /* Total number of tokens read */ +extern struct extra_data* extras; /* Extra tokens to fuzz with */ +extern u32 extras_cnt; /* Total number of tokens read */ -extern struct extra_data* a_extras; /* Automatically selected extras */ -extern u32 a_extras_cnt; /* Total number of tokens available */ +extern struct extra_data* a_extras; /* Automatically selected extras */ +extern u32 a_extras_cnt; /* Total number of 
tokens available */ u8* (*post_handler)(u8* buf, u32* len); /* hooks for the custom mutator function */ -size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed); -size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data); +size_t (*custom_mutator)(u8* data, size_t size, u8* mutated_out, + size_t max_size, unsigned int seed); +size_t (*pre_save_handler)(u8* data, size_t size, u8** new_data); /* Interesting values, as per config.h */ extern s8 interesting_8[INTERESTING_8_LEN]; extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN]; -extern s32 interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN]; +extern s32 + interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN]; /* Python stuff */ #ifdef USE_PYTHON -#include +# include -extern PyObject *py_module; +extern PyObject* py_module; enum { + /* 00 */ PY_FUNC_INIT, /* 01 */ PY_FUNC_FUZZ, /* 02 */ PY_FUNC_INIT_TRIM, /* 03 */ PY_FUNC_POST_TRIM, /* 04 */ PY_FUNC_TRIM, PY_FUNC_COUNT + }; -extern PyObject *py_functions[PY_FUNC_COUNT]; +extern PyObject* py_functions[PY_FUNC_COUNT]; #endif @@ -462,13 +455,13 @@ extern PyObject *py_functions[PY_FUNC_COUNT]; /* Python */ #ifdef USE_PYTHON -int init_py(); +int init_py(); void finalize_py(); void fuzz_py(char*, size_t, char*, size_t, char**, size_t*); -u32 init_trim_py(char*, size_t); -u32 post_trim_py(char); +u32 init_trim_py(char*, size_t); +u32 post_trim_py(char); void trim_py(char**, size_t*); -u8 trim_case_python(char**, struct queue_entry*, u8*); +u8 trim_case_python(char**, struct queue_entry*, u8*); #endif /* Queue */ @@ -480,16 +473,16 @@ void add_to_queue(u8*, u32, u8); void destroy_queue(void); void update_bitmap_score(struct queue_entry*); void cull_queue(void); -u32 calculate_score(struct queue_entry*); +u32 calculate_score(struct queue_entry*); /* Bitmap */ void write_bitmap(void); void read_bitmap(u8*); -u8 has_new_bits(u8*); -u32 count_bits(u8*); -u32 count_bytes(u8*); -u32 count_non_255_bytes(u8*); +u8 has_new_bits(u8*); +u32 count_bits(u8*); +u32 count_bytes(u8*); +u32 count_non_255_bytes(u8*); #ifdef __x86_64__ void simplify_trace(u64*); void classify_counts(u64*); @@ -529,51 +522,51 @@ void show_init_stats(void); /* Run */ -u8 run_target(char**, u32); +u8 run_target(char**, u32); void write_to_testcase(void*, u32); void write_with_gap(void*, u32, u32, u32); -u8 calibrate_case(char**, struct queue_entry*, u8*, u32, u8); +u8 calibrate_case(char**, struct queue_entry*, u8*, u32, u8); void sync_fuzzers(char**); -u8 trim_case(char**, struct queue_entry*, u8*); -u8 common_fuzz_stuff(char**, u8*, u32); +u8 trim_case(char**, struct queue_entry*, u8*); +u8 common_fuzz_stuff(char**, u8*, u32); /* Fuzz one */ -u8 fuzz_one_original(char**); +u8 fuzz_one_original(char**); static u8 pilot_fuzzing(char**); -u8 core_fuzzing(char**); -void pso_updating(void); -u8 fuzz_one(char**); +u8 core_fuzzing(char**); +void pso_updating(void); +u8 fuzz_one(char**); /* Init */ #ifdef HAVE_AFFINITY void bind_to_free_cpu(void); #endif -void setup_post(void); -void setup_custom_mutator(void); -void read_testcases(void); -void perform_dry_run(char**); -void pivot_inputs(void); -u32 find_start_position(void); -void find_timeout(void); +void setup_post(void); +void setup_custom_mutator(void); +void read_testcases(void); +void perform_dry_run(char**); +void pivot_inputs(void); +u32 find_start_position(void); +void find_timeout(void); double get_runnable_processes(void); -void nuke_resume_dir(void); -void 
maybe_delete_out_dir(void); -void setup_dirs_fds(void); -void setup_cmdline_file(char**); -void setup_stdio_file(void); -void check_crash_handling(void); -void check_cpu_governor(void); -void get_core_count(void); -void fix_up_sync(void); -void check_asan_opts(void); -void check_binary(u8*); -void fix_up_banner(u8*); -void check_if_tty(void); -void setup_signal_handlers(void); +void nuke_resume_dir(void); +void maybe_delete_out_dir(void); +void setup_dirs_fds(void); +void setup_cmdline_file(char**); +void setup_stdio_file(void); +void check_crash_handling(void); +void check_cpu_governor(void); +void get_core_count(void); +void fix_up_sync(void); +void check_asan_opts(void); +void check_binary(u8*); +void fix_up_banner(u8*); +void check_if_tty(void); +void setup_signal_handlers(void); char** get_qemu_argv(u8*, char**, int); -void save_cmdline(u32, char**); +void save_cmdline(u32, char**); /**** Inline routines ****/ @@ -581,25 +574,27 @@ void save_cmdline(u32, char**); have slight bias. */ static inline u32 UR(u32 limit) { + #ifdef HAVE_ARC4RANDOM - if (fixed_seed) { - return random() % limit; - } + if (fixed_seed) { return random() % limit; } /* The boundary not being necessarily a power of 2, we need to ensure the result uniformity. */ return arc4random_uniform(limit); #else if (!fixed_seed && unlikely(!rand_cnt--)) { + u32 seed[2]; ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom"); srandom(seed[0]); rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG); + } return random() % limit; #endif + } /* Find first power of two greater or equal to val (assuming val under @@ -608,7 +603,8 @@ static inline u32 UR(u32 limit) { static u64 next_p2(u64 val) { u64 ret = 1; - while (val > ret) ret <<= 1; + while (val > ret) + ret <<= 1; return ret; } @@ -617,7 +613,7 @@ static u64 next_p2(u64 val) { static u64 get_cur_time(void) { - struct timeval tv; + struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); @@ -626,12 +622,11 @@ static u64 get_cur_time(void) { } - /* Get unix time in microseconds */ static u64 get_cur_time_us(void) { - struct timeval tv; + struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 2f98da0e..4a4beff1 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -31,82 +31,105 @@ /* User-facing macro to sprintf() to a dynamically allocated buffer. */ -#define alloc_printf(_str...) ({ \ - u8* _tmp; \ - s32 _len = snprintf(NULL, 0, _str); \ +#define alloc_printf(_str...) \ + ({ \ + \ + u8* _tmp; \ + s32 _len = snprintf(NULL, 0, _str); \ if (_len < 0) FATAL("Whoa, snprintf() fails?!"); \ - _tmp = ck_alloc(_len + 1); \ - snprintf((char*)_tmp, _len + 1, _str); \ - _tmp; \ + _tmp = ck_alloc(_len + 1); \ + snprintf((char*)_tmp, _len + 1, _str); \ + _tmp; \ + \ }) /* Macro to enforce allocation limits as a last-resort defense against integer overflows. */ -#define ALLOC_CHECK_SIZE(_s) do { \ - if ((_s) > MAX_ALLOC) \ - ABORT("Bad alloc request: %u bytes", (_s)); \ +#define ALLOC_CHECK_SIZE(_s) \ + do { \ + \ + if ((_s) > MAX_ALLOC) ABORT("Bad alloc request: %u bytes", (_s)); \ + \ } while (0) /* Macro to check malloc() failures and the like. */ -#define ALLOC_CHECK_RESULT(_r, _s) do { \ - if (!(_r)) \ - ABORT("Out of memory: can't allocate %u bytes", (_s)); \ +#define ALLOC_CHECK_RESULT(_r, _s) \ + do { \ + \ + if (!(_r)) ABORT("Out of memory: can't allocate %u bytes", (_s)); \ + \ } while (0) /* Magic tokens used to mark used / freed chunks. 
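*/

/* Typical alloc_printf() call site (with this header included): the macro
   snprintf()-measures, ck_alloc()s, formats, and returns a buffer owned by
   the caller, to be released with ck_free(). Names are placeholders: */

static u8* queue_entry_path(u8* out_dir, u32 id) {

  u8* fn = alloc_printf("%s/queue/id:%06u", out_dir, id);

  /* ... use fn, then hand it off or ck_free(fn) ... */

  return fn;

}

/*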
*/ -#define ALLOC_MAGIC_C1 0xFF00FF00 /* Used head (dword) */ -#define ALLOC_MAGIC_F 0xFE00FE00 /* Freed head (dword) */ -#define ALLOC_MAGIC_C2 0xF0 /* Used tail (byte) */ +#define ALLOC_MAGIC_C1 0xFF00FF00 /* Used head (dword) */ +#define ALLOC_MAGIC_F 0xFE00FE00 /* Freed head (dword) */ +#define ALLOC_MAGIC_C2 0xF0 /* Used tail (byte) */ /* Positions of guard tokens in relation to the user-visible pointer. */ -#define ALLOC_C1(_ptr) (((u32*)(_ptr))[-2]) -#define ALLOC_S(_ptr) (((u32*)(_ptr))[-1]) -#define ALLOC_C2(_ptr) (((u8*)(_ptr))[ALLOC_S(_ptr)]) +#define ALLOC_C1(_ptr) (((u32*)(_ptr))[-2]) +#define ALLOC_S(_ptr) (((u32*)(_ptr))[-1]) +#define ALLOC_C2(_ptr) (((u8*)(_ptr))[ALLOC_S(_ptr)]) -#define ALLOC_OFF_HEAD 8 +#define ALLOC_OFF_HEAD 8 #define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1) /* Allocator increments for ck_realloc_block(). */ -#define ALLOC_BLK_INC 256 +#define ALLOC_BLK_INC 256 /* Sanity-checking macros for pointers. */ -#define CHECK_PTR(_p) do { \ - if (_p) { \ - if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\ - if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ - ABORT("Use after free."); \ - else ABORT("Corrupted head alloc canary."); \ - } \ - } \ +#define CHECK_PTR(_p) \ + do { \ + \ + if (_p) { \ + \ + if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) { \ + \ + if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ + ABORT("Use after free."); \ + else \ + ABORT("Corrupted head alloc canary."); \ + \ + } \ + \ + } \ + \ } while (0) /* #define CHECK_PTR(_p) do { \ + \ if (_p) { \ + \ if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\ + \ if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ ABORT("Use after free."); \ else ABORT("Corrupted head alloc canary."); \ + \ } \ if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \ ABORT("Corrupted tail alloc canary."); \ + \ } \ + \ } while (0) */ -#define CHECK_PTR_EXPR(_p) ({ \ - typeof (_p) _tmp = (_p); \ - CHECK_PTR(_tmp); \ - _tmp; \ +#define CHECK_PTR_EXPR(_p) \ + ({ \ + \ + typeof(_p) _tmp = (_p); \ + CHECK_PTR(_tmp); \ + _tmp; \ + \ }) - /* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized requests. */ @@ -123,14 +146,13 @@ static inline void* DFL_ck_alloc_nozero(u32 size) { ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; - return (void *)ret; + return (void*)ret; } - /* Allocate a buffer, returning zeroed memory. */ static inline void* DFL_ck_alloc(u32 size) { @@ -144,7 +166,6 @@ static inline void* DFL_ck_alloc(u32 size) { } - /* Free memory, checking for double free and corrupted heap. When DEBUG_BUILD is set, the old memory will be also clobbered with 0xFF. */ @@ -163,20 +184,19 @@ static inline void DFL_ck_free(void* mem) { ALLOC_C1(mem) = ALLOC_MAGIC_F; - u8 *realStart = mem; + u8* realStart = mem; free(realStart - ALLOC_OFF_HEAD); } - /* Re-allocate a buffer, checking for issues and zeroing any newly-added tail. With DEBUG_BUILD, the buffer is always reallocated to a new addresses and the old memory is clobbered with 0xFF. 
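*/

/* Memory layout implied by the guard macros above, for a user pointer p
   returned by ck_alloc(size):

     [ C1 magic ][ size ][ user data ........ ][ C2 magic ]
       4 bytes    4 bytes      size bytes         1 byte

   hence ALLOC_OFF_HEAD == 8 and ALLOC_OFF_TOTAL == 9. A quick integrity
   probe, assuming this header is included: */

#include <assert.h>

static void check_canaries(void* p) {

  assert(ALLOC_C1(p) == ALLOC_MAGIC_C1);   /* head canary intact */
  assert(ALLOC_C2(p) == ALLOC_MAGIC_C2);   /* tail canary intact */

}

/*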
*/ static inline void* DFL_ck_realloc(void* orig, u32 size) { - u8* ret; - u32 old_size = 0; + u8* ret; + u32 old_size = 0; if (!size) { @@ -193,9 +213,9 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { ALLOC_C1(orig) = ALLOC_MAGIC_F; #endif /* !DEBUG_BUILD */ - old_size = ALLOC_S(orig); - u8 *origu8 = orig; - origu8 -= ALLOC_OFF_HEAD; + old_size = ALLOC_S(orig); + u8* origu8 = orig; + origu8 -= ALLOC_OFF_HEAD; orig = origu8; ALLOC_CHECK_SIZE(old_size); @@ -219,7 +239,7 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { if (orig) { - u8 *origu8 = orig; + u8* origu8 = orig; memcpy(ret + ALLOC_OFF_HEAD, origu8 + ALLOC_OFF_HEAD, MIN(size, old_size)); memset(origu8 + ALLOC_OFF_HEAD, 0xFF, old_size); @@ -234,17 +254,15 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; - if (size > old_size) - memset(ret + old_size, 0, size - old_size); + if (size > old_size) memset(ret + old_size, 0, size - old_size); - return (void *)ret; + return (void*)ret; } - /* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up repeated small reallocs without complicating the user code). */ @@ -268,13 +286,12 @@ static inline void* DFL_ck_realloc_block(void* orig, u32 size) { } - /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */ static inline u8* DFL_ck_strdup(u8* str) { - u8* ret; - u32 size; + u8* ret; + u32 size; if (!str) return NULL; @@ -287,38 +304,36 @@ static inline u8* DFL_ck_strdup(u8* str) { ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; return memcpy(ret, str, size); } - /* Create a buffer with a copy of a memory block. Returns NULL for zero-sized or NULL inputs. */ static inline void* DFL_ck_memdup(void* mem, u32 size) { - u8* ret; + u8* ret; if (!mem || !size) return NULL; ALLOC_CHECK_SIZE(size); ret = malloc(size + ALLOC_OFF_TOTAL); ALLOC_CHECK_RESULT(ret, size); - + ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; return memcpy(ret, mem, size); } - /* Create a buffer with a block of text, appending a NUL terminator at the end. Returns NULL for zero-sized or NULL inputs. */ @@ -331,11 +346,11 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) { ALLOC_CHECK_SIZE(size); ret = malloc(size + ALLOC_OFF_TOTAL + 1); ALLOC_CHECK_RESULT(ret, size); - + ret += ALLOC_OFF_HEAD; ALLOC_C1(ret) = ALLOC_MAGIC_C1; - ALLOC_S(ret) = size; + ALLOC_S(ret) = size; ALLOC_C2(ret) = ALLOC_MAGIC_C2; memcpy(ret, mem, size); @@ -345,22 +360,21 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) { } - #ifndef DEBUG_BUILD /* In non-debug mode, we just do straightforward aliasing of the above functions to user-visible names such as ck_alloc(). 
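*/

/* A round trip through the checked allocator, via the user-facing aliases
   defined just below (types.h assumed included): double frees and
   use-after-free then ABORT() with a diagnostic instead of corrupting the
   heap. */

#include <string.h>

static void alloc_demo(void) {

  u8* buf = ck_alloc(64);                /* zero-filled, canaried          */

  memcpy(buf, "hello", 6);
  buf = ck_realloc(buf, 128);            /* grows; new tail is zeroed      */

  u8* cpy = ck_strdup(buf);              /* fresh canaried copy of "hello" */

  ck_free(cpy);
  ck_free(buf);                          /* freeing buf again would abort  */

}

/*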
*/ -#define ck_alloc DFL_ck_alloc -#define ck_alloc_nozero DFL_ck_alloc_nozero -#define ck_realloc DFL_ck_realloc -#define ck_realloc_block DFL_ck_realloc_block -#define ck_strdup DFL_ck_strdup -#define ck_memdup DFL_ck_memdup -#define ck_memdup_str DFL_ck_memdup_str -#define ck_free DFL_ck_free +# define ck_alloc DFL_ck_alloc +# define ck_alloc_nozero DFL_ck_alloc_nozero +# define ck_realloc DFL_ck_realloc +# define ck_realloc_block DFL_ck_realloc_block +# define ck_strdup DFL_ck_strdup +# define ck_memdup DFL_ck_memdup +# define ck_memdup_str DFL_ck_memdup_str +# define ck_free DFL_ck_free -#define alloc_report() +# define alloc_report() #else @@ -369,34 +383,35 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) { /* Alloc tracking data structures: */ -#define ALLOC_BUCKETS 4096 +# define ALLOC_BUCKETS 4096 struct TRK_obj { - void *ptr; + + void* ptr; char *file, *func; - u32 line; + u32 line; + }; -#ifdef AFL_MAIN +# ifdef AFL_MAIN struct TRK_obj* TRK[ALLOC_BUCKETS]; u32 TRK_cnt[ALLOC_BUCKETS]; -# define alloc_report() TRK_report() +# define alloc_report() TRK_report() -#else +# else extern struct TRK_obj* TRK[ALLOC_BUCKETS]; -extern u32 TRK_cnt[ALLOC_BUCKETS]; +extern u32 TRK_cnt[ALLOC_BUCKETS]; -# define alloc_report() +# define alloc_report() -#endif /* ^AFL_MAIN */ +# endif /* ^AFL_MAIN */ /* Bucket-assigning function for a given pointer: */ -#define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS) - +# define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS) /* Add a new entry to the list of allocated objects. */ @@ -415,7 +430,7 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func, if (!TRK[bucket][i].ptr) { - TRK[bucket][i].ptr = ptr; + TRK[bucket][i].ptr = ptr; TRK[bucket][i].file = (char*)file; TRK[bucket][i].func = (char*)func; TRK[bucket][i].line = line; @@ -425,10 +440,10 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func, /* No space available - allocate more. */ - TRK[bucket] = DFL_ck_realloc_block(TRK[bucket], - (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj)); + TRK[bucket] = DFL_ck_realloc_block( + TRK[bucket], (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj)); - TRK[bucket][i].ptr = ptr; + TRK[bucket][i].ptr = ptr; TRK[bucket][i].file = (char*)file; TRK[bucket][i].func = (char*)func; TRK[bucket][i].line = line; @@ -437,7 +452,6 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func, } - /* Remove entry from the list of allocated objects. */ static inline void TRK_free_buf(void* ptr, const char* file, const char* func, @@ -460,12 +474,11 @@ static inline void TRK_free_buf(void* ptr, const char* file, const char* func, } - WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)", - func, file, line); + WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)", func, file, + line); } - /* Do a final report on all non-deallocated objects. 
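*/

/* How TRKH() above distributes pointers across the 4096 tracking buckets:
   fold the upper address bits into the lower ones, then reduce modulo the
   bucket count (sketch; the macro itself operates on a u32 cast): */

static unsigned bucket_of(void* ptr) {

  unsigned p = (unsigned)(unsigned long)ptr;

  return ((p >> 16) ^ p) % 4096;

}

/*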
*/ static inline void TRK_report(void) { @@ -482,7 +495,6 @@ static inline void TRK_report(void) { } - /* Simple wrappers for non-debugging functions: */ static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func, @@ -494,7 +506,6 @@ static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func, } - static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file, const char* func, u32 line) { @@ -505,7 +516,6 @@ static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file, } - static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file, const char* func, u32 line) { @@ -516,7 +526,6 @@ static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file, } - static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func, u32 line) { @@ -526,7 +535,6 @@ static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func, } - static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file, const char* func, u32 line) { @@ -536,7 +544,6 @@ static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file, } - static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file, const char* func, u32 line) { @@ -546,9 +553,8 @@ static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file, } - -static inline void TRK_ck_free(void* ptr, const char* file, - const char* func, u32 line) { +static inline void TRK_ck_free(void* ptr, const char* file, const char* func, + u32 line) { TRK_free_buf(ptr, file, func, line); DFL_ck_free(ptr); @@ -557,30 +563,27 @@ static inline void TRK_ck_free(void* ptr, const char* file, /* Aliasing user-facing names to tracking functions: */ -#define ck_alloc(_p1) \ - TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) +# define ck_alloc(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) -#define ck_alloc_nozero(_p1) \ - TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) +#define ck_alloc_nozero(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) -#define ck_realloc(_p1, _p2) \ +# define ck_realloc(_p1, _p2)\ TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -#define ck_realloc_block(_p1, _p2) \ +# define ck_realloc_block(_p1, _p2)\ TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -#define ck_strdup(_p1) \ - TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__) +# define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__) -#define ck_memdup(_p1, _p2) \ +# define ck_memdup(_p1, _p2)\ TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -#define ck_memdup_str(_p1, _p2) \ +# define ck_memdup_str(_p1, _p2)\ TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -#define ck_free(_p1) \ - TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) +# define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) #endif /* ^!DEBUG_BUILD */ #endif /* ! 
_HAVE_ALLOC_INL_H */ + diff --git a/include/android-ashmem.h b/include/android-ashmem.h index a787c04b..a4b5bf30 100644 --- a/include/android-ashmem.h +++ b/include/android-ashmem.h @@ -8,74 +8,73 @@ #include #if __ANDROID_API__ >= 26 -#define shmat bionic_shmat -#define shmctl bionic_shmctl -#define shmdt bionic_shmdt -#define shmget bionic_shmget +# define shmat bionic_shmat +# define shmctl bionic_shmctl +# define shmdt bionic_shmdt +# define shmget bionic_shmget #endif - #include +#include #undef shmat #undef shmctl #undef shmdt #undef shmget #include -#define ASHMEM_DEVICE "/dev/ashmem" +#define ASHMEM_DEVICE "/dev/ashmem" -static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) -{ - int ret = 0; - if (__cmd == IPC_RMID) { - int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL); - struct ashmem_pin pin = {0, length}; - ret = ioctl(__shmid, ASHMEM_UNPIN, &pin); - close(__shmid); - } +static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) { + + int ret = 0; + if (__cmd == IPC_RMID) { + + int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL); + struct ashmem_pin pin = {0, length}; + ret = ioctl(__shmid, ASHMEM_UNPIN, &pin); + close(__shmid); + + } + + return ret; - return ret; } -static inline int shmget (key_t __key, size_t __size, int __shmflg) -{ - int fd,ret; - char ourkey[11]; +static inline int shmget(key_t __key, size_t __size, int __shmflg) { - fd = open(ASHMEM_DEVICE, O_RDWR); - if (fd < 0) - return fd; + int fd, ret; + char ourkey[11]; - sprintf(ourkey,"%d",__key); - ret = ioctl(fd, ASHMEM_SET_NAME, ourkey); - if (ret < 0) - goto error; + fd = open(ASHMEM_DEVICE, O_RDWR); + if (fd < 0) return fd; - ret = ioctl(fd, ASHMEM_SET_SIZE, __size); - if (ret < 0) - goto error; + sprintf(ourkey, "%d", __key); + ret = ioctl(fd, ASHMEM_SET_NAME, ourkey); + if (ret < 0) goto error; - return fd; + ret = ioctl(fd, ASHMEM_SET_SIZE, __size); + if (ret < 0) goto error; + + return fd; error: - close(fd); - return ret; + close(fd); + return ret; + } -static inline void *shmat (int __shmid, const void *__shmaddr, int __shmflg) -{ - int size; +static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) { + + int size; void *ptr; - + size = ioctl(__shmid, ASHMEM_GET_SIZE, NULL); - if (size < 0) { - return NULL; - } + if (size < 0) { return NULL; } ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, __shmid, 0); - if (ptr == MAP_FAILED) { - return NULL; - } - + if (ptr == MAP_FAILED) { return NULL; } + return ptr; + } #endif + diff --git a/include/common.h b/include/common.h index 161caa39..9845c2af 100644 --- a/include/common.h +++ b/include/common.h @@ -4,3 +4,4 @@ void detect_file_args(char **argv, u8 *prog_in); #endif + diff --git a/include/config.h b/include/config.h index 17836151..babba3bd 100644 --- a/include/config.h +++ b/include/config.h @@ -21,7 +21,7 @@ /* Version string: */ -#define VERSION "++2.53d" // c = release, d = volatile github dev +#define VERSION "++2.53d" // c = release, d = volatile github dev /****************************************************** * * @@ -41,64 +41,64 @@ /* Default timeout for fuzzed code (milliseconds). 
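*/

/* The ashmem shims above exist so the standard SysV shared-memory pattern
   used by AFL's SHM setup keeps working on Android. The pattern itself
   (sketch, error handling trimmed; MAP_SIZE per config.h; note the caller
   must check the shmat() result, and shmctl(id, IPC_RMID, 0) releases the
   segment): */

#include <sys/ipc.h>
#include <sys/shm.h>

#define MAP_SIZE (1 << 16)

static unsigned char* get_trace_buffer(int* shm_id) {

  *shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);

  if (*shm_id < 0) return 0;

  return (unsigned char*)shmat(*shm_id, 0, 0);

}

/*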
This is the upper bound, also used for detecting hangs; the actual value is auto-scaled: */ -#define EXEC_TIMEOUT 1000 +#define EXEC_TIMEOUT 1000 /* Timeout rounding factor when auto-scaling (milliseconds): */ -#define EXEC_TM_ROUND 20 +#define EXEC_TM_ROUND 20 /* Default memory limit for child process (MB): */ -#ifndef __x86_64__ -# define MEM_LIMIT 25 +#ifndef __x86_64__ +# define MEM_LIMIT 25 #else -# define MEM_LIMIT 50 +# define MEM_LIMIT 50 #endif /* ^!__x86_64__ */ /* Default memory limit when running in QEMU mode (MB): */ -#define MEM_LIMIT_QEMU 200 +#define MEM_LIMIT_QEMU 200 /* Default memory limit when running in Unicorn mode (MB): */ -#define MEM_LIMIT_UNICORN 200 +#define MEM_LIMIT_UNICORN 200 /* Number of calibration cycles per every new test case (and for test cases that show variable behavior): */ -#define CAL_CYCLES 8 -#define CAL_CYCLES_LONG 40 +#define CAL_CYCLES 8 +#define CAL_CYCLES_LONG 40 /* Number of subsequent timeouts before abandoning an input file: */ -#define TMOUT_LIMIT 250 +#define TMOUT_LIMIT 250 /* Maximum number of unique hangs or crashes to record: */ -#define KEEP_UNIQUE_HANG 500 -#define KEEP_UNIQUE_CRASH 5000 +#define KEEP_UNIQUE_HANG 500 +#define KEEP_UNIQUE_CRASH 5000 /* Baseline number of random tweaks during a single 'havoc' stage: */ -#define HAVOC_CYCLES 256 -#define HAVOC_CYCLES_INIT 1024 +#define HAVOC_CYCLES 256 +#define HAVOC_CYCLES_INIT 1024 /* Maximum multiplier for the above (should be a power of two, beware of 32-bit int overflows): */ -#define HAVOC_MAX_MULT 16 +#define HAVOC_MAX_MULT 16 #define HAVOC_MAX_MULT_MOPT 32 /* Absolute minimum number of havoc cycles (after all adjustments): */ -#define HAVOC_MIN 16 +#define HAVOC_MIN 16 /* Power Schedule Divisor */ -#define POWER_BETA 1 -#define MAX_FACTOR (POWER_BETA * 32) +#define POWER_BETA 1 +#define MAX_FACTOR (POWER_BETA * 32) /* Maximum stacking for havoc-stage tweaks. The actual value is calculated - like this: + like this: n = random between 1 and HAVOC_STACK_POW2 stacking = 2^n @@ -106,116 +106,116 @@ In other words, the default (n = 7) produces 2, 4, 8, 16, 32, 64, or 128 stacked tweaks: */ -#define HAVOC_STACK_POW2 7 +#define HAVOC_STACK_POW2 7 /* Caps on block sizes for cloning and deletion operations. 
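*/

/* The havoc stacking rule above, spelled out in code: pick n in
   [1, HAVOC_STACK_POW2] and apply 2^n stacked tweaks - i.e. 2, 4, 8, 16,
   32, 64, or 128 with the default of 7 (UR() and u32 as declared in
   afl-fuzz.h / types.h): */

static u32 pick_stacking(void) {

  return 1 << (1 + UR(HAVOC_STACK_POW2));

}

/*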
Each of these ranges has a 33% probability of getting picked, except for the first two cycles where smaller blocks are favored: */ -#define HAVOC_BLK_SMALL 32 -#define HAVOC_BLK_MEDIUM 128 -#define HAVOC_BLK_LARGE 1500 +#define HAVOC_BLK_SMALL 32 +#define HAVOC_BLK_MEDIUM 128 +#define HAVOC_BLK_LARGE 1500 /* Extra-large blocks, selected very rarely (<5% of the time): */ -#define HAVOC_BLK_XL 32768 +#define HAVOC_BLK_XL 32768 /* Probabilities of skipping non-favored entries in the queue, expressed as percentages: */ -#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */ -#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */ -#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */ +#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */ +#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */ +#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */ /* Splicing cycle count: */ -#define SPLICE_CYCLES 15 +#define SPLICE_CYCLES 15 /* Nominal per-splice havoc cycle length: */ -#define SPLICE_HAVOC 32 +#define SPLICE_HAVOC 32 /* Maximum offset for integer addition / subtraction stages: */ -#define ARITH_MAX 35 +#define ARITH_MAX 35 /* Limits for the test case trimmer. The absolute minimum chunk size; and the starting and ending divisors for chopping up the input file: */ -#define TRIM_MIN_BYTES 4 -#define TRIM_START_STEPS 16 -#define TRIM_END_STEPS 1024 +#define TRIM_MIN_BYTES 4 +#define TRIM_START_STEPS 16 +#define TRIM_END_STEPS 1024 /* Maximum size of input file, in bytes (keep under 100MB): */ -#define MAX_FILE (1 * 1024 * 1024) +#define MAX_FILE (1 * 1024 * 1024) /* The same, for the test case minimizer: */ -#define TMIN_MAX_FILE (10 * 1024 * 1024) +#define TMIN_MAX_FILE (10 * 1024 * 1024) /* Block normalization steps for afl-tmin: */ -#define TMIN_SET_MIN_SIZE 4 -#define TMIN_SET_STEPS 128 +#define TMIN_SET_MIN_SIZE 4 +#define TMIN_SET_STEPS 128 /* Maximum dictionary token size (-x), in bytes: */ -#define MAX_DICT_FILE 128 +#define MAX_DICT_FILE 128 /* Length limits for auto-detected dictionary tokens: */ -#define MIN_AUTO_EXTRA 3 -#define MAX_AUTO_EXTRA 32 +#define MIN_AUTO_EXTRA 3 +#define MAX_AUTO_EXTRA 32 /* Maximum number of user-specified dictionary tokens to use in deterministic steps; past this point, the "extras/user" step will be still carried out, but with proportionally lower odds: */ -#define MAX_DET_EXTRAS 200 +#define MAX_DET_EXTRAS 200 /* Maximum number of auto-extracted dictionary tokens to actually use in fuzzing (first value), and to keep in memory as candidates. The latter should be much higher than the former. */ -#define USE_AUTO_EXTRAS 50 -#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10) +#define USE_AUTO_EXTRAS 50 +#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10) /* Scaling factor for the effector map used to skip some of the more expensive deterministic steps. 
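*/

/* How EFF_MAP_SCALE2 below is consumed: afl-fuzz derives helpers along
   these lines, so one effector-map byte stands for a 2^3 = 8 byte window
   of the input file: */

#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)               /* byte -> slot  */
#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))    /* partial tail  */
#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))         /* map length    */

/*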
The actual divisor is set to 2^EFF_MAP_SCALE2 bytes: */ -#define EFF_MAP_SCALE2 3 +#define EFF_MAP_SCALE2 3 /* Minimum input file length at which the effector logic kicks in: */ -#define EFF_MIN_LEN 128 +#define EFF_MIN_LEN 128 /* Maximum effector density past which everything is just fuzzed unconditionally (%): */ -#define EFF_MAX_PERC 90 +#define EFF_MAX_PERC 90 /* UI refresh frequency (Hz): */ -#define UI_TARGET_HZ 5 +#define UI_TARGET_HZ 5 /* Fuzzer stats file and plot update intervals (sec): */ -#define STATS_UPDATE_SEC 60 -#define PLOT_UPDATE_SEC 5 +#define STATS_UPDATE_SEC 60 +#define PLOT_UPDATE_SEC 5 /* Smoothing divisor for CPU load and exec speed stats (1 - no smoothing). */ -#define AVG_SMOOTHING 16 +#define AVG_SMOOTHING 16 /* Sync interval (every n havoc cycles): */ -#define SYNC_INTERVAL 5 +#define SYNC_INTERVAL 5 /* Output directory reuse grace period (minutes): */ -#define OUTPUT_GRACE 25 +#define OUTPUT_GRACE 25 /* Uncomment to use simple file names (id_NNNNNN): */ @@ -223,42 +223,42 @@ /* List of interesting values to use in fuzzing. */ -#define INTERESTING_8 \ - -128, /* Overflow signed 8-bit when decremented */ \ - -1, /* */ \ - 0, /* */ \ - 1, /* */ \ - 16, /* One-off with common buffer size */ \ - 32, /* One-off with common buffer size */ \ - 64, /* One-off with common buffer size */ \ - 100, /* One-off with common buffer size */ \ - 127 /* Overflow signed 8-bit when incremented */ +#define INTERESTING_8 \ + -128, /* Overflow signed 8-bit when decremented */ \ + -1, /* */ \ + 0, /* */ \ + 1, /* */ \ + 16, /* One-off with common buffer size */ \ + 32, /* One-off with common buffer size */ \ + 64, /* One-off with common buffer size */ \ + 100, /* One-off with common buffer size */ \ + 127 /* Overflow signed 8-bit when incremented */ #define INTERESTING_8_LEN 9 -#define INTERESTING_16 \ - -32768, /* Overflow signed 16-bit when decremented */ \ - -129, /* Overflow signed 8-bit */ \ - 128, /* Overflow signed 8-bit */ \ - 255, /* Overflow unsig 8-bit when incremented */ \ - 256, /* Overflow unsig 8-bit */ \ - 512, /* One-off with common buffer size */ \ - 1000, /* One-off with common buffer size */ \ - 1024, /* One-off with common buffer size */ \ - 4096, /* One-off with common buffer size */ \ - 32767 /* Overflow signed 16-bit when incremented */ +#define INTERESTING_16 \ + -32768, /* Overflow signed 16-bit when decremented */ \ + -129, /* Overflow signed 8-bit */ \ + 128, /* Overflow signed 8-bit */ \ + 255, /* Overflow unsig 8-bit when incremented */ \ + 256, /* Overflow unsig 8-bit */ \ + 512, /* One-off with common buffer size */ \ + 1000, /* One-off with common buffer size */ \ + 1024, /* One-off with common buffer size */ \ + 4096, /* One-off with common buffer size */ \ + 32767 /* Overflow signed 16-bit when incremented */ #define INTERESTING_16_LEN 10 -#define INTERESTING_32 \ - -2147483648LL, /* Overflow signed 32-bit when decremented */ \ - -100663046, /* Large negative number (endian-agnostic) */ \ - -32769, /* Overflow signed 16-bit */ \ - 32768, /* Overflow signed 16-bit */ \ - 65535, /* Overflow unsig 16-bit when incremented */ \ - 65536, /* Overflow unsig 16 bit */ \ - 100663045, /* Large positive number (endian-agnostic) */ \ - 2147483647 /* Overflow signed 32-bit when incremented */ +#define INTERESTING_32 \ + -2147483648LL, /* Overflow signed 32-bit when decremented */ \ + -100663046, /* Large negative number (endian-agnostic) */ \ + -32769, /* Overflow signed 16-bit */ \ + 32768, /* Overflow signed 16-bit */ \ + 65535, /* Overflow unsig 16-bit when 
incremented */ \ + 65536, /* Overflow unsig 16 bit */ \ + 100663045, /* Large positive number (endian-agnostic) */ \ + 2147483647 /* Overflow signed 32-bit when incremented */ #define INTERESTING_32_LEN 8 @@ -270,57 +270,57 @@ /* Call count interval between reseeding the libc PRNG from /dev/urandom: */ -#define RESEED_RNG 10000 +#define RESEED_RNG 10000 /* Maximum line length passed from GCC to 'as' and used for parsing configuration files: */ -#define MAX_LINE 8192 +#define MAX_LINE 8192 /* Environment variable used to pass SHM ID to the called program. */ -#define SHM_ENV_VAR "__AFL_SHM_ID" +#define SHM_ENV_VAR "__AFL_SHM_ID" /* Other less interesting, internal-only variables. */ -#define CLANG_ENV_VAR "__AFL_CLANG_MODE" -#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK" -#define PERSIST_ENV_VAR "__AFL_PERSISTENT" -#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV" +#define CLANG_ENV_VAR "__AFL_CLANG_MODE" +#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK" +#define PERSIST_ENV_VAR "__AFL_PERSISTENT" +#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV" /* In-code signatures for deferred and persistent mode. */ -#define PERSIST_SIG "##SIG_AFL_PERSISTENT##" -#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##" +#define PERSIST_SIG "##SIG_AFL_PERSISTENT##" +#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##" /* Distinctive bitmap signature used to indicate failed execution: */ -#define EXEC_FAIL_SIG 0xfee1dead +#define EXEC_FAIL_SIG 0xfee1dead /* Distinctive exit code used to indicate MSAN trip condition: */ -#define MSAN_ERROR 86 +#define MSAN_ERROR 86 /* Designated file descriptors for forkserver commands (the application will use FORKSRV_FD and FORKSRV_FD + 1): */ -#define FORKSRV_FD 198 +#define FORKSRV_FD 198 /* Fork server init timeout multiplier: we'll wait the user-selected timeout plus this much for the fork server to spin up. */ -#define FORK_WAIT_MULT 10 +#define FORK_WAIT_MULT 10 /* Calibration timeout adjustments, to be a bit more generous when resuming fuzzing sessions or trying to calibrate already-added internal finds. The first value is a percentage, the other is in milliseconds: */ -#define CAL_TMOUT_PERC 125 -#define CAL_TMOUT_ADD 50 +#define CAL_TMOUT_PERC 125 +#define CAL_TMOUT_ADD 50 /* Number of chances to calibrate a case before giving up: */ -#define CAL_CHANCES 3 +#define CAL_CHANCES 3 /* Map size for the traced binary (2^MAP_SIZE_POW2). Must be greater than 2; you probably want to keep it under 18 or so for performance reasons @@ -328,28 +328,27 @@ problems with complex programs). You need to recompile the target binary after changing this - otherwise, SEGVs may ensue. */ -#define MAP_SIZE_POW2 16 -#define MAP_SIZE (1 << MAP_SIZE_POW2) +#define MAP_SIZE_POW2 16 +#define MAP_SIZE (1 << MAP_SIZE_POW2) /* Maximum allocator request size (keep well under INT_MAX): */ -#define MAX_ALLOC 0x40000000 +#define MAX_ALLOC 0x40000000 /* A made-up hashing seed: */ -#define HASH_CONST 0xa5b35705 +#define HASH_CONST 0xa5b35705 /* Constants for afl-gotcpu to control busy loop timing: */ -#define CTEST_TARGET_MS 5000 -#define CTEST_CORE_TRG_MS 1000 -#define CTEST_BUSY_CYCLES (10 * 1000 * 1000) +#define CTEST_TARGET_MS 5000 +#define CTEST_CORE_TRG_MS 1000 +#define CTEST_BUSY_CYCLES (10 * 1000 * 1000) /* Enable NeverZero counters in QEMU mode */ #define AFL_QEMU_NOT_ZERO - /* Uncomment this to use inferior block-coverage-based instrumentation. Note that you need to recompile the target binary for this to have any effect: */ @@ -368,3 +367,4 @@ // #define IGNORE_FINDS #endif /* ! 
_HAVE_CONFIG_H */ + diff --git a/include/debug.h b/include/debug.h index c0044280..6a59ad7a 100644 --- a/include/debug.h +++ b/include/debug.h @@ -108,39 +108,39 @@ #ifdef FANCY_BOXES -# define SET_G1 "\x1b)0" /* Set G1 for box drawing */ -# define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ -# define bSTART "\x0e" /* Enter G1 drawing mode */ -# define bSTOP "\x0f" /* Leave G1 drawing mode */ -# define bH "q" /* Horizontal line */ -# define bV "x" /* Vertical line */ -# define bLT "l" /* Left top corner */ -# define bRT "k" /* Right top corner */ -# define bLB "m" /* Left bottom corner */ -# define bRB "j" /* Right bottom corner */ -# define bX "n" /* Cross */ -# define bVR "t" /* Vertical, branch right */ -# define bVL "u" /* Vertical, branch left */ -# define bHT "v" /* Horizontal, branch top */ -# define bHB "w" /* Horizontal, branch bottom */ +# define SET_G1 "\x1b)0" /* Set G1 for box drawing */ +# define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ +# define bSTART "\x0e" /* Enter G1 drawing mode */ +# define bSTOP "\x0f" /* Leave G1 drawing mode */ +# define bH "q" /* Horizontal line */ +# define bV "x" /* Vertical line */ +# define bLT "l" /* Left top corner */ +# define bRT "k" /* Right top corner */ +# define bLB "m" /* Left bottom corner */ +# define bRB "j" /* Right bottom corner */ +# define bX "n" /* Cross */ +# define bVR "t" /* Vertical, branch right */ +# define bVL "u" /* Vertical, branch left */ +# define bHT "v" /* Horizontal, branch top */ +# define bHB "w" /* Horizontal, branch bottom */ #else -# define SET_G1 "" +# define SET_G1 "" # define RESET_G1 "" -# define bSTART "" -# define bSTOP "" -# define bH "-" -# define bV "|" -# define bLT "+" -# define bRT "+" -# define bLB "+" -# define bRB "+" -# define bX "+" -# define bVR "+" -# define bVL "+" -# define bHT "+" -# define bHB "+" +# define bSTART "" +# define bSTOP "" +# define bH "-" +# define bV "|" +# define bLT "+" +# define bRT "+" +# define bLB "+" +# define bRB "+" +# define bX "+" +# define bVR "+" +# define bVL "+" +# define bHT "+" +# define bHB "+" #endif /* ^FANCY_BOXES */ @@ -148,11 +148,11 @@ * Misc terminal codes * ***********************/ -#define TERM_HOME "\x1b[H" -#define TERM_CLEAR TERM_HOME "\x1b[2J" -#define cEOL "\x1b[0K" -#define CURSOR_HIDE "\x1b[?25l" -#define CURSOR_SHOW "\x1b[?25h" +#define TERM_HOME "\x1b[H" +#define TERM_CLEAR TERM_HOME "\x1b[2J" +#define cEOL "\x1b[0K" +#define CURSOR_HIDE "\x1b[?25l" +#define CURSOR_SHOW "\x1b[?25h" /************************ * Debug & error macros * @@ -161,91 +161,125 @@ /* Just print stuff to the appropriate stream. */ #ifdef MESSAGES_TO_STDOUT -# define SAYF(x...) printf(x) -#else -# define SAYF(x...) fprintf(stderr, x) +# define SAYF(x...) printf(x) +#else +# define SAYF(x...) fprintf(stderr, x) #endif /* ^MESSAGES_TO_STDOUT */ /* Show a prefixed warning. */ -#define WARNF(x...) do { \ +#define WARNF(x...) \ + do { \ + \ SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \ - SAYF(cRST "\n"); \ + SAYF(cRST "\n"); \ + \ } while (0) /* Show a prefixed "doing something" message. */ -#define ACTF(x...) do { \ +#define ACTF(x...) \ + do { \ + \ SAYF(cLBL "[*] " cRST x); \ - SAYF(cRST "\n"); \ + SAYF(cRST "\n"); \ + \ } while (0) /* Show a prefixed "success" message. */ -#define OKF(x...) do { \ +#define OKF(x...) \ + do { \ + \ SAYF(cLGN "[+] " cRST x); \ - SAYF(cRST "\n"); \ + SAYF(cRST "\n"); \ + \ } while (0) /* Show a prefixed fatal error message (not used in afl). */ -#define BADF(x...) do { \ +#define BADF(x...) 
\ + do { \ + \ SAYF(cLRD "\n[-] " cRST x); \ - SAYF(cRST "\n"); \ + SAYF(cRST "\n"); \ + \ } while (0) /* Die with a verbose non-OS fatal error message. */ -#define FATAL(x...) do { \ - SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \ - cRST x); \ - SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", \ - __FUNCTION__, __FILE__, __LINE__); \ - exit(1); \ +#define FATAL(x...) \ + do { \ + \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] PROGRAM ABORT : " cRST x); \ + SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \ + __FILE__, __LINE__); \ + exit(1); \ + \ } while (0) /* Die by calling abort() to provide a core dump. */ -#define ABORT(x...) do { \ - SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \ - cRST x); \ - SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", \ - __FUNCTION__, __FILE__, __LINE__); \ - abort(); \ +#define ABORT(x...) \ + do { \ + \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] PROGRAM ABORT : " cRST x); \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \ + __FILE__, __LINE__); \ + abort(); \ + \ } while (0) /* Die while also including the output of perror(). */ -#define PFATAL(x...) do { \ - fflush(stdout); \ - SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] SYSTEM ERROR : " \ - cRST x); \ - SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", \ - __FUNCTION__, __FILE__, __LINE__); \ - SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \ - exit(1); \ +#define PFATAL(x...) \ + do { \ + \ + fflush(stdout); \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] SYSTEM ERROR : " cRST x); \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", __FUNCTION__, \ + __FILE__, __LINE__); \ + SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \ + exit(1); \ + \ } while (0) /* Die with FAULT() or PFAULT() depending on the value of res (used to interpret different failure modes for read(), write(), etc). */ -#define RPFATAL(res, x...) do { \ - if (res < 0) PFATAL(x); else FATAL(x); \ +#define RPFATAL(res, x...) \ + do { \ + \ + if (res < 0) \ + PFATAL(x); \ + else \ + FATAL(x); \ + \ } while (0) /* Error-checking versions of read() and write() that call RPFATAL() as appropriate. */ -#define ck_write(fd, buf, len, fn) do { \ - u32 _len = (len); \ - s32 _res = write(fd, buf, _len); \ +#define ck_write(fd, buf, len, fn) \ + do { \ + \ + u32 _len = (len); \ + s32 _res = write(fd, buf, _len); \ if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \ + \ } while (0) -#define ck_read(fd, buf, len, fn) do { \ - u32 _len = (len); \ - s32 _res = read(fd, buf, _len); \ +#define ck_read(fd, buf, len, fn) \ + do { \ + \ + u32 _len = (len); \ + s32 _res = read(fd, buf, _len); \ if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \ + \ } while (0) #endif /* ! _HAVE_DEBUG_H */ + diff --git a/include/forkserver.h b/include/forkserver.h index fa40d9c6..af5dab72 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -5,21 +5,21 @@ void handle_timeout(int sig); void init_forkserver(char **argv); #ifdef __APPLE__ -#define MSG_FORK_ON_APPLE \ - " - On MacOS X, the semantics of fork() syscalls are non-standard and " \ - "may\n" \ - " break afl-fuzz performance optimizations when running " \ - "platform-specific\n" \ +# define MSG_FORK_ON_APPLE \ + " - On MacOS X, the semantics of fork() syscalls are non-standard and " \ + "may\n" \ + " break afl-fuzz performance optimizations when running " \ + "platform-specific\n" \ " targets. 
To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" #else -#define MSG_FORK_ON_APPLE "" +# define MSG_FORK_ON_APPLE "" #endif #ifdef RLIMIT_AS - #define MSG_ULIMIT_USAGE " ( ulimit -Sv $[%llu << 10];" +# define MSG_ULIMIT_USAGE " ( ulimit -Sv $[%llu << 10];" #else - #define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];" +# define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];" #endif /* ^RLIMIT_AS */ - #endif + diff --git a/include/hash.h b/include/hash.h index f39a8257..5d0512a6 100644 --- a/include/hash.h +++ b/include/hash.h @@ -31,12 +31,12 @@ #ifdef __x86_64__ -#define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r)))) +# define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r)))) static inline u32 hash32(const void* key, u32 len, u32 seed) { const u64* data = (u64*)key; - u64 h1 = seed ^ len; + u64 h1 = seed ^ len; len >>= 3; @@ -45,12 +45,12 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { u64 k1 = *data++; k1 *= 0x87c37b91114253d5ULL; - k1 = ROL64(k1, 31); + k1 = ROL64(k1, 31); k1 *= 0x4cf5ad432745937fULL; h1 ^= k1; - h1 = ROL64(h1, 27); - h1 = h1 * 5 + 0x52dce729; + h1 = ROL64(h1, 27); + h1 = h1 * 5 + 0x52dce729; } @@ -64,14 +64,14 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { } -#else +#else -#define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r)))) +# define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r)))) static inline u32 hash32(const void* key, u32 len, u32 seed) { - const u32* data = (u32*)key; - u32 h1 = seed ^ len; + const u32* data = (u32*)key; + u32 h1 = seed ^ len; len >>= 2; @@ -80,12 +80,12 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { u32 k1 = *data++; k1 *= 0xcc9e2d51; - k1 = ROL32(k1, 15); + k1 = ROL32(k1, 15); k1 *= 0x1b873593; h1 ^= k1; - h1 = ROL32(h1, 13); - h1 = h1 * 5 + 0xe6546b64; + h1 = ROL32(h1, 13); + h1 = h1 * 5 + 0xe6546b64; } @@ -102,3 +102,4 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { #endif /* ^__x86_64__ */ #endif /* !_HAVE_HASH_H */ + diff --git a/include/sharedmem.h b/include/sharedmem.h index 9aa44d0e..7e13b13b 100644 --- a/include/sharedmem.h +++ b/include/sharedmem.h @@ -5,3 +5,4 @@ void setup_shm(unsigned char dumb_mode); void remove_shm(void); #endif + diff --git a/include/types.h b/include/types.h index 7606d4ed..60ae64c2 100644 --- a/include/types.h +++ b/include/types.h @@ -46,26 +46,31 @@ typedef unsigned long long u64; typedef uint64_t u64; #endif /* ^__x86_64__ */ -typedef int8_t s8; -typedef int16_t s16; -typedef int32_t s32; -typedef int64_t s64; +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; #ifndef MIN -# define MIN(_a,_b) ((_a) > (_b) ? (_b) : (_a)) -# define MAX(_a,_b) ((_a) > (_b) ? (_a) : (_b)) +# define MIN(_a, _b) ((_a) > (_b) ? (_b) : (_a)) +# define MAX(_a, _b) ((_a) > (_b) ? 
(_a) : (_b)) #endif /* !MIN */ -#define SWAP16(_x) ({ \ - u16 _ret = (_x); \ +#define SWAP16(_x) \ + ({ \ + \ + u16 _ret = (_x); \ (u16)((_ret << 8) | (_ret >> 8)); \ + \ }) -#define SWAP32(_x) ({ \ - u32 _ret = (_x); \ - (u32)((_ret << 24) | (_ret >> 24) | \ - ((_ret << 8) & 0x00FF0000) | \ - ((_ret >> 8) & 0x0000FF00)); \ +#define SWAP32(_x) \ + ({ \ + \ + u32 _ret = (_x); \ + (u32)((_ret << 24) | (_ret >> 24) | ((_ret << 8) & 0x00FF0000) | \ + ((_ret >> 8) & 0x0000FF00)); \ + \ }) #ifdef AFL_LLVM_PASS @@ -77,15 +82,15 @@ typedef int64_t s64; #define STRINGIFY_INTERNAL(x) #x #define STRINGIFY(x) STRINGIFY_INTERNAL(x) -#define MEM_BARRIER() \ - __asm__ volatile("" ::: "memory") +#define MEM_BARRIER() __asm__ volatile("" ::: "memory") #if __GNUC__ < 6 - #define likely(_x) (_x) - #define unlikely(_x) (_x) +# define likely(_x) (_x) +# define unlikely(_x) (_x) #else - #define likely(_x) __builtin_expect(!!(_x), 1) - #define unlikely(_x) __builtin_expect(!!(_x), 0) +# define likely(_x) __builtin_expect(!!(_x), 1) +# define unlikely(_x) __builtin_expect(!!(_x), 0) #endif #endif /* ! _HAVE_TYPES_H */ + diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c index 71620b17..5104fed4 100644 --- a/libdislocator/libdislocator.so.c +++ b/libdislocator/libdislocator.so.c @@ -38,23 +38,35 @@ /* Error / message handling: */ -#define DEBUGF(_x...) do { \ - if (alloc_verbose) { \ - if (++call_depth == 1) { \ +#define DEBUGF(_x...) \ + do { \ + \ + if (alloc_verbose) { \ + \ + if (++call_depth == 1) { \ + \ fprintf(stderr, "[AFL] " _x); \ - fprintf(stderr, "\n"); \ - } \ - call_depth--; \ - } \ + fprintf(stderr, "\n"); \ + \ + } \ + call_depth--; \ + \ + } \ + \ } while (0) -#define FATAL(_x...) do { \ - if (++call_depth == 1) { \ +#define FATAL(_x...) \ + do { \ + \ + if (++call_depth == 1) { \ + \ fprintf(stderr, "*** [AFL] " _x); \ - fprintf(stderr, " ***\n"); \ - abort(); \ - } \ - call_depth--; \ + fprintf(stderr, " ***\n"); \ + abort(); \ + \ + } \ + call_depth--; \ + \ } while (0) /* Macro to count the number of pages needed to store a buffer: */ @@ -63,7 +75,7 @@ /* Canary & clobber bytes: */ -#define ALLOC_CANARY 0xAACCAACC +#define ALLOC_CANARY 0xAACCAACC #define ALLOC_CLOBBER 0xCC #define PTR_C(_p) (((u32*)(_p))[-1]) @@ -73,14 +85,13 @@ static u32 max_mem = MAX_ALLOC; /* Max heap usage to permit */ static u8 alloc_verbose, /* Additional debug messages */ - hard_fail, /* abort() when max_mem exceeded? */ - no_calloc_over; /* abort() on calloc() overflows? */ + hard_fail, /* abort() when max_mem exceeded? */ + no_calloc_over; /* abort() on calloc() overflows? */ static __thread size_t total_mem; /* Currently allocated mem */ static __thread u32 call_depth; /* To avoid recursion via fprintf() */ - /* This is the main alloc function. It allocates one page more than necessary, sets that tailing page to PROT_NONE, and then increments the return address so that it is right-aligned to that boundary. 
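As a simplified, hypothetical sketch of that layout (not the actual __dislocator_alloc(), which additionally enforces max_mem and stores the length and ALLOC_CANARY in the bytes preceding the returned pointer):

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

#define PAGE 4096                       /* page size assumed for the sketch */

static void* guarded_alloc(size_t len) {

  size_t   total = (len / PAGE + 2) * PAGE;      /* data pages + guard page */
  uint8_t* base  = mmap(NULL, total, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (base == MAP_FAILED) return NULL;

  /* Make the trailing page inaccessible: an overflow faults immediately. */
  mprotect(base + total - PAGE, PAGE, PROT_NONE);

  /* Right-align the user buffer against the PROT_NONE page. */
  return base + total - PAGE - len;

}
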
Since it always uses mmap(), @@ -90,14 +101,11 @@ static void* __dislocator_alloc(size_t len) { void* ret; - if (total_mem + len > max_mem || total_mem + len < total_mem) { - if (hard_fail) - FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024); + if (hard_fail) FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024); - DEBUGF("total allocs exceed %u MB, returning NULL", - max_mem / 1024 / 1024); + DEBUGF("total allocs exceed %u MB, returning NULL", max_mem / 1024 / 1024); return NULL; @@ -142,7 +150,6 @@ static void* __dislocator_alloc(size_t len) { } - /* The "user-facing" wrapper for calloc(). This just checks for overflows and displays debug messages if requested. */ @@ -157,8 +164,11 @@ void* calloc(size_t elem_len, size_t elem_cnt) { if (elem_cnt && len / elem_cnt != elem_len) { if (no_calloc_over) { - DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, elem_cnt); + + DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, + elem_cnt); return NULL; + } FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt); @@ -174,7 +184,6 @@ void* calloc(size_t elem_len, size_t elem_cnt) { } - /* The wrapper for malloc(). Roughly the same, also clobbers the returned memory (unlike calloc(), malloc() is not guaranteed to return zeroed memory). */ @@ -193,7 +202,6 @@ void* malloc(size_t len) { } - /* The wrapper for free(). This simply marks the entire region as PROT_NONE. If the region is already freed, the code will segfault during the attempt to read the canary. Not very graceful, but works, right? */ @@ -224,7 +232,6 @@ void free(void* ptr) { } - /* Realloc is pretty straightforward, too. We forcibly reallocate the buffer, move data, and then free (aka mprotect()) the original one. */ @@ -249,7 +256,6 @@ void* realloc(void* ptr, size_t len) { } - __attribute__((constructor)) void __dislocator_init(void) { u8* tmp = getenv("AFL_LD_LIMIT_MB"); @@ -266,3 +272,4 @@ __attribute__((constructor)) void __dislocator_init(void) { no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER"); } + diff --git a/libtokencap/libtokencap.so.c b/libtokencap/libtokencap.so.c index 54072279..fa26447e 100644 --- a/libtokencap/libtokencap.so.c +++ b/libtokencap/libtokencap.so.c @@ -30,27 +30,23 @@ # error "Sorry, this library is Linux-specific for now!" #endif /* !__linux__ */ - /* Mapping data and such */ #define MAX_MAPPINGS 1024 -static struct mapping { - void *st, *en; -} __tokencap_ro[MAX_MAPPINGS]; +static struct mapping { void *st, *en; } __tokencap_ro[MAX_MAPPINGS]; static u32 __tokencap_ro_cnt; static u8 __tokencap_ro_loaded; static FILE* __tokencap_out_file; - /* Identify read-only regions in memory. Only parameters that fall into these ranges are worth dumping when passed to strcmp() and so on. Read-write regions are far more likely to contain user input instead. */ static void __tokencap_load_mappings(void) { - u8 buf[MAX_LINE]; + u8 buf[MAX_LINE]; FILE* f = fopen("/proc/self/maps", "r"); __tokencap_ro_loaded = 1; @@ -59,8 +55,8 @@ static void __tokencap_load_mappings(void) { while (fgets(buf, MAX_LINE, f)) { - u8 rf, wf; - void* st, *en; + u8 rf, wf; + void *st, *en; if (sscanf(buf, "%p-%p %c%c", &st, &en, &rf, &wf) != 4) continue; if (wf == 'w' || rf != 'r') continue; @@ -76,7 +72,6 @@ static void __tokencap_load_mappings(void) { } - /* Check an address against the list of read-only mappings. 
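For reference, a minimal stand-alone demo of the parse that builds this list (hypothetical, mirroring the sscanf() format string used in __tokencap_load_mappings() above; the sample line is made up):

#include <stdio.h>

int main(void) {

  void *st, *en;
  char  rf, wf;
  const char* line = "7f2a1c000000-7f2a1c021000 r--p 00000000 08:01 0";

  /* Same "%p-%p %c%c" pattern as above: start/end address, r/w flags. */
  if (sscanf(line, "%p-%p %c%c", &st, &en, &rf, &wf) == 4 &&
      rf == 'r' && wf != 'w')
    printf("read-only mapping %p-%p\n", st, en);

  return 0;

}
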
*/ static u8 __tokencap_is_ro(const void* ptr) { @@ -85,20 +80,19 @@ static u8 __tokencap_is_ro(const void* ptr) { if (!__tokencap_ro_loaded) __tokencap_load_mappings(); - for (i = 0; i < __tokencap_ro_cnt; i++) + for (i = 0; i < __tokencap_ro_cnt; i++) if (ptr >= __tokencap_ro[i].st && ptr <= __tokencap_ro[i].en) return 1; return 0; } - /* Dump an interesting token to output file, quoting and escaping it properly. */ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) { - u8 buf[MAX_AUTO_EXTRA * 4 + 1]; + u8 buf[MAX_AUTO_EXTRA * 4 + 1]; u32 i; u32 pos = 0; @@ -120,9 +114,7 @@ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) { pos += 4; break; - default: - - buf[pos++] = ptr[i]; + default: buf[pos++] = ptr[i]; } @@ -130,11 +122,10 @@ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) { buf[pos] = 0; - fprintf(__tokencap_out_file, "\"%s\"\n", buf); + fprintf(__tokencap_out_file, "\"%s\"\n", buf); } - /* Replacements for strcmp(), memcmp(), and so on. Note that these will be used only if the target is compiled with -fno-builtins and linked dynamically. */ @@ -151,13 +142,13 @@ int strcmp(const char* str1, const char* str2) { if (c1 != c2) return (c1 > c2) ? 1 : -1; if (!c1) return 0; - str1++; str2++; + str1++; + str2++; } } - #undef strncmp int strncmp(const char* str1, const char* str2, size_t len) { @@ -171,7 +162,8 @@ int strncmp(const char* str1, const char* str2, size_t len) { if (!c1) return 0; if (c1 != c2) return (c1 > c2) ? 1 : -1; - str1++; str2++; + str1++; + str2++; } @@ -179,7 +171,6 @@ int strncmp(const char* str1, const char* str2, size_t len) { } - #undef strcasecmp int strcasecmp(const char* str1, const char* str2) { @@ -193,13 +184,13 @@ int strcasecmp(const char* str1, const char* str2) { if (c1 != c2) return (c1 > c2) ? 1 : -1; if (!c1) return 0; - str1++; str2++; + str1++; + str2++; } } - #undef strncasecmp int strncasecmp(const char* str1, const char* str2, size_t len) { @@ -213,7 +204,8 @@ int strncasecmp(const char* str1, const char* str2, size_t len) { if (!c1) return 0; if (c1 != c2) return (c1 > c2) ? 1 : -1; - str1++; str2++; + str1++; + str2++; } @@ -221,7 +213,6 @@ int strncasecmp(const char* str1, const char* str2, size_t len) { } - #undef memcmp int memcmp(const void* mem1, const void* mem2, size_t len) { @@ -233,7 +224,8 @@ int memcmp(const void* mem1, const void* mem2, size_t len) { unsigned char c1 = *(const char*)mem1, c2 = *(const char*)mem2; if (c1 != c2) return (c1 > c2) ? 
1 : -1; - mem1++; mem2++; + mem1++; + mem2++; } @@ -241,7 +233,6 @@ int memcmp(const void* mem1, const void* mem2, size_t len) { } - #undef strstr char* strstr(const char* haystack, const char* needle) { @@ -249,16 +240,17 @@ char* strstr(const char* haystack, const char* needle) { if (__tokencap_is_ro(haystack)) __tokencap_dump(haystack, strlen(haystack), 1); - if (__tokencap_is_ro(needle)) - __tokencap_dump(needle, strlen(needle), 1); + if (__tokencap_is_ro(needle)) __tokencap_dump(needle, strlen(needle), 1); do { + const char* n = needle; const char* h = haystack; - while(*n && *h && *n == *h) n++, h++; + while (*n && *h && *n == *h) + n++, h++; - if(!*n) return (char*)haystack; + if (!*n) return (char*)haystack; } while (*(haystack++)); @@ -266,7 +258,6 @@ char* strstr(const char* haystack, const char* needle) { } - #undef strcasestr char* strcasestr(const char* haystack, const char* needle) { @@ -274,25 +265,24 @@ char* strcasestr(const char* haystack, const char* needle) { if (__tokencap_is_ro(haystack)) __tokencap_dump(haystack, strlen(haystack), 1); - if (__tokencap_is_ro(needle)) - __tokencap_dump(needle, strlen(needle), 1); + if (__tokencap_is_ro(needle)) __tokencap_dump(needle, strlen(needle), 1); do { const char* n = needle; const char* h = haystack; - while(*n && *h && tolower(*n) == tolower(*h)) n++, h++; + while (*n && *h && tolower(*n) == tolower(*h)) + n++, h++; - if(!*n) return (char*)haystack; + if (!*n) return (char*)haystack; - } while(*(haystack++)); + } while (*(haystack++)); return 0; } - /* Init code to open the output file (or default to stderr). */ __attribute__((constructor)) void __tokencap_init(void) { diff --git a/llvm_mode/LLVMInsTrim.so.cc b/llvm_mode/LLVMInsTrim.so.cc index 95b52d48..4b5597e2 100644 --- a/llvm_mode/LLVMInsTrim.so.cc +++ b/llvm_mode/LLVMInsTrim.so.cc @@ -37,268 +37,349 @@ static cl::opt LoopHeadOpt("loophead", cl::desc("LoopHead"), cl::init(false)); namespace { - struct InsTrim : public ModulePass { - protected: - std::list myWhitelist; +struct InsTrim : public ModulePass { - private: - std::mt19937 generator; - int total_instr = 0; + protected: + std::list myWhitelist; - unsigned int genLabel() { - return generator() & (MAP_SIZE - 1); - } + private: + std::mt19937 generator; + int total_instr = 0; - public: - static char ID; - InsTrim() : ModulePass(ID), generator(0) { - char* instWhiteListFilename = getenv("AFL_LLVM_WHITELIST"); - if (instWhiteListFilename) { - std::string line; - std::ifstream fileStream; - fileStream.open(instWhiteListFilename); - if (!fileStream) - report_fatal_error("Unable to open AFL_LLVM_WHITELIST"); + unsigned int genLabel() { + + return generator() & (MAP_SIZE - 1); + + } + + public: + static char ID; + InsTrim() : ModulePass(ID), generator(0) { + + char *instWhiteListFilename = getenv("AFL_LLVM_WHITELIST"); + if (instWhiteListFilename) { + + std::string line; + std::ifstream fileStream; + fileStream.open(instWhiteListFilename); + if (!fileStream) report_fatal_error("Unable to open AFL_LLVM_WHITELIST"); + getline(fileStream, line); + while (fileStream) { + + myWhitelist.push_back(line); getline(fileStream, line); - while (fileStream) { - myWhitelist.push_back(line); - getline(fileStream, line); - } + } + } - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired(); - } + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + + AU.addRequired(); + + } #if LLVM_VERSION_MAJOR < 4 - const char * + const char * #else - StringRef + StringRef #endif - getPassName() const override { - 
return "InstTrim Instrumentation"; + getPassName() const override { + + return "InstTrim Instrumentation"; + + } + + bool runOnModule(Module &M) override { + + char be_quiet = 0; + + if (isatty(2) && !getenv("AFL_QUIET")) { + + SAYF(cCYA "LLVMInsTrim" VERSION cRST " by csienslab\n"); + + } else + + be_quiet = 1; + +#if LLVM_VERSION_MAJOR < 9 + char *neverZero_counters_str; + if ((neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO")) != NULL) + OKF("LLVM neverZero activated (by hexcoder)\n"); +#endif + + if (getenv("AFL_LLVM_INSTRIM_LOOPHEAD") != NULL || + getenv("LOOPHEAD") != NULL) { + + LoopHeadOpt = true; + } - bool runOnModule(Module &M) override { - char be_quiet = 0; - - if (isatty(2) && !getenv("AFL_QUIET")) { - SAYF(cCYA "LLVMInsTrim" VERSION cRST " by csienslab\n"); - } else be_quiet = 1; - -#if LLVM_VERSION_MAJOR < 9 - char* neverZero_counters_str; - if ((neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO")) != NULL) - OKF("LLVM neverZero activated (by hexcoder)\n"); -#endif - - if (getenv("AFL_LLVM_INSTRIM_LOOPHEAD") != NULL || getenv("LOOPHEAD") != NULL) { - LoopHeadOpt = true; - } + // this is our default + MarkSetOpt = true; - // this is our default - MarkSetOpt = true; - -/* // I dont think this makes sense to port into LLVMInsTrim - char* inst_ratio_str = getenv("AFL_INST_RATIO"); - unsigned int inst_ratio = 100; - if (inst_ratio_str) { - if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || !inst_ratio || inst_ratio > 100) - FATAL("Bad value of AFL_INST_RATIO (must be between 1 and 100)"); - } -*/ + /* // I dont think this makes sense to port into LLVMInsTrim + char* inst_ratio_str = getenv("AFL_INST_RATIO"); + unsigned int inst_ratio = 100; + if (inst_ratio_str) { - LLVMContext &C = M.getContext(); - IntegerType *Int8Ty = IntegerType::getInt8Ty(C); - IntegerType *Int32Ty = IntegerType::getInt32Ty(C); + if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || !inst_ratio || + inst_ratio > 100) FATAL("Bad value of AFL_INST_RATIO (must be between 1 + and 100)"); - GlobalVariable *CovMapPtr = new GlobalVariable( + } + + */ + + LLVMContext &C = M.getContext(); + IntegerType *Int8Ty = IntegerType::getInt8Ty(C); + IntegerType *Int32Ty = IntegerType::getInt32Ty(C); + + GlobalVariable *CovMapPtr = new GlobalVariable( M, PointerType::getUnqual(Int8Ty), false, GlobalValue::ExternalLinkage, nullptr, "__afl_area_ptr"); - GlobalVariable *OldPrev = new GlobalVariable( - M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", - 0, GlobalVariable::GeneralDynamicTLSModel, 0, false); + GlobalVariable *OldPrev = new GlobalVariable( + M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0, + GlobalVariable::GeneralDynamicTLSModel, 0, false); - u64 total_rs = 0; - u64 total_hs = 0; + u64 total_rs = 0; + u64 total_hs = 0; + + for (Function &F : M) { + + if (!F.size()) { continue; } + + if (!myWhitelist.empty()) { + + bool instrumentBlock = false; + DebugLoc Loc; + StringRef instFilename; + + for (auto &BB : F) { + + BasicBlock::iterator IP = BB.getFirstInsertionPt(); + IRBuilder<> IRB(&(*IP)); + if (!Loc) Loc = IP->getDebugLoc(); - for (Function &F : M) { - if (!F.size()) { - continue; } - if (!myWhitelist.empty()) { - bool instrumentBlock = false; - DebugLoc Loc; - StringRef instFilename; + if (Loc) { + + DILocation *cDILoc = dyn_cast(Loc.getAsMDNode()); + + unsigned int instLine = cDILoc->getLine(); + instFilename = cDILoc->getFilename(); + + if (instFilename.str().empty()) { + + /* If the original location is empty, try using the inlined location + */ + DILocation 
*oDILoc = cDILoc->getInlinedAt(); + if (oDILoc) { + + instFilename = oDILoc->getFilename(); + instLine = oDILoc->getLine(); + + } - for (auto &BB : F) { - BasicBlock::iterator IP = BB.getFirstInsertionPt(); - IRBuilder<> IRB(&(*IP)); - if (!Loc) - Loc = IP->getDebugLoc(); } - if ( Loc ) { - DILocation *cDILoc = dyn_cast(Loc.getAsMDNode()); + /* Continue only if we know where we actually are */ + if (!instFilename.str().empty()) { - unsigned int instLine = cDILoc->getLine(); - instFilename = cDILoc->getFilename(); + for (std::list::iterator it = myWhitelist.begin(); + it != myWhitelist.end(); ++it) { - if (instFilename.str().empty()) { - /* If the original location is empty, try using the inlined location */ - DILocation *oDILoc = cDILoc->getInlinedAt(); - if (oDILoc) { - instFilename = oDILoc->getFilename(); - instLine = oDILoc->getLine(); - } - } + if (instFilename.str().length() >= it->length()) { - /* Continue only if we know where we actually are */ - if (!instFilename.str().empty()) { - for (std::list::iterator it = myWhitelist.begin(); it != myWhitelist.end(); ++it) { - if (instFilename.str().length() >= it->length()) { - if (instFilename.str().compare(instFilename.str().length() - it->length(), it->length(), *it) == 0) { - instrumentBlock = true; - break; - } - } - } - } - } + if (instFilename.str().compare( + instFilename.str().length() - it->length(), + it->length(), *it) == 0) { - /* Either we couldn't figure out our location or the location is - * not whitelisted, so we skip instrumentation. */ - if (!instrumentBlock) { - if (!instFilename.str().empty()) - SAYF(cYEL "[!] " cBRI "Not in whitelist, skipping %s ...\n", instFilename.str().c_str()); - else - SAYF(cYEL "[!] " cBRI "No filename information found, skipping it"); - continue; - } - } - - std::unordered_set MS; - if (!MarkSetOpt) { - for (auto &BB : F) { - MS.insert(&BB); - } - total_rs += F.size(); - } else { - auto Result = markNodes(&F); - auto RS = Result.first; - auto HS = Result.second; - - MS.insert(RS.begin(), RS.end()); - if (!LoopHeadOpt) { - MS.insert(HS.begin(), HS.end()); - total_rs += MS.size(); - } else { - DenseSet> EdgeSet; - DominatorTreeWrapperPass *DTWP = &getAnalysis(F); - auto DT = &DTWP->getDomTree(); - - total_rs += RS.size(); - total_hs += HS.size(); - - for (BasicBlock *BB : HS) { - bool Inserted = false; - for (auto BI = pred_begin(BB), BE = pred_end(BB); - BI != BE; ++BI - ) { - auto Edge = BasicBlockEdge(*BI, BB); - if (Edge.isSingleEdge() && DT->dominates(Edge, BB)) { - EdgeSet.insert({*BI, BB}); - Inserted = true; + instrumentBlock = true; break; + } + } - if (!Inserted) { - MS.insert(BB); - total_rs += 1; - total_hs -= 1; - } + } - for (auto I = EdgeSet.begin(), E = EdgeSet.end(); I != E; ++I) { - auto PredBB = I->first; - auto SuccBB = I->second; - auto NewBB = SplitBlockPredecessors(SuccBB, {PredBB}, ".split", - DT, nullptr, + + } + + } + + /* Either we couldn't figure out our location or the location is + * not whitelisted, so we skip instrumentation. */ + if (!instrumentBlock) { + + if (!instFilename.str().empty()) + SAYF(cYEL "[!] " cBRI "Not in whitelist, skipping %s ...\n", + instFilename.str().c_str()); + else + SAYF(cYEL "[!] 
" cBRI "No filename information found, skipping it"); + continue; + + } + + } + + std::unordered_set MS; + if (!MarkSetOpt) { + + for (auto &BB : F) { + + MS.insert(&BB); + + } + + total_rs += F.size(); + + } else { + + auto Result = markNodes(&F); + auto RS = Result.first; + auto HS = Result.second; + + MS.insert(RS.begin(), RS.end()); + if (!LoopHeadOpt) { + + MS.insert(HS.begin(), HS.end()); + total_rs += MS.size(); + + } else { + + DenseSet> EdgeSet; + DominatorTreeWrapperPass * DTWP = + &getAnalysis(F); + auto DT = &DTWP->getDomTree(); + + total_rs += RS.size(); + total_hs += HS.size(); + + for (BasicBlock *BB : HS) { + + bool Inserted = false; + for (auto BI = pred_begin(BB), BE = pred_end(BB); BI != BE; ++BI) { + + auto Edge = BasicBlockEdge(*BI, BB); + if (Edge.isSingleEdge() && DT->dominates(Edge, BB)) { + + EdgeSet.insert({*BI, BB}); + Inserted = true; + break; + + } + + } + + if (!Inserted) { + + MS.insert(BB); + total_rs += 1; + total_hs -= 1; + + } + + } + + for (auto I = EdgeSet.begin(), E = EdgeSet.end(); I != E; ++I) { + + auto PredBB = I->first; + auto SuccBB = I->second; + auto NewBB = + SplitBlockPredecessors(SuccBB, {PredBB}, ".split", DT, nullptr, #if LLVM_VERSION_MAJOR >= 8 - nullptr, + nullptr, #endif - false); - MS.insert(NewBB); - } + false); + MS.insert(NewBB); + } - auto *EBB = &F.getEntryBlock(); - if (succ_begin(EBB) == succ_end(EBB)) { - MS.insert(EBB); - total_rs += 1; - } + } + + auto *EBB = &F.getEntryBlock(); + if (succ_begin(EBB) == succ_end(EBB)) { + + MS.insert(EBB); + total_rs += 1; - for (BasicBlock &BB : F) { - if (MS.find(&BB) == MS.end()) { - continue; - } - IRBuilder<> IRB(&*BB.getFirstInsertionPt()); - IRB.CreateStore(ConstantInt::get(Int32Ty, genLabel()), OldPrev); - } } for (BasicBlock &BB : F) { - auto PI = pred_begin(&BB); - auto PE = pred_end(&BB); - if (MarkSetOpt && MS.find(&BB) == MS.end()) { - continue; - } + if (MS.find(&BB) == MS.end()) { continue; } IRBuilder<> IRB(&*BB.getFirstInsertionPt()); - Value *L = NULL; - if (PI == PE) { - L = ConstantInt::get(Int32Ty, genLabel()); - } else { - auto *PN = PHINode::Create(Int32Ty, 0, "", &*BB.begin()); - DenseMap PredMap; - for (auto PI = pred_begin(&BB), PE = pred_end(&BB); - PI != PE; ++PI - ) { - BasicBlock *PBB = *PI; - auto It = PredMap.insert({PBB, genLabel()}); - unsigned Label = It.first->second; - PN->addIncoming(ConstantInt::get(Int32Ty, Label), PBB); - } - L = PN; + IRB.CreateStore(ConstantInt::get(Int32Ty, genLabel()), OldPrev); + + } + + } + + for (BasicBlock &BB : F) { + + auto PI = pred_begin(&BB); + auto PE = pred_end(&BB); + if (MarkSetOpt && MS.find(&BB) == MS.end()) { continue; } + + IRBuilder<> IRB(&*BB.getFirstInsertionPt()); + Value * L = NULL; + if (PI == PE) { + + L = ConstantInt::get(Int32Ty, genLabel()); + + } else { + + auto *PN = PHINode::Create(Int32Ty, 0, "", &*BB.begin()); + DenseMap PredMap; + for (auto PI = pred_begin(&BB), PE = pred_end(&BB); PI != PE; ++PI) { + + BasicBlock *PBB = *PI; + auto It = PredMap.insert({PBB, genLabel()}); + unsigned Label = It.first->second; + PN->addIncoming(ConstantInt::get(Int32Ty, Label), PBB); + } - /* Load prev_loc */ - LoadInst *PrevLoc = IRB.CreateLoad(OldPrev); - PrevLoc->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - Value *PrevLocCasted = IRB.CreateZExt(PrevLoc, IRB.getInt32Ty()); + L = PN; - /* Load SHM pointer */ - LoadInst *MapPtr = IRB.CreateLoad(CovMapPtr); - MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - Value *MapPtrIdx = IRB.CreateGEP(MapPtr, 
IRB.CreateXor(PrevLocCasted, L)); + } - /* Update bitmap */ - LoadInst *Counter = IRB.CreateLoad(MapPtrIdx); - Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - - Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1)); + /* Load prev_loc */ + LoadInst *PrevLoc = IRB.CreateLoad(OldPrev); + PrevLoc->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + Value *PrevLocCasted = IRB.CreateZExt(PrevLoc, IRB.getInt32Ty()); + + /* Load SHM pointer */ + LoadInst *MapPtr = IRB.CreateLoad(CovMapPtr); + MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + Value *MapPtrIdx = + IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, L)); + + /* Update bitmap */ + LoadInst *Counter = IRB.CreateLoad(MapPtrIdx); + Counter->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + + Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1)); #if LLVM_VERSION_MAJOR < 9 - if (neverZero_counters_str != NULL) // with llvm 9 we make this the default as the bug in llvm is then fixed + if (neverZero_counters_str != + NULL) // with llvm 9 we make this the default as the bug in llvm is + // then fixed #else - if (1) // with llvm 9 we make this the default as the bug in llvm is then fixed + if (1) // with llvm 9 we make this the default as the bug in llvm is + // then fixed #endif - { + { + /* hexcoder: Realize a counter that skips zero during overflow. - * Once this counter reaches its maximum value, it next increments to 1 + * Once this counter reaches its maximum value, it next increments to + * 1 * * Instead of * Counter + 1 -> Counter @@ -306,38 +387,52 @@ namespace { * Counter + 1 -> {Counter, OverflowFlag} * Counter + OverflowFlag -> Counter */ - auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0)); - auto carry = IRB.CreateZExt(cf, Int8Ty); - Incr = IRB.CreateAdd(Incr, carry); - } - - IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - - /* Set prev_loc to cur_loc >> 1 */ - /* - StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1), OldPrev); - Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - */ + auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0)); + auto carry = IRB.CreateZExt(cf, Int8Ty); + Incr = IRB.CreateAdd(Incr, carry); - total_instr++; } + + IRB.CreateStore(Incr, MapPtrIdx) + ->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + + /* Set prev_loc to cur_loc >> 1 */ + /* + StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1), + OldPrev); Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, + None)); + */ + + total_instr++; + } - OKF("Instrumented %u locations (%llu, %llu) (%s mode)\n"/*", ratio %u%%)."*/, - total_instr, total_rs, total_hs, - getenv("AFL_HARDEN") ? "hardened" : - ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) ? - "ASAN/MSAN" : "non-hardened")/*, inst_ratio*/); - return false; } - }; // end of struct InsTrim + + OKF("Instrumented %u locations (%llu, %llu) (%s mode)\n" /*", ratio + %u%%)."*/ + , + total_instr, total_rs, total_hs, + getenv("AFL_HARDEN") + ? "hardened" + : ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) + ? 
"ASAN/MSAN" + : "non-hardened") /*, inst_ratio*/); + return false; + + } + +}; // end of struct InsTrim + } // end of anonymous namespace char InsTrim::ID = 0; static void registerAFLPass(const PassManagerBuilder &, legacy::PassManagerBase &PM) { + PM.add(new InsTrim()); + } static RegisterStandardPasses RegisterAFLPass( @@ -345,3 +440,4 @@ static RegisterStandardPasses RegisterAFLPass( static RegisterStandardPasses RegisterAFLPass0( PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass); + diff --git a/llvm_mode/MarkNodes.cc b/llvm_mode/MarkNodes.cc index 348dc264..2aeeda8d 100644 --- a/llvm_mode/MarkNodes.cc +++ b/llvm_mode/MarkNodes.cc @@ -19,207 +19,267 @@ using namespace llvm; -DenseMap LMap; -std::vector Blocks; -std::set Marked , Markabove; -std::vector< std::vector > Succs , Preds; +DenseMap LMap; +std::vector Blocks; +std::set Marked, Markabove; +std::vector > Succs, Preds; + +void reset() { -void reset(){ LMap.clear(); Blocks.clear(); Marked.clear(); Markabove.clear(); + } uint32_t start_point; void labelEachBlock(Function *F) { + // Fake single endpoint; LMap[NULL] = Blocks.size(); Blocks.push_back(NULL); - + // Assign the unique LabelID to each block; for (auto I = F->begin(), E = F->end(); I != E; ++I) { + BasicBlock *BB = &*I; LMap[BB] = Blocks.size(); Blocks.push_back(BB); + } - + start_point = LMap[&F->getEntryBlock()]; + } void buildCFG(Function *F) { - Succs.resize( Blocks.size() ); - Preds.resize( Blocks.size() ); - for( size_t i = 0 ; i < Succs.size() ; i ++ ){ - Succs[ i ].clear(); - Preds[ i ].clear(); + + Succs.resize(Blocks.size()); + Preds.resize(Blocks.size()); + for (size_t i = 0; i < Succs.size(); i++) { + + Succs[i].clear(); + Preds[i].clear(); + } - //uint32_t FakeID = 0; + // uint32_t FakeID = 0; for (auto S = F->begin(), E = F->end(); S != E; ++S) { + BasicBlock *BB = &*S; - uint32_t MyID = LMap[BB]; - //if (succ_begin(BB) == succ_end(BB)) { - //Succs[MyID].push_back(FakeID); - //Marked.insert(MyID); + uint32_t MyID = LMap[BB]; + // if (succ_begin(BB) == succ_end(BB)) { + + // Succs[MyID].push_back(FakeID); + // Marked.insert(MyID); //} for (auto I = succ_begin(BB), E = succ_end(BB); I != E; ++I) { + Succs[MyID].push_back(LMap[*I]); + } + } + } -std::vector< std::vector > tSuccs; -std::vector tag , indfs; +std::vector > tSuccs; +std::vector tag, indfs; void DFStree(size_t now_id) { - if(tag[now_id]) return; - tag[now_id]=true; - indfs[now_id]=true; - for (auto succ: tSuccs[now_id]) { - if(tag[succ] and indfs[succ]) { + + if (tag[now_id]) return; + tag[now_id] = true; + indfs[now_id] = true; + for (auto succ : tSuccs[now_id]) { + + if (tag[succ] and indfs[succ]) { + Marked.insert(succ); Markabove.insert(succ); continue; + } + Succs[now_id].push_back(succ); Preds[succ].push_back(now_id); DFStree(succ); + } - indfs[now_id]=false; + + indfs[now_id] = false; + } + void turnCFGintoDAG(Function *F) { + tSuccs = Succs; tag.resize(Blocks.size()); indfs.resize(Blocks.size()); - for (size_t i = 0; i < Blocks.size(); ++ i) { + for (size_t i = 0; i < Blocks.size(); ++i) { + Succs[i].clear(); - tag[i]=false; - indfs[i]=false; + tag[i] = false; + indfs[i] = false; + } + DFStree(start_point); - for (size_t i = 0; i < Blocks.size(); ++ i) - if( Succs[i].empty() ){ + for (size_t i = 0; i < Blocks.size(); ++i) + if (Succs[i].empty()) { + Succs[i].push_back(0); Preds[0].push_back(i); + } + } uint32_t timeStamp; -namespace DominatorTree{ - std::vector< std::vector > cov; - std::vector dfn, nfd, par, sdom, idom, mom, mn; +namespace DominatorTree { + +std::vector > cov; 
+std::vector dfn, nfd, par, sdom, idom, mom, mn; + +bool Compare(uint32_t u, uint32_t v) { + + return dfn[u] < dfn[v]; + +} + +uint32_t eval(uint32_t u) { + + if (mom[u] == u) return u; + uint32_t res = eval(mom[u]); + if (Compare(sdom[mn[mom[u]]], sdom[mn[u]])) { mn[u] = mn[mom[u]]; } + return mom[u] = res; + +} + +void DFS(uint32_t now) { + + timeStamp += 1; + dfn[now] = timeStamp; + nfd[timeStamp - 1] = now; + for (auto succ : Succs[now]) { + + if (dfn[succ] == 0) { + + par[succ] = now; + DFS(succ); - bool Compare(uint32_t u, uint32_t v) { - return dfn[u] < dfn[v]; - } - uint32_t eval(uint32_t u) { - if( mom[u] == u ) return u; - uint32_t res = eval( mom[u] ); - if(Compare(sdom[mn[mom[u]]] , sdom[mn[u]])) { - mn[u] = mn[mom[u]]; } - return mom[u] = res; + } - void DFS(uint32_t now) { - timeStamp += 1; - dfn[now] = timeStamp; - nfd[timeStamp - 1] = now; - for( auto succ : Succs[now] ) { - if( dfn[succ] == 0 ) { - par[succ] = now; - DFS(succ); - } - } +} + +void DominatorTree(Function *F) { + + if (Blocks.empty()) return; + uint32_t s = start_point; + + // Initialization + mn.resize(Blocks.size()); + cov.resize(Blocks.size()); + dfn.resize(Blocks.size()); + nfd.resize(Blocks.size()); + par.resize(Blocks.size()); + mom.resize(Blocks.size()); + sdom.resize(Blocks.size()); + idom.resize(Blocks.size()); + + for (uint32_t i = 0; i < Blocks.size(); i++) { + + dfn[i] = 0; + nfd[i] = Blocks.size(); + cov[i].clear(); + idom[i] = mom[i] = mn[i] = sdom[i] = i; + } - void DominatorTree(Function *F) { - if( Blocks.empty() ) return; - uint32_t s = start_point; + timeStamp = 0; + DFS(s); - // Initialization - mn.resize(Blocks.size()); - cov.resize(Blocks.size()); - dfn.resize(Blocks.size()); - nfd.resize(Blocks.size()); - par.resize(Blocks.size()); - mom.resize(Blocks.size()); - sdom.resize(Blocks.size()); - idom.resize(Blocks.size()); + for (uint32_t i = Blocks.size() - 1; i >= 1u; i--) { + + uint32_t now = nfd[i]; + if (now == Blocks.size()) { continue; } + for (uint32_t pre : Preds[now]) { + + if (dfn[pre]) { + + eval(pre); + if (Compare(sdom[mn[pre]], sdom[now])) { sdom[now] = sdom[mn[pre]]; } + + } - for( uint32_t i = 0 ; i < Blocks.size() ; i ++ ) { - dfn[i] = 0; - nfd[i] = Blocks.size(); - cov[i].clear(); - idom[i] = mom[i] = mn[i] = sdom[i] = i; } - timeStamp = 0; - DFS(s); + cov[sdom[now]].push_back(now); + mom[now] = par[now]; + for (uint32_t x : cov[par[now]]) { + + eval(x); + if (Compare(sdom[mn[x]], par[now])) { + + idom[x] = mn[x]; + + } else { + + idom[x] = par[now]; - for( uint32_t i = Blocks.size() - 1 ; i >= 1u ; i -- ) { - uint32_t now = nfd[i]; - if( now == Blocks.size() ) { - continue; - } - for( uint32_t pre : Preds[ now ] ) { - if( dfn[ pre ] ) { - eval(pre); - if( Compare(sdom[mn[pre]], sdom[now]) ) { - sdom[now] = sdom[mn[pre]]; - } - } - } - cov[sdom[now]].push_back(now); - mom[now] = par[now]; - for( uint32_t x : cov[par[now]] ) { - eval(x); - if( Compare(sdom[mn[x]], par[now]) ) { - idom[x] = mn[x]; - } else { - idom[x] = par[now]; - } } + } - for( uint32_t i = 1 ; i < Blocks.size() ; i += 1 ) { - uint32_t now = nfd[i]; - if( now == Blocks.size() ) { - continue; - } - if(idom[now] != sdom[now]) - idom[now] = idom[idom[now]]; - } } -} // End of DominatorTree -std::vector Visited, InStack; -std::vector TopoOrder, InDeg; -std::vector< std::vector > t_Succ , t_Pred; + for (uint32_t i = 1; i < Blocks.size(); i += 1) { + + uint32_t now = nfd[i]; + if (now == Blocks.size()) { continue; } + if (idom[now] != sdom[now]) idom[now] = idom[idom[now]]; + + } + +} + +} // namespace 
DominatorTree + +std::vector Visited, InStack; +std::vector TopoOrder, InDeg; +std::vector > t_Succ, t_Pred; void Go(uint32_t now, uint32_t tt) { - if( now == tt ) return; + + if (now == tt) return; Visited[now] = InStack[now] = timeStamp; - for(uint32_t nxt : Succs[now]) { - if(Visited[nxt] == timeStamp and InStack[nxt] == timeStamp) { + for (uint32_t nxt : Succs[now]) { + + if (Visited[nxt] == timeStamp and InStack[nxt] == timeStamp) { + Marked.insert(nxt); + } + t_Succ[now].push_back(nxt); t_Pred[nxt].push_back(now); InDeg[nxt] += 1; - if(Visited[nxt] == timeStamp) { - continue; - } + if (Visited[nxt] == timeStamp) { continue; } Go(nxt, tt); + } InStack[now] = 0; + } void TopologicalSort(uint32_t ss, uint32_t tt) { + timeStamp += 1; Go(ss, tt); @@ -227,76 +287,111 @@ void TopologicalSort(uint32_t ss, uint32_t tt) { TopoOrder.clear(); std::queue wait; wait.push(ss); - while( not wait.empty() ) { - uint32_t now = wait.front(); wait.pop(); + while (not wait.empty()) { + + uint32_t now = wait.front(); + wait.pop(); TopoOrder.push_back(now); - for(uint32_t nxt : t_Succ[now]) { + for (uint32_t nxt : t_Succ[now]) { + InDeg[nxt] -= 1; - if(InDeg[nxt] == 0u) { - wait.push(nxt); - } + if (InDeg[nxt] == 0u) { wait.push(nxt); } + } + } + } -std::vector< std::set > NextMarked; -bool Indistinguish(uint32_t node1, uint32_t node2) { - if(NextMarked[node1].size() > NextMarked[node2].size()){ +std::vector > NextMarked; +bool Indistinguish(uint32_t node1, uint32_t node2) { + + if (NextMarked[node1].size() > NextMarked[node2].size()) { + uint32_t _swap = node1; node1 = node2; node2 = _swap; + } - for(uint32_t x : NextMarked[node1]) { - if( NextMarked[node2].find(x) != NextMarked[node2].end() ) { - return true; - } + + for (uint32_t x : NextMarked[node1]) { + + if (NextMarked[node2].find(x) != NextMarked[node2].end()) { return true; } + } + return false; + } void MakeUniq(uint32_t now) { + bool StopFlag = false; if (Marked.find(now) == Marked.end()) { - for(uint32_t pred1 : t_Pred[now]) { - for(uint32_t pred2 : t_Pred[now]) { - if(pred1 == pred2) continue; - if(Indistinguish(pred1, pred2)) { + + for (uint32_t pred1 : t_Pred[now]) { + + for (uint32_t pred2 : t_Pred[now]) { + + if (pred1 == pred2) continue; + if (Indistinguish(pred1, pred2)) { + Marked.insert(now); StopFlag = true; break; + } + } - if (StopFlag) { - break; - } + + if (StopFlag) { break; } + } + } - if(Marked.find(now) != Marked.end()) { + + if (Marked.find(now) != Marked.end()) { + NextMarked[now].insert(now); + } else { - for(uint32_t pred : t_Pred[now]) { - for(uint32_t x : NextMarked[pred]) { + + for (uint32_t pred : t_Pred[now]) { + + for (uint32_t x : NextMarked[pred]) { + NextMarked[now].insert(x); + } + } + } + } void MarkSubGraph(uint32_t ss, uint32_t tt) { - TopologicalSort(ss, tt); - if(TopoOrder.empty()) return; - for(uint32_t i : TopoOrder) { + TopologicalSort(ss, tt); + if (TopoOrder.empty()) return; + + for (uint32_t i : TopoOrder) { + NextMarked[i].clear(); + } NextMarked[TopoOrder[0]].insert(TopoOrder[0]); - for(uint32_t i = 1 ; i < TopoOrder.size() ; i += 1) { + for (uint32_t i = 1; i < TopoOrder.size(); i += 1) { + MakeUniq(TopoOrder[i]); + } + } void MarkVertice(Function *F) { + uint32_t s = start_point; InDeg.resize(Blocks.size()); @@ -306,26 +401,32 @@ void MarkVertice(Function *F) { t_Pred.resize(Blocks.size()); NextMarked.resize(Blocks.size()); - for( uint32_t i = 0 ; i < Blocks.size() ; i += 1 ) { + for (uint32_t i = 0; i < Blocks.size(); i += 1) { + Visited[i] = InStack[i] = InDeg[i] = 0; t_Succ[i].clear(); 
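    // (t_Succ/t_Pred are scratch edges; Go() rebuilds them for the subgraph
    // that each MarkSubGraph() call below visits)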
t_Pred[i].clear(); + } + timeStamp = 0; uint32_t t = 0; - //MarkSubGraph(s, t); - //return; + // MarkSubGraph(s, t); + // return; + + while (s != t) { - while( s != t ) { MarkSubGraph(DominatorTree::idom[t], t); t = DominatorTree::idom[t]; + } } // return {marked nodes} -std::pair, - std::vector >markNodes(Function *F) { +std::pair, std::vector > markNodes( + Function *F) { + assert(F->size() > 0 && "Function can not be empty"); reset(); @@ -335,21 +436,30 @@ std::pair, DominatorTree::DominatorTree(F); MarkVertice(F); - std::vector Result , ResultAbove; - for( uint32_t x : Markabove ) { - auto it = Marked.find( x ); - if( it != Marked.end() ) - Marked.erase( it ); - if( x ) - ResultAbove.push_back(Blocks[x]); - } - for( uint32_t x : Marked ) { - if (x == 0) { - continue; - } else { - Result.push_back(Blocks[x]); - } + std::vector Result, ResultAbove; + for (uint32_t x : Markabove) { + + auto it = Marked.find(x); + if (it != Marked.end()) Marked.erase(it); + if (x) ResultAbove.push_back(Blocks[x]); + } - return { Result , ResultAbove }; + for (uint32_t x : Marked) { + + if (x == 0) { + + continue; + + } else { + + Result.push_back(Blocks[x]); + + } + + } + + return {Result, ResultAbove}; + } + diff --git a/llvm_mode/MarkNodes.h b/llvm_mode/MarkNodes.h index e3bf3ce5..23316652 100644 --- a/llvm_mode/MarkNodes.h +++ b/llvm_mode/MarkNodes.h @@ -1,11 +1,12 @@ #ifndef __MARK_NODES__ -#define __MARK_NODES__ +# define __MARK_NODES__ -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/Function.h" -#include +# include "llvm/IR/BasicBlock.h" +# include "llvm/IR/Function.h" +# include -std::pair, - std::vector> markNodes(llvm::Function *F); +std::pair, std::vector> +markNodes(llvm::Function *F); #endif + diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c index 1b810edf..666fd043 100644 --- a/llvm_mode/afl-clang-fast.c +++ b/llvm_mode/afl-clang-fast.c @@ -34,16 +34,15 @@ #include #include -static u8* obj_path; /* Path to runtime libraries */ -static u8** cc_params; /* Parameters passed to the real CC */ -static u32 cc_par_cnt = 1; /* Param count, including argv0 */ - +static u8* obj_path; /* Path to runtime libraries */ +static u8** cc_params; /* Parameters passed to the real CC */ +static u32 cc_par_cnt = 1; /* Param count, including argv0 */ /* Try to find the runtime libraries. If that fails, abort. */ static void find_obj(u8* argv0) { - u8 *afl_path = getenv("AFL_PATH"); + u8* afl_path = getenv("AFL_PATH"); u8 *slash, *tmp; if (afl_path) { @@ -51,9 +50,11 @@ static void find_obj(u8* argv0) { tmp = alloc_printf("%s/afl-llvm-rt.o", afl_path); if (!access(tmp, R_OK)) { + obj_path = afl_path; ck_free(tmp); return; + } ck_free(tmp); @@ -64,7 +65,7 @@ static void find_obj(u8* argv0) { if (slash) { - u8 *dir; + u8* dir; *slash = 0; dir = ck_strdup(argv0); @@ -73,9 +74,11 @@ static void find_obj(u8* argv0) { tmp = alloc_printf("%s/afl-llvm-rt.o", dir); if (!access(tmp, R_OK)) { + obj_path = dir; ck_free(tmp); return; + } ck_free(tmp); @@ -84,33 +87,43 @@ static void find_obj(u8* argv0) { } if (!access(AFL_PATH "/afl-llvm-rt.o", R_OK)) { + obj_path = AFL_PATH; return; + } - FATAL("Unable to find 'afl-llvm-rt.o' or 'afl-llvm-pass.so.cc'. Please set AFL_PATH"); - -} + FATAL( + "Unable to find 'afl-llvm-rt.o' or 'afl-llvm-pass.so.cc'. Please set " + "AFL_PATH"); +} /* Copy argv to cc_params, making the necessary edits. 
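A toy model of that rewrite, heavily simplified and hypothetical (the real function below also injects the laf passes, sanitizer defaults, and the runtime object when linking; the .so path is a placeholder):

#include <stdio.h>

int main(int argc, char** argv) {

  const char* out[argc + 8];                /* headroom, as in the real code */
  int         n = 0;

  out[n++] = "clang";                       /* or clang++ for the ++ wrapper */
  out[n++] = "-Xclang";                     /* load the instrumentation pass */
  out[n++] = "-load";
  out[n++] = "-Xclang";
  out[n++] = "/path/to/afl-llvm-pass.so";   /* placeholder path              */

  for (int i = 1; i < argc; i++) out[n++] = argv[i];  /* keep user arguments */

  for (int i = 0; i < n; i++) printf("%s ", out[i]);
  printf("\n");
  return 0;

}
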
*/

 static void edit_params(u32 argc, char** argv) {

-  u8 fortify_set = 0, asan_set = 0, x_set = 0, maybe_linking = 1, bit_mode = 0;
-  u8 *name;
+  u8 fortify_set = 0, asan_set = 0, x_set = 0, maybe_linking = 1, bit_mode = 0;
+  u8* name;

   cc_params = ck_alloc((argc + 128) * sizeof(u8*));

   name = strrchr(argv[0], '/');
-  if (!name) name = argv[0]; else name++;
+  if (!name)
+    name = argv[0];
+  else
+    name++;

   if (!strcmp(name, "afl-clang-fast++")) {
+
     u8* alt_cxx = getenv("AFL_CXX");
     cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++";
+
   } else {
+
     u8* alt_cc = getenv("AFL_CC");
     cc_params[0] = alt_cc ? alt_cc : (u8*)"clang";
+
   }

   /* There are three ways to compile with afl-clang-fast. In the traditional
@@ -118,36 +131,50 @@
      much faster but has less coverage. Finally there is the experimental
      'trace-pc-guard' mode, which uses native LLVM instrumentation callbacks
      instead. For trace-pc-guard see:
-     http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards */
+     http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards
+   */

   // laf
-  if (getenv("LAF_SPLIT_SWITCHES")||getenv("AFL_LLVM_LAF_SPLIT_SWITCHES")) {
+  if (getenv("LAF_SPLIT_SWITCHES") || getenv("AFL_LLVM_LAF_SPLIT_SWITCHES")) {
+
     cc_params[cc_par_cnt++] = "-Xclang";
     cc_params[cc_par_cnt++] = "-load";
     cc_params[cc_par_cnt++] = "-Xclang";
-    cc_params[cc_par_cnt++] = alloc_printf("%s/split-switches-pass.so", obj_path);
+    cc_params[cc_par_cnt++] =
+        alloc_printf("%s/split-switches-pass.so", obj_path);
+
   }

-  if (getenv("LAF_TRANSFORM_COMPARES")||getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES")) {
+  if (getenv("LAF_TRANSFORM_COMPARES") ||
+      getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES")) {
+
     cc_params[cc_par_cnt++] = "-Xclang";
     cc_params[cc_par_cnt++] = "-load";
     cc_params[cc_par_cnt++] = "-Xclang";
-    cc_params[cc_par_cnt++] = alloc_printf("%s/compare-transform-pass.so", obj_path);
+    cc_params[cc_par_cnt++] =
+        alloc_printf("%s/compare-transform-pass.so", obj_path);
+
   }

-  if (getenv("LAF_SPLIT_COMPARES")||getenv("AFL_LLVM_LAF_SPLIT_COMPARES")) {
+  if (getenv("LAF_SPLIT_COMPARES") || getenv("AFL_LLVM_LAF_SPLIT_COMPARES")) {
+
     cc_params[cc_par_cnt++] = "-Xclang";
     cc_params[cc_par_cnt++] = "-load";
     cc_params[cc_par_cnt++] = "-Xclang";
-    cc_params[cc_par_cnt++] = alloc_printf("%s/split-compares-pass.so", obj_path);
+    cc_params[cc_par_cnt++] =
+        alloc_printf("%s/split-compares-pass.so", obj_path);
+
   }

+
   // /laf

 #ifdef USE_TRACE_PC
-  cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default
-  //cc_params[cc_par_cnt++] = "-mllvm";
-  //cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-cmp,trace-div,trace-gep";
-  //cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
+  cc_params[cc_par_cnt++] =
+      "-fsanitize-coverage=trace-pc-guard";  // edge coverage by default
+  // cc_params[cc_par_cnt++] = "-mllvm";
+  // cc_params[cc_par_cnt++] =
+  // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; cc_params[cc_par_cnt++]
+  // = "-sanitizer-coverage-block-threshold=0";
 #else
   cc_params[cc_par_cnt++] = "-Xclang";
   cc_params[cc_par_cnt++] = "-load";
@@ -165,6 +192,7 @@ static void edit_params(u32 argc, char** argv) {
   if (argc == 1 && !strcmp(argv[1], "-v")) maybe_linking = 0;

   while (--argc) {
+
     u8* cur = *(++argv);

     if (!strcmp(cur, "-m32")) bit_mode = 32;
@@ -175,15 +203,15 @@ static void edit_params(u32 argc, char** argv) {
     if (!strcmp(cur, "-c") || !strcmp(cur, "-S") || !strcmp(cur, "-E"))
       maybe_linking = 0;

-    if (!strcmp(cur, "-fsanitize=address") ||
-      
!strcmp(cur, "-fsanitize=memory")) asan_set = 1; + if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory")) + asan_set = 1; if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1; if (!strcmp(cur, "-shared")) maybe_linking = 0; - if (!strcmp(cur, "-Wl,-z,defs") || - !strcmp(cur, "-Wl,--no-undefined")) continue; + if (!strcmp(cur, "-Wl,-z,defs") || !strcmp(cur, "-Wl,--no-undefined")) + continue; cc_params[cc_par_cnt++] = cur; @@ -193,8 +221,7 @@ static void edit_params(u32 argc, char** argv) { cc_params[cc_par_cnt++] = "-fstack-protector-all"; - if (!fortify_set) - cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2"; + if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2"; } @@ -202,8 +229,7 @@ static void edit_params(u32 argc, char** argv) { if (getenv("AFL_USE_ASAN")) { - if (getenv("AFL_USE_MSAN")) - FATAL("ASAN and MSAN are mutually exclusive"); + if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive"); if (getenv("AFL_HARDEN")) FATAL("ASAN and AFL_HARDEN are mutually exclusive"); @@ -213,8 +239,7 @@ static void edit_params(u32 argc, char** argv) { } else if (getenv("AFL_USE_MSAN")) { - if (getenv("AFL_USE_ASAN")) - FATAL("ASAN and MSAN are mutually exclusive"); + if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive"); if (getenv("AFL_HARDEN")) FATAL("MSAN and AFL_HARDEN are mutually exclusive"); @@ -279,35 +304,41 @@ static void edit_params(u32 argc, char** argv) { */ - cc_params[cc_par_cnt++] = "-D__AFL_LOOP(_A)=" - "({ static volatile char *_B __attribute__((used)); " - " _B = (char*)\"" PERSIST_SIG "\"; " + cc_params[cc_par_cnt++] = + "-D__AFL_LOOP(_A)=" + "({ static volatile char *_B __attribute__((used)); " + " _B = (char*)\"" PERSIST_SIG + "\"; " #ifdef __APPLE__ - "__attribute__((visibility(\"default\"))) " - "int _L(unsigned int) __asm__(\"___afl_persistent_loop\"); " + "__attribute__((visibility(\"default\"))) " + "int _L(unsigned int) __asm__(\"___afl_persistent_loop\"); " #else - "__attribute__((visibility(\"default\"))) " - "int _L(unsigned int) __asm__(\"__afl_persistent_loop\"); " + "__attribute__((visibility(\"default\"))) " + "int _L(unsigned int) __asm__(\"__afl_persistent_loop\"); " #endif /* ^__APPLE__ */ - "_L(_A); })"; + "_L(_A); })"; - cc_params[cc_par_cnt++] = "-D__AFL_INIT()=" - "do { static volatile char *_A __attribute__((used)); " - " _A = (char*)\"" DEFER_SIG "\"; " + cc_params[cc_par_cnt++] = + "-D__AFL_INIT()=" + "do { static volatile char *_A __attribute__((used)); " + " _A = (char*)\"" DEFER_SIG + "\"; " #ifdef __APPLE__ - "__attribute__((visibility(\"default\"))) " - "void _I(void) __asm__(\"___afl_manual_init\"); " + "__attribute__((visibility(\"default\"))) " + "void _I(void) __asm__(\"___afl_manual_init\"); " #else - "__attribute__((visibility(\"default\"))) " - "void _I(void) __asm__(\"__afl_manual_init\"); " + "__attribute__((visibility(\"default\"))) " + "void _I(void) __asm__(\"__afl_manual_init\"); " #endif /* ^__APPLE__ */ - "_I(); } while (0)"; + "_I(); } while (0)"; if (maybe_linking) { if (x_set) { + cc_params[cc_par_cnt++] = "-x"; cc_params[cc_par_cnt++] = "none"; + } switch (bit_mode) { @@ -340,7 +371,6 @@ static void edit_params(u32 argc, char** argv) { } - /* Main entry point */ int main(int argc, char** argv) { @@ -348,46 +378,53 @@ int main(int argc, char** argv) { if (isatty(2) && !getenv("AFL_QUIET")) { #ifdef USE_TRACE_PC - SAYF(cCYA "afl-clang-fast" VERSION cRST " [tpcg] by \n"); + SAYF(cCYA "afl-clang-fast" VERSION cRST + " [tpcg] by \n"); #else - SAYF(cCYA 
"afl-clang-fast" VERSION cRST " by \n"); + SAYF(cCYA "afl-clang-fast" VERSION cRST " by \n"); #endif /* ^USE_TRACE_PC */ } if (argc < 2) { - SAYF("\n" - "This is a helper application for afl-fuzz. It serves as a drop-in replacement\n" - "for clang, letting you recompile third-party code with the required runtime\n" - "instrumentation. A common use pattern would be one of the following:\n\n" + SAYF( + "\n" + "This is a helper application for afl-fuzz. It serves as a drop-in " + "replacement\n" + "for clang, letting you recompile third-party code with the required " + "runtime\n" + "instrumentation. A common use pattern would be one of the " + "following:\n\n" - " CC=%s/afl-clang-fast ./configure\n" - " CXX=%s/afl-clang-fast++ ./configure\n\n" + " CC=%s/afl-clang-fast ./configure\n" + " CXX=%s/afl-clang-fast++ ./configure\n\n" - "In contrast to the traditional afl-clang tool, this version is implemented as\n" - "an LLVM pass and tends to offer improved performance with slow programs.\n\n" + "In contrast to the traditional afl-clang tool, this version is " + "implemented as\n" + "an LLVM pass and tends to offer improved performance with slow " + "programs.\n\n" - "You can specify custom next-stage toolchain via AFL_CC and AFL_CXX. Setting\n" - "AFL_HARDEN enables hardening optimizations in the compiled code.\n\n", - BIN_PATH, BIN_PATH); + "You can specify custom next-stage toolchain via AFL_CC and AFL_CXX. " + "Setting\n" + "AFL_HARDEN enables hardening optimizations in the compiled code.\n\n", + BIN_PATH, BIN_PATH); exit(1); } - find_obj(argv[0]); edit_params(argc, argv); -/* - int i = 0; - printf("EXEC:"); - while (cc_params[i] != NULL) - printf(" %s", cc_params[i++]); - printf("\n"); -*/ + /* + int i = 0; + printf("EXEC:"); + while (cc_params[i] != NULL) + printf(" %s", cc_params[i++]); + printf("\n"); + */ execvp(cc_params[0], (char**)cc_params); @@ -396,3 +433,4 @@ int main(int argc, char** argv) { return 0; } + diff --git a/llvm_mode/afl-llvm-pass.so.cc b/llvm_mode/afl-llvm-pass.so.cc index b242163e..5d531a87 100644 --- a/llvm_mode/afl-llvm-pass.so.cc +++ b/llvm_mode/afl-llvm-pass.so.cc @@ -48,50 +48,52 @@ using namespace llvm; namespace { - class AFLCoverage : public ModulePass { +class AFLCoverage : public ModulePass { - public: + public: + static char ID; + AFLCoverage() : ModulePass(ID) { + + char *instWhiteListFilename = getenv("AFL_LLVM_WHITELIST"); + if (instWhiteListFilename) { + + std::string line; + std::ifstream fileStream; + fileStream.open(instWhiteListFilename); + if (!fileStream) report_fatal_error("Unable to open AFL_LLVM_WHITELIST"); + getline(fileStream, line); + while (fileStream) { + + myWhitelist.push_back(line); + getline(fileStream, line); - static char ID; - AFLCoverage() : ModulePass(ID) { - char* instWhiteListFilename = getenv("AFL_LLVM_WHITELIST"); - if (instWhiteListFilename) { - std::string line; - std::ifstream fileStream; - fileStream.open(instWhiteListFilename); - if (!fileStream) - report_fatal_error("Unable to open AFL_LLVM_WHITELIST"); - getline(fileStream, line); - while (fileStream) { - myWhitelist.push_back(line); - getline(fileStream, line); - } - } } - bool runOnModule(Module &M) override; + } - // StringRef getPassName() const override { - // return "American Fuzzy Lop Instrumentation"; - // } + } - protected: + bool runOnModule(Module &M) override; - std::list myWhitelist; + // StringRef getPassName() const override { - }; + // return "American Fuzzy Lop Instrumentation"; + // } -} + protected: + std::list myWhitelist; +}; + +} // namespace char 
AFLCoverage::ID = 0; - bool AFLCoverage::runOnModule(Module &M) { LLVMContext &C = M.getContext(); - IntegerType *Int8Ty = IntegerType::getInt8Ty(C); + IntegerType *Int8Ty = IntegerType::getInt8Ty(C); IntegerType *Int32Ty = IntegerType::getInt32Ty(C); unsigned int cur_loc = 0; @@ -103,11 +105,13 @@ bool AFLCoverage::runOnModule(Module &M) { SAYF(cCYA "afl-llvm-pass" VERSION cRST " by \n"); - } else be_quiet = 1; + } else + + be_quiet = 1; /* Decide instrumentation ratio */ - char* inst_ratio_str = getenv("AFL_INST_RATIO"); + char * inst_ratio_str = getenv("AFL_INST_RATIO"); unsigned int inst_ratio = 100; if (inst_ratio_str) { @@ -119,7 +123,7 @@ bool AFLCoverage::runOnModule(Module &M) { } #if LLVM_VERSION_MAJOR < 9 - char* neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO"); + char *neverZero_counters_str = getenv("AFL_LLVM_NOT_ZERO"); #endif /* Get globals for the SHM region and the previous location. Note that @@ -134,8 +138,8 @@ bool AFLCoverage::runOnModule(Module &M) { M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc"); #else GlobalVariable *AFLPrevLoc = new GlobalVariable( - M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", - 0, GlobalVariable::GeneralDynamicTLSModel, 0, false); + M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0, + GlobalVariable::GeneralDynamicTLSModel, 0, false); #endif /* Instrument all the things! */ @@ -146,58 +150,77 @@ bool AFLCoverage::runOnModule(Module &M) { for (auto &BB : F) { BasicBlock::iterator IP = BB.getFirstInsertionPt(); - IRBuilder<> IRB(&(*IP)); - + IRBuilder<> IRB(&(*IP)); + if (!myWhitelist.empty()) { - bool instrumentBlock = false; - /* Get the current location using debug information. - * For now, just instrument the block if we are not able - * to determine our location. */ - DebugLoc Loc = IP->getDebugLoc(); - if ( Loc ) { - DILocation *cDILoc = dyn_cast(Loc.getAsMDNode()); + bool instrumentBlock = false; - unsigned int instLine = cDILoc->getLine(); - StringRef instFilename = cDILoc->getFilename(); + /* Get the current location using debug information. + * For now, just instrument the block if we are not able + * to determine our location. */ + DebugLoc Loc = IP->getDebugLoc(); + if (Loc) { - if (instFilename.str().empty()) { - /* If the original location is empty, try using the inlined location */ - DILocation *oDILoc = cDILoc->getInlinedAt(); - if (oDILoc) { - instFilename = oDILoc->getFilename(); - instLine = oDILoc->getLine(); - } - } + DILocation *cDILoc = dyn_cast(Loc.getAsMDNode()); + + unsigned int instLine = cDILoc->getLine(); + StringRef instFilename = cDILoc->getFilename(); + + if (instFilename.str().empty()) { + + /* If the original location is empty, try using the inlined location + */ + DILocation *oDILoc = cDILoc->getInlinedAt(); + if (oDILoc) { + + instFilename = oDILoc->getFilename(); + instLine = oDILoc->getLine(); + + } - /* Continue only if we know where we actually are */ - if (!instFilename.str().empty()) { - for (std::list::iterator it = myWhitelist.begin(); it != myWhitelist.end(); ++it) { - /* We don't check for filename equality here because - * filenames might actually be full paths. Instead we - * check that the actual filename ends in the filename - * specified in the list. 
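
The check described in this comment is a plain suffix comparison, which is worth restating in isolation because it also accepts accidental suffix collisions. A minimal standalone sketch; the helper name and the example paths are illustrative only:

    #include <string>

    // Mirrors the whitelist test: accept `path` when it ends with the
    // listed `entry`, so full paths still match a bare filename.
    static bool whitelist_match(const std::string &path,
                                const std::string &entry) {
      if (path.length() < entry.length()) return false;
      return path.compare(path.length() - entry.length(),
                          entry.length(), entry) == 0;
    }

    // whitelist_match("/src/lib/parser.c", "parser.c")    -> true
    // whitelist_match("/src/lib/oldparser.c", "parser.c") -> true as well:
    // a pure suffix test cannot tell these apart.
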
*/ - if (instFilename.str().length() >= it->length()) { - if (instFilename.str().compare(instFilename.str().length() - it->length(), it->length(), *it) == 0) { - instrumentBlock = true; - break; - } - } - } - } } - /* Either we couldn't figure out our location or the location is - * not whitelisted, so we skip instrumentation. */ - if (!instrumentBlock) continue; - } + /* Continue only if we know where we actually are */ + if (!instFilename.str().empty()) { + for (std::list::iterator it = myWhitelist.begin(); + it != myWhitelist.end(); ++it) { + + /* We don't check for filename equality here because + * filenames might actually be full paths. Instead we + * check that the actual filename ends in the filename + * specified in the list. */ + if (instFilename.str().length() >= it->length()) { + + if (instFilename.str().compare( + instFilename.str().length() - it->length(), + it->length(), *it) == 0) { + + instrumentBlock = true; + break; + + } + + } + + } + + } + + } + + /* Either we couldn't figure out our location or the location is + * not whitelisted, so we skip instrumentation. */ + if (!instrumentBlock) continue; + + } if (AFL_R(100) >= inst_ratio) continue; /* Make up cur_loc */ - //cur_loc++; + // cur_loc++; cur_loc = AFL_R(MAP_SIZE); // only instrument if this basic block is the destination of a previous @@ -205,24 +228,27 @@ bool AFLCoverage::runOnModule(Module &M) { // this gets rid of ~5-10% of instrumentations that are unnecessary // result: a little more speed and less map pollution int more_than_one = -1; - //fprintf(stderr, "BB %u: ", cur_loc); + // fprintf(stderr, "BB %u: ", cur_loc); for (BasicBlock *Pred : predecessors(&BB)) { + int count = 0; - if (more_than_one == -1) - more_than_one = 0; - //fprintf(stderr, " %p=>", Pred); + if (more_than_one == -1) more_than_one = 0; + // fprintf(stderr, " %p=>", Pred); for (BasicBlock *Succ : successors(Pred)) { - //if (count > 0) + + // if (count > 0) // fprintf(stderr, "|"); if (Succ != NULL) count++; - //fprintf(stderr, "%p", Succ); + // fprintf(stderr, "%p", Succ); + } - if (count > 1) - more_than_one = 1; + + if (count > 1) more_than_one = 1; + } - //fprintf(stderr, " == %d\n", more_than_one); - if (more_than_one != 1) - continue; + + // fprintf(stderr, " == %d\n", more_than_one); + if (more_than_one != 1) continue; ConstantInt *CurLoc = ConstantInt::get(Int32Ty, cur_loc); @@ -236,7 +262,8 @@ bool AFLCoverage::runOnModule(Module &M) { LoadInst *MapPtr = IRB.CreateLoad(AFLMapPtr); MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - Value *MapPtrIdx = IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc)); + Value *MapPtrIdx = + IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc)); /* Update bitmap */ @@ -246,7 +273,9 @@ bool AFLCoverage::runOnModule(Module &M) { Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1)); #if LLVM_VERSION_MAJOR < 9 - if (neverZero_counters_str != NULL) { // with llvm 9 we make this the default as the bug in llvm is then fixed + if (neverZero_counters_str != + NULL) { // with llvm 9 we make this the default as the bug in llvm is + // then fixed #endif /* hexcoder: Realize a counter that skips zero during overflow. 
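
The counter behaviour this comment goes on to derive can be checked in plain C++ first. A sketch of the update the pass emits (solution #3 below), assuming the stock 8-bit map counters, with the matching IRBuilder calls noted:

    #include <cstdint>

    // NeverZero increment: add the overflow back in, so a saturated
    // counter wraps 255 -> 1 instead of 255 -> 0 and the bitmap entry
    // can never look "not taken" again.
    static uint8_t never_zero_inc(uint8_t counter) {
      uint8_t incr = counter + 1;  // IRB.CreateAdd(Counter, 1)
      incr += (incr == 0);         // CreateICmpEQ + CreateZExt + CreateAdd
      return incr;                 // written back via CreateStore below
    }

    // never_zero_inc(0)   == 1
    // never_zero_inc(254) == 255
    // never_zero_inc(255) == 1
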
* Once this counter reaches its maximum value, it next increments to 1 @@ -257,48 +286,67 @@ bool AFLCoverage::runOnModule(Module &M) { * Counter + 1 -> {Counter, OverflowFlag} * Counter + OverflowFlag -> Counter */ -/* // we keep the old solutions just in case - // Solution #1 - if (neverZero_counters_str[0] == '1') { - CallInst *AddOv = IRB.CreateBinaryIntrinsic(Intrinsic::uadd_with_overflow, Counter, ConstantInt::get(Int8Ty, 1)); - AddOv->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); - Value *SumWithOverflowBit = AddOv; - Incr = IRB.CreateAdd(IRB.CreateExtractValue(SumWithOverflowBit, 0), // sum - IRB.CreateZExt( // convert from one bit type to 8 bits type - IRB.CreateExtractValue(SumWithOverflowBit, 1), // overflow - Int8Ty)); - // Solution #2 - } else if (neverZero_counters_str[0] == '2') { - auto cf = IRB.CreateICmpEQ(Counter, ConstantInt::get(Int8Ty, 255)); - Value *HowMuch = IRB.CreateAdd(ConstantInt::get(Int8Ty, 1), cf); - Incr = IRB.CreateAdd(Counter, HowMuch); - // Solution #3 - } else if (neverZero_counters_str[0] == '3') { -*/ - // this is the solution we choose because llvm9 should do the right thing here - auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0)); - auto carry = IRB.CreateZExt(cf, Int8Ty); - Incr = IRB.CreateAdd(Incr, carry); + /* // we keep the old solutions just in case + // Solution #1 + if (neverZero_counters_str[0] == '1') { + + CallInst *AddOv = + IRB.CreateBinaryIntrinsic(Intrinsic::uadd_with_overflow, Counter, + ConstantInt::get(Int8Ty, 1)); + AddOv->setMetadata(M.getMDKindID("nosanitize"), + MDNode::get(C, None)); Value *SumWithOverflowBit = AddOv; Incr = + IRB.CreateAdd(IRB.CreateExtractValue(SumWithOverflowBit, 0), // sum + IRB.CreateZExt( // convert from one bit + type to 8 bits type IRB.CreateExtractValue(SumWithOverflowBit, 1), // + overflow Int8Ty)); + // Solution #2 + + } else if (neverZero_counters_str[0] == '2') { + + auto cf = IRB.CreateICmpEQ(Counter, + ConstantInt::get(Int8Ty, 255)); Value *HowMuch = + IRB.CreateAdd(ConstantInt::get(Int8Ty, 1), cf); Incr = + IRB.CreateAdd(Counter, HowMuch); + // Solution #3 + + } else if (neverZero_counters_str[0] == '3') { + + */ + // this is the solution we choose because llvm9 should do the right + // thing here + auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0)); + auto carry = IRB.CreateZExt(cf, Int8Ty); + Incr = IRB.CreateAdd(Incr, carry); /* // Solution #4 + } else if (neverZero_counters_str[0] == '4') { + auto cf = IRB.CreateICmpULT(Incr, ConstantInt::get(Int8Ty, 1)); auto carry = IRB.CreateZExt(cf, Int8Ty); Incr = IRB.CreateAdd(Incr, carry); + } else { - fprintf(stderr, "Error: unknown value for AFL_NZERO_COUNTS: %s (valid is 1-4)\n", neverZero_counters_str); - exit(-1); + + fprintf(stderr, "Error: unknown value for AFL_NZERO_COUNTS: %s + (valid is 1-4)\n", neverZero_counters_str); exit(-1); + } + */ #if LLVM_VERSION_MAJOR < 9 + } + #endif - IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); + IRB.CreateStore(Incr, MapPtrIdx) + ->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); /* Set prev_loc to cur_loc >> 1 */ - StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc); + StoreInst *Store = + IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc); Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None)); inst_blocks++; @@ -309,11 +357,16 @@ bool AFLCoverage::runOnModule(Module &M) { if (!be_quiet) { - if (!inst_blocks) WARNF("No 
instrumentation targets found."); - else OKF("Instrumented %u locations (%s mode, ratio %u%%).", - inst_blocks, getenv("AFL_HARDEN") ? "hardened" : - ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) ? - "ASAN/MSAN" : "non-hardened"), inst_ratio); + if (!inst_blocks) + WARNF("No instrumentation targets found."); + else + OKF("Instrumented %u locations (%s mode, ratio %u%%).", inst_blocks, + getenv("AFL_HARDEN") + ? "hardened" + : ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) + ? "ASAN/MSAN" + : "non-hardened"), + inst_ratio); } @@ -321,7 +374,6 @@ bool AFLCoverage::runOnModule(Module &M) { } - static void registerAFLPass(const PassManagerBuilder &, legacy::PassManagerBase &PM) { @@ -329,9 +381,9 @@ static void registerAFLPass(const PassManagerBuilder &, } - static RegisterStandardPasses RegisterAFLPass( PassManagerBuilder::EP_OptimizerLast, registerAFLPass); static RegisterStandardPasses RegisterAFLPass0( PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass); + diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index e6d9b993..bc38f1ec 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -20,7 +20,7 @@ */ #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -50,10 +50,9 @@ #include #include - /* Globals needed by the injected instrumentation. The __afl_area_initial region - is used for instrumentation output before __afl_map_shm() has a chance to run. - It will end up as .comm, so it shouldn't be too wasteful. */ + is used for instrumentation output before __afl_map_shm() has a chance to + run. It will end up as .comm, so it shouldn't be too wasteful. */ u8 __afl_area_initial[MAP_SIZE]; u8* __afl_area_ptr = __afl_area_initial; @@ -64,43 +63,46 @@ u32 __afl_prev_loc; __thread u32 __afl_prev_loc; #endif - /* Running in persistent mode? */ static u8 is_persistent; - /* SHM setup. */ static void __afl_map_shm(void) { - u8 *id_str = getenv(SHM_ENV_VAR); + u8* id_str = getenv(SHM_ENV_VAR); /* If we're running under AFL, attach to the appropriate region, replacing the early-stage __afl_area_initial region that is needed to allow some really hacky .init code to work correctly in projects such as OpenSSL. */ if (id_str) { + #ifdef USEMMAP - const char *shm_file_path = id_str; - int shm_fd = -1; - unsigned char *shm_base = NULL; + const char* shm_file_path = id_str; + int shm_fd = -1; + unsigned char* shm_base = NULL; /* create the shared memory segment as if it was a file */ shm_fd = shm_open(shm_file_path, O_RDWR, 0600); if (shm_fd == -1) { + printf("shm_open() failed\n"); exit(1); + } /* map the shared memory segment to the address space of the process */ shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); if (shm_base == MAP_FAILED) { + close(shm_fd); shm_fd = -1; printf("mmap() failed\n"); exit(2); + } __afl_area_ptr = shm_base; @@ -112,7 +114,7 @@ static void __afl_map_shm(void) { /* Whooooops. */ - if (__afl_area_ptr == (void *)-1) _exit(1); + if (__afl_area_ptr == (void*)-1) _exit(1); /* Write something into the bitmap so that even with low AFL_INST_RATIO, our parent doesn't give up on us. */ @@ -123,16 +125,15 @@ static void __afl_map_shm(void) { } - /* Fork server logic. */ static void __afl_start_forkserver(void) { static u8 tmp[4]; - s32 child_pid; + s32 child_pid; + + u8 child_stopped = 0; - u8 child_stopped = 0; - void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL); /* Phone home and tell the parent that we're OK. 
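
What follows is a fixed four-byte protocol over two inherited descriptors. A reduced sketch of one round from the runtime's side, assuming FORKSRV_FD from config.h as the control channel and FORKSRV_FD + 1 as the status channel; persistent-mode stop/resume and most error handling are elided:

    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    // One simplified fork server round: wait for "go", fork, report the
    // child pid, then report how the run ended.
    static void forkserver_round(int ctl_fd, int st_fd) {
      int was_killed, status;
      if (read(ctl_fd, &was_killed, 4) != 4) _exit(1);
      pid_t child = fork();
      if (!child) return;          // child: resume the target
      write(st_fd, &child, 4);
      if (waitpid(child, &status, 0) < 0) _exit(1);
      write(st_fd, &status, 4);
    }
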
If parent isn't there, @@ -154,8 +155,10 @@ static void __afl_start_forkserver(void) { process. */ if (child_stopped && was_killed) { + child_stopped = 0; if (waitpid(child_pid, &status, 0) < 0) _exit(1); + } if (!child_stopped) { @@ -168,12 +171,13 @@ static void __afl_start_forkserver(void) { /* In child process: close fds, resume execution. */ if (!child_pid) { + signal(SIGCHLD, old_sigchld_handler); close(FORKSRV_FD); close(FORKSRV_FD + 1); return; - + } } else { @@ -207,7 +211,6 @@ static void __afl_start_forkserver(void) { } - /* A simplified persistent mode handler, used as explained in README.llvm. */ int __afl_persistent_loop(unsigned int max_cnt) { @@ -227,9 +230,10 @@ int __afl_persistent_loop(unsigned int max_cnt) { memset(__afl_area_ptr, 0, MAP_SIZE); __afl_area_ptr[0] = 1; __afl_prev_loc = 0; + } - cycle_cnt = max_cnt; + cycle_cnt = max_cnt; first_pass = 0; return 1; @@ -262,7 +266,6 @@ int __afl_persistent_loop(unsigned int max_cnt) { } - /* This one can be called from user code when deferred forkserver mode is enabled. */ @@ -280,7 +283,6 @@ void __afl_manual_init(void) { } - /* Proper initialization routine. */ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { @@ -293,7 +295,6 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { } - /* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard. It remains non-operational in the traditional, plugin-backed LLVM mode. For more info about 'trace-pc-guard', see README.llvm. @@ -302,9 +303,10 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) { edge (as opposed to every basic block). */ void __sanitizer_cov_trace_pc_guard(uint32_t* guard) { - __afl_area_ptr[*guard]++; -} + __afl_area_ptr[*guard]++; + +} /* Init callback. Populates instrumentation IDs. Note that we're using ID of 0 as a special value to indicate non-instrumented bits. 
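
The guard scheme described here amounts to two small callbacks. A sketch, assuming the stock MAP_SIZE of 1 << 16 and using rand() in place of the runtime's R() macro:

    #include <cstdint>
    #include <cstdlib>

    enum { kMapSize = 1 << 16 };   // stock MAP_SIZE default
    static uint8_t *map;           // stands in for __afl_area_ptr

    // Init: give each edge a random non-zero slot, or 0 to skip it
    // (0 doubles as the "not instrumented" marker).
    static void guard_init(uint32_t *start, uint32_t *stop,
                           unsigned inst_ratio /* 1..100 */) {
      while (start < stop)
        *start++ = (unsigned)(rand() % 100) < inst_ratio
                       ? (uint32_t)(rand() % (kMapSize - 1)) + 1
                       : 0;
    }

    // Per-edge callback: bump the slot picked at init time.
    static void guard_hit(uint32_t *guard) { map[*guard]++; }
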
That may @@ -321,8 +323,10 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) { if (x) inst_ratio = atoi(x); if (!inst_ratio || inst_ratio > 100) { + fprintf(stderr, "[-] ERROR: Invalid AFL_INST_RATIO (must be 1-100).\n"); abort(); + } /* Make sure that the first element in the range is always set - we use that @@ -333,11 +337,14 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) { while (start < stop) { - if (R(100) < inst_ratio) *start = R(MAP_SIZE - 1) + 1; - else *start = 0; + if (R(100) < inst_ratio) + *start = R(MAP_SIZE - 1) + 1; + else + *start = 0; start++; } } + diff --git a/llvm_mode/compare-transform-pass.so.cc b/llvm_mode/compare-transform-pass.so.cc index e7886db1..e1b6e671 100644 --- a/llvm_mode/compare-transform-pass.so.cc +++ b/llvm_mode/compare-transform-pass.so.cc @@ -36,202 +36,236 @@ using namespace llvm; namespace { - class CompareTransform : public ModulePass { +class CompareTransform : public ModulePass { - public: - static char ID; - CompareTransform() : ModulePass(ID) { - } + public: + static char ID; + CompareTransform() : ModulePass(ID) { - bool runOnModule(Module &M) override; + } + + bool runOnModule(Module &M) override; #if LLVM_VERSION_MAJOR < 4 - const char * getPassName() const override { -#else - StringRef getPassName() const override { -#endif - return "transforms compare functions"; - } - private: - bool transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp - ,const bool processStrncmp, const bool processStrcasecmp, const bool processStrncasecmp); - }; -} + const char *getPassName() const override { +#else + StringRef getPassName() const override { + +#endif + return "transforms compare functions"; + + } + + private: + bool transformCmps(Module &M, const bool processStrcmp, + const bool processMemcmp, const bool processStrncmp, + const bool processStrcasecmp, + const bool processStrncasecmp); + +}; + +} // namespace char CompareTransform::ID = 0; -bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp - , const bool processStrncmp, const bool processStrcasecmp, const bool processStrncasecmp) { +bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, + const bool processMemcmp, + const bool processStrncmp, + const bool processStrcasecmp, + const bool processStrncasecmp) { - std::vector calls; - LLVMContext &C = M.getContext(); - IntegerType *Int8Ty = IntegerType::getInt8Ty(C); - IntegerType *Int32Ty = IntegerType::getInt32Ty(C); - IntegerType *Int64Ty = IntegerType::getInt64Ty(C); + std::vector calls; + LLVMContext & C = M.getContext(); + IntegerType * Int8Ty = IntegerType::getInt8Ty(C); + IntegerType * Int32Ty = IntegerType::getInt32Ty(C); + IntegerType * Int64Ty = IntegerType::getInt64Ty(C); #if LLVM_VERSION_MAJOR < 9 - Constant* + Constant * #else FunctionCallee #endif - c = M.getOrInsertFunction("tolower", - Int32Ty, - Int32Ty + c = M.getOrInsertFunction("tolower", Int32Ty, Int32Ty #if LLVM_VERSION_MAJOR < 5 - , nullptr + , + nullptr #endif - ); + ); #if LLVM_VERSION_MAJOR < 9 - Function* tolowerFn = cast(c); + Function *tolowerFn = cast(c); #else FunctionCallee tolowerFn = c; #endif - /* iterate over all functions, bbs and instruction and add suitable calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp */ + /* iterate over all functions, bbs and instruction and add suitable calls to + * strcmp/memcmp/strncmp/strcasecmp/strncasecmp */ for (auto &F : M) { + for (auto &BB : F) { - for(auto &IN: BB) { - CallInst* 
callInst = nullptr; + + for (auto &IN : BB) { + + CallInst *callInst = nullptr; if ((callInst = dyn_cast(&IN))) { - bool isStrcmp = processStrcmp; - bool isMemcmp = processMemcmp; - bool isStrncmp = processStrncmp; - bool isStrcasecmp = processStrcasecmp; + bool isStrcmp = processStrcmp; + bool isMemcmp = processMemcmp; + bool isStrncmp = processStrncmp; + bool isStrcasecmp = processStrcasecmp; bool isStrncasecmp = processStrncasecmp; Function *Callee = callInst->getCalledFunction(); - if (!Callee) - continue; - if (callInst->getCallingConv() != llvm::CallingConv::C) - continue; + if (!Callee) continue; + if (callInst->getCallingConv() != llvm::CallingConv::C) continue; StringRef FuncName = Callee->getName(); - isStrcmp &= !FuncName.compare(StringRef("strcmp")); - isMemcmp &= !FuncName.compare(StringRef("memcmp")); - isStrncmp &= !FuncName.compare(StringRef("strncmp")); - isStrcasecmp &= !FuncName.compare(StringRef("strcasecmp")); + isStrcmp &= !FuncName.compare(StringRef("strcmp")); + isMemcmp &= !FuncName.compare(StringRef("memcmp")); + isStrncmp &= !FuncName.compare(StringRef("strncmp")); + isStrcasecmp &= !FuncName.compare(StringRef("strcasecmp")); isStrncasecmp &= !FuncName.compare(StringRef("strncasecmp")); - if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp) + if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && + !isStrncasecmp) continue; - /* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function prototype */ + /* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function + * prototype */ FunctionType *FT = Callee->getFunctionType(); - - isStrcmp &= FT->getNumParams() == 2 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()); - isStrcasecmp &= FT->getNumParams() == 2 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()); - isMemcmp &= FT->getNumParams() == 3 && + isStrcmp &= + FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()); + isStrcasecmp &= + FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()); + isMemcmp &= FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0)->isPointerTy() && FT->getParamType(1)->isPointerTy() && FT->getParamType(2)->isIntegerTy(); - isStrncmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()) && - FT->getParamType(2)->isIntegerTy(); + isStrncmp &= FT->getNumParams() == 3 && + FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8PtrTy(M.getContext()) && + FT->getParamType(2)->isIntegerTy(); isStrncasecmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()) && - FT->getParamType(2)->isIntegerTy(); + FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8PtrTy(M.getContext()) && + 
FT->getParamType(2)->isIntegerTy(); - if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp) + if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && + !isStrncasecmp) continue; /* is a str{n,}{case,}cmp/memcmp, check if we have * str{case,}cmp(x, "const") or str{case,}cmp("const", x) * strn{case,}cmp(x, "const", ..) or strn{case,}cmp("const", x, ..) * memcmp(x, "const", ..) or memcmp("const", x, ..) */ - Value *Str1P = callInst->getArgOperand(0), *Str2P = callInst->getArgOperand(1); + Value *Str1P = callInst->getArgOperand(0), + *Str2P = callInst->getArgOperand(1); StringRef Str1, Str2; - bool HasStr1 = getConstantStringInfo(Str1P, Str1); - bool HasStr2 = getConstantStringInfo(Str2P, Str2); + bool HasStr1 = getConstantStringInfo(Str1P, Str1); + bool HasStr2 = getConstantStringInfo(Str2P, Str2); /* handle cases of one string is const, one string is variable */ - if (!(HasStr1 ^ HasStr2)) - continue; + if (!(HasStr1 ^ HasStr2)) continue; if (isMemcmp || isStrncmp || isStrncasecmp) { + /* check if third operand is a constant integer * strlen("constStr") and sizeof() are treated as constant */ - Value *op2 = callInst->getArgOperand(2); - ConstantInt* ilen = dyn_cast(op2); - if (!ilen) - continue; - /* final precaution: if size of compare is larger than constant string skip it*/ - uint64_t literalLength = HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P); - if (literalLength < ilen->getZExtValue()) - continue; + Value * op2 = callInst->getArgOperand(2); + ConstantInt *ilen = dyn_cast(op2); + if (!ilen) continue; + /* final precaution: if size of compare is larger than constant + * string skip it*/ + uint64_t literalLength = + HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P); + if (literalLength < ilen->getZExtValue()) continue; + } calls.push_back(callInst); + } + } + } + } - if (!calls.size()) - return false; - errs() << "Replacing " << calls.size() << " calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp\n"; + if (!calls.size()) return false; + errs() << "Replacing " << calls.size() + << " calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp\n"; - for (auto &callInst: calls) { + for (auto &callInst : calls) { - Value *Str1P = callInst->getArgOperand(0), *Str2P = callInst->getArgOperand(1); - StringRef Str1, Str2, ConstStr; + Value *Str1P = callInst->getArgOperand(0), + *Str2P = callInst->getArgOperand(1); + StringRef Str1, Str2, ConstStr; std::string TmpConstStr; - Value *VarStr; - bool HasStr1 = getConstantStringInfo(Str1P, Str1); + Value * VarStr; + bool HasStr1 = getConstantStringInfo(Str1P, Str1); getConstantStringInfo(Str2P, Str2); uint64_t constLen, sizedLen; - bool isMemcmp = !callInst->getCalledFunction()->getName().compare(StringRef("memcmp")); - bool isSizedcmp = isMemcmp - || !callInst->getCalledFunction()->getName().compare(StringRef("strncmp")) - || !callInst->getCalledFunction()->getName().compare(StringRef("strncasecmp")); - bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare(StringRef("strcasecmp")) - || !callInst->getCalledFunction()->getName().compare(StringRef("strncasecmp")); + bool isMemcmp = + !callInst->getCalledFunction()->getName().compare(StringRef("memcmp")); + bool isSizedcmp = isMemcmp || + !callInst->getCalledFunction()->getName().compare( + StringRef("strncmp")) || + !callInst->getCalledFunction()->getName().compare( + StringRef("strncasecmp")); + bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare( + StringRef("strcasecmp")) || + 
!callInst->getCalledFunction()->getName().compare( + StringRef("strncasecmp")); if (isSizedcmp) { - Value *op2 = callInst->getArgOperand(2); - ConstantInt* ilen = dyn_cast(op2); + + Value * op2 = callInst->getArgOperand(2); + ConstantInt *ilen = dyn_cast(op2); sizedLen = ilen->getZExtValue(); + } if (HasStr1) { + TmpConstStr = Str1.str(); VarStr = Str2P; constLen = isMemcmp ? sizedLen : GetStringLength(Str1P); - } - else { + + } else { + TmpConstStr = Str2.str(); VarStr = Str1P; constLen = isMemcmp ? sizedLen : GetStringLength(Str2P); + } /* properly handle zero terminated C strings by adding the terminating 0 to * the StringRef (in comparison to std::string a StringRef has built-in * runtime bounds checking, which makes debugging easier) */ - TmpConstStr.append("\0", 1); ConstStr = StringRef(TmpConstStr); + TmpConstStr.append("\0", 1); + ConstStr = StringRef(TmpConstStr); - if (isSizedcmp && constLen > sizedLen) { - constLen = sizedLen; - } + if (isSizedcmp && constLen > sizedLen) { constLen = sizedLen; } - errs() << callInst->getCalledFunction()->getName() << ": len " << constLen << ": " << ConstStr << "\n"; + errs() << callInst->getCalledFunction()->getName() << ": len " << constLen + << ": " << ConstStr << "\n"; /* split before the call instruction */ BasicBlock *bb = callInst->getParent(); BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(callInst)); - BasicBlock *next_bb = BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb); + BasicBlock *next_bb = + BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb); BranchInst::Create(end_bb, next_bb); PHINode *PN = PHINode::Create(Int32Ty, constLen + 1, "cmp_phi"); @@ -249,71 +283,81 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const char c = isCaseInsensitive ? tolower(ConstStr[i]) : ConstStr[i]; - BasicBlock::iterator IP = next_bb->getFirstInsertionPt(); - IRBuilder<> IRB(&*IP); + IRBuilder<> IRB(&*IP); - Value* v = ConstantInt::get(Int64Ty, i); - Value *ele = IRB.CreateInBoundsGEP(VarStr, v, "empty"); + Value *v = ConstantInt::get(Int64Ty, i); + Value *ele = IRB.CreateInBoundsGEP(VarStr, v, "empty"); Value *load = IRB.CreateLoad(ele); if (isCaseInsensitive) { + // load >= 'A' && load <= 'Z' ? 
load | 0x020 : load std::vector args; args.push_back(load); load = IRB.CreateCall(tolowerFn, args, "tmp"); load = IRB.CreateTrunc(load, Int8Ty); + } + Value *isub; if (HasStr1) isub = IRB.CreateSub(ConstantInt::get(Int8Ty, c), load); else isub = IRB.CreateSub(load, ConstantInt::get(Int8Ty, c)); - Value *sext = IRB.CreateSExt(isub, Int32Ty); + Value *sext = IRB.CreateSExt(isub, Int32Ty); PN->addIncoming(sext, cur_bb); - if (i < constLen - 1) { - next_bb = BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb); + + next_bb = + BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb); BranchInst::Create(end_bb, next_bb); Value *icmp = IRB.CreateICmpEQ(isub, ConstantInt::get(Int8Ty, 0)); IRB.CreateCondBr(icmp, next_bb, end_bb); cur_bb->getTerminator()->eraseFromParent(); + } else { - //IRB.CreateBr(end_bb); + + // IRB.CreateBr(end_bb); + } - //add offset to varstr - //create load - //create signed isub - //create icmp - //create jcc - //create next_bb + // add offset to varstr + // create load + // create signed isub + // create icmp + // create jcc + // create next_bb + } /* since the call is the first instruction of the bb it is safe to * replace it with a phi instruction */ BasicBlock::iterator ii(callInst); ReplaceInstWithInst(callInst->getParent()->getInstList(), ii, PN); + } - return true; + } bool CompareTransform::runOnModule(Module &M) { if (getenv("AFL_QUIET") == NULL) - llvm::errs() << "Running compare-transform-pass by laf.intel@gmail.com, extended by heiko@hexco.de\n"; + llvm::errs() << "Running compare-transform-pass by laf.intel@gmail.com, " + "extended by heiko@hexco.de\n"; transformCmps(M, true, true, true, true, true); verifyModule(M); return true; + } static void registerCompTransPass(const PassManagerBuilder &, - legacy::PassManagerBase &PM) { + legacy::PassManagerBase &PM) { auto p = new CompareTransform(); PM.add(p); diff --git a/llvm_mode/split-compares-pass.so.cc b/llvm_mode/split-compares-pass.so.cc index a74b60fa..1e9d6542 100644 --- a/llvm_mode/split-compares-pass.so.cc +++ b/llvm_mode/split-compares-pass.so.cc @@ -27,117 +27,126 @@ using namespace llvm; namespace { - class SplitComparesTransform : public ModulePass { - public: - static char ID; - SplitComparesTransform() : ModulePass(ID) {} - bool runOnModule(Module &M) override; +class SplitComparesTransform : public ModulePass { + + public: + static char ID; + SplitComparesTransform() : ModulePass(ID) { + + } + + bool runOnModule(Module &M) override; #if LLVM_VERSION_MAJOR >= 4 - StringRef getPassName() const override { -#else - const char * getPassName() const override { -#endif - return "simplifies and splits ICMP instructions"; - } - private: - bool splitCompares(Module &M, unsigned bitw); - bool simplifyCompares(Module &M); - bool simplifySignedness(Module &M); + StringRef getPassName() const override { - }; -} +#else + const char *getPassName() const override { + +#endif + return "simplifies and splits ICMP instructions"; + + } + + private: + bool splitCompares(Module &M, unsigned bitw); + bool simplifyCompares(Module &M); + bool simplifySignedness(Module &M); + +}; + +} // namespace char SplitComparesTransform::ID = 0; -/* This function splits ICMP instructions with xGE or xLE predicates into two +/* This function splits ICMP instructions with xGE or xLE predicates into two * ICMP instructions with predicate xGT or xLT and EQ */ bool SplitComparesTransform::simplifyCompares(Module &M) { - LLVMContext &C = M.getContext(); - std::vector icomps; - IntegerType *Int1Ty = IntegerType::getInt1Ty(C); 
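
The rewrite this function performs has a one-line scalar statement. A sketch for the unsigned >= case (the function name is illustrative), mapping each branch onto the basic blocks the pass builds:

    // a >= b holds exactly when a > b or a == b, so one hard-to-hit
    // comparison becomes two stages the fuzzer can satisfy separately.
    static bool uge_split(unsigned a, unsigned b) {
      if (a > b) return true;  // new_pred ICMP_UGT, evaluated in bb
      return a == b;           // ICMP_EQ, evaluated in middle_bb
    }
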
+ + LLVMContext & C = M.getContext(); + std::vector icomps; + IntegerType * Int1Ty = IntegerType::getInt1Ty(C); /* iterate over all functions, bbs and instruction and add * all integer comparisons with >= and <= predicates to the icomps vector */ for (auto &F : M) { + for (auto &BB : F) { - for (auto &IN: BB) { - CmpInst* selectcmpInst = nullptr; + + for (auto &IN : BB) { + + CmpInst *selectcmpInst = nullptr; if ((selectcmpInst = dyn_cast(&IN))) { if (selectcmpInst->getPredicate() != CmpInst::ICMP_UGE && selectcmpInst->getPredicate() != CmpInst::ICMP_SGE && selectcmpInst->getPredicate() != CmpInst::ICMP_ULE && - selectcmpInst->getPredicate() != CmpInst::ICMP_SLE ) { + selectcmpInst->getPredicate() != CmpInst::ICMP_SLE) { + continue; + } auto op0 = selectcmpInst->getOperand(0); auto op1 = selectcmpInst->getOperand(1); - IntegerType* intTyOp0 = dyn_cast(op0->getType()); - IntegerType* intTyOp1 = dyn_cast(op1->getType()); + IntegerType *intTyOp0 = dyn_cast(op0->getType()); + IntegerType *intTyOp1 = dyn_cast(op1->getType()); /* this is probably not needed but we do it anyway */ - if (!intTyOp0 || !intTyOp1) { - continue; - } + if (!intTyOp0 || !intTyOp1) { continue; } icomps.push_back(selectcmpInst); + } + } + } + } - if (!icomps.size()) { - return false; - } + if (!icomps.size()) { return false; } + for (auto &IcmpInst : icomps) { - for (auto &IcmpInst: icomps) { - BasicBlock* bb = IcmpInst->getParent(); + BasicBlock *bb = IcmpInst->getParent(); auto op0 = IcmpInst->getOperand(0); auto op1 = IcmpInst->getOperand(1); /* find out what the new predicate is going to be */ - auto pred = dyn_cast(IcmpInst)->getPredicate(); + auto pred = dyn_cast(IcmpInst)->getPredicate(); CmpInst::Predicate new_pred; - switch(pred) { - case CmpInst::ICMP_UGE: - new_pred = CmpInst::ICMP_UGT; - break; - case CmpInst::ICMP_SGE: - new_pred = CmpInst::ICMP_SGT; - break; - case CmpInst::ICMP_ULE: - new_pred = CmpInst::ICMP_ULT; - break; - case CmpInst::ICMP_SLE: - new_pred = CmpInst::ICMP_SLT; - break; - default: // keep the compiler happy + switch (pred) { + + case CmpInst::ICMP_UGE: new_pred = CmpInst::ICMP_UGT; break; + case CmpInst::ICMP_SGE: new_pred = CmpInst::ICMP_SGT; break; + case CmpInst::ICMP_ULE: new_pred = CmpInst::ICMP_ULT; break; + case CmpInst::ICMP_SLE: new_pred = CmpInst::ICMP_SLT; break; + default: // keep the compiler happy continue; + } /* split before the icmp instruction */ - BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); + BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); /* the old bb now contains a unconditional jump to the new one (end_bb) * we need to delete it later */ /* create the ICMP instruction with new_pred and add it to the old basic * block bb it is now at the position where the old IcmpInst was */ - Instruction* icmp_np; + Instruction *icmp_np; icmp_np = CmpInst::Create(Instruction::ICmp, new_pred, op0, op1); bb->getInstList().insert(bb->getTerminator()->getIterator(), icmp_np); /* create a new basic block which holds the new EQ icmp */ Instruction *icmp_eq; /* insert middle_bb before end_bb */ - BasicBlock* middle_bb = BasicBlock::Create(C, "injected", - end_bb->getParent(), end_bb); + BasicBlock *middle_bb = + BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); icmp_eq = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, op0, op1); middle_bb->getInstList().push_back(icmp_eq); /* add an unconditional branch to the end of middle_bb with destination @@ -150,7 +159,6 @@ bool SplitComparesTransform::simplifyCompares(Module 
&M) { BranchInst::Create(end_bb, middle_bb, icmp_np, bb); term->eraseFromParent(); - /* replace the old IcmpInst (which is the first inst in end_bb) with a PHI * inst to wire up the loose ends */ PHINode *PN = PHINode::Create(Int1Ty, 2, ""); @@ -162,118 +170,139 @@ bool SplitComparesTransform::simplifyCompares(Module &M) { /* replace the old IcmpInst with our new and shiny PHI inst */ BasicBlock::iterator ii(IcmpInst); ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); + } return true; + } /* this function transforms signed compares to equivalent unsigned compares */ bool SplitComparesTransform::simplifySignedness(Module &M) { - LLVMContext &C = M.getContext(); - std::vector icomps; - IntegerType *Int1Ty = IntegerType::getInt1Ty(C); + + LLVMContext & C = M.getContext(); + std::vector icomps; + IntegerType * Int1Ty = IntegerType::getInt1Ty(C); /* iterate over all functions, bbs and instruction and add * all signed compares to icomps vector */ for (auto &F : M) { + for (auto &BB : F) { - for(auto &IN: BB) { - CmpInst* selectcmpInst = nullptr; + + for (auto &IN : BB) { + + CmpInst *selectcmpInst = nullptr; if ((selectcmpInst = dyn_cast(&IN))) { if (selectcmpInst->getPredicate() != CmpInst::ICMP_SGT && - selectcmpInst->getPredicate() != CmpInst::ICMP_SLT - ) { + selectcmpInst->getPredicate() != CmpInst::ICMP_SLT) { + continue; + } auto op0 = selectcmpInst->getOperand(0); auto op1 = selectcmpInst->getOperand(1); - IntegerType* intTyOp0 = dyn_cast(op0->getType()); - IntegerType* intTyOp1 = dyn_cast(op1->getType()); + IntegerType *intTyOp0 = dyn_cast(op0->getType()); + IntegerType *intTyOp1 = dyn_cast(op1->getType()); /* see above */ - if (!intTyOp0 || !intTyOp1) { - continue; - } + if (!intTyOp0 || !intTyOp1) { continue; } /* i think this is not possible but to lazy to look it up */ - if (intTyOp0->getBitWidth() != intTyOp1->getBitWidth()) { - continue; - } + if (intTyOp0->getBitWidth() != intTyOp1->getBitWidth()) { continue; } icomps.push_back(selectcmpInst); + } + } + } + } - if (!icomps.size()) { - return false; - } + if (!icomps.size()) { return false; } - for (auto &IcmpInst: icomps) { - BasicBlock* bb = IcmpInst->getParent(); + for (auto &IcmpInst : icomps) { + + BasicBlock *bb = IcmpInst->getParent(); auto op0 = IcmpInst->getOperand(0); auto op1 = IcmpInst->getOperand(1); - IntegerType* intTyOp0 = dyn_cast(op0->getType()); - unsigned bitw = intTyOp0->getBitWidth(); + IntegerType *intTyOp0 = dyn_cast(op0->getType()); + unsigned bitw = intTyOp0->getBitWidth(); IntegerType *IntType = IntegerType::get(C, bitw); - /* get the new predicate */ - auto pred = dyn_cast(IcmpInst)->getPredicate(); + auto pred = dyn_cast(IcmpInst)->getPredicate(); CmpInst::Predicate new_pred; if (pred == CmpInst::ICMP_SGT) { + new_pred = CmpInst::ICMP_UGT; + } else { + new_pred = CmpInst::ICMP_ULT; + } - BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); + BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); /* create a 1 bit compare for the sign bit. 
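
The construction that follows reduces to sign-bit bookkeeping. A 32-bit sketch for the SGT case, annotated with the IR each line corresponds to; the names are illustrative:

    #include <cstdint>

    // Signed a > b from the sign bits plus one unsigned compare:
    // differing signs are decided in sign_bb, equal signs in middle_bb.
    static bool sgt_via_unsigned(int32_t a, int32_t b) {
      bool sa = ((uint32_t)a >> 31) != 0;  // LShr + Trunc on op0
      bool sb = ((uint32_t)b >> 31) != 0;  // LShr + Trunc on op1
      if (sa != sb)                        // icmp_sign_bit was false
        return sa < sb;                    // ICMP_ULT on the sign bits
      return (uint32_t)a > (uint32_t)b;    // plain ICMP_UGT (new_pred)
    }
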
to do this shift and trunc * the original operands so only the first bit remains.*/ Instruction *s_op0, *t_op0, *s_op1, *t_op1, *icmp_sign_bit; - s_op0 = BinaryOperator::Create(Instruction::LShr, op0, ConstantInt::get(IntType, bitw - 1)); + s_op0 = BinaryOperator::Create(Instruction::LShr, op0, + ConstantInt::get(IntType, bitw - 1)); bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op0); t_op0 = new TruncInst(s_op0, Int1Ty); bb->getInstList().insert(bb->getTerminator()->getIterator(), t_op0); - s_op1 = BinaryOperator::Create(Instruction::LShr, op1, ConstantInt::get(IntType, bitw - 1)); + s_op1 = BinaryOperator::Create(Instruction::LShr, op1, + ConstantInt::get(IntType, bitw - 1)); bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op1); t_op1 = new TruncInst(s_op1, Int1Ty); bb->getInstList().insert(bb->getTerminator()->getIterator(), t_op1); /* compare of the sign bits */ - icmp_sign_bit = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, t_op0, t_op1); + icmp_sign_bit = + CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, t_op0, t_op1); bb->getInstList().insert(bb->getTerminator()->getIterator(), icmp_sign_bit); /* create a new basic block which is executed if the signedness bit is - * different */ + * different */ Instruction *icmp_inv_sig_cmp; - BasicBlock* sign_bb = BasicBlock::Create(C, "sign", end_bb->getParent(), end_bb); + BasicBlock * sign_bb = + BasicBlock::Create(C, "sign", end_bb->getParent(), end_bb); if (pred == CmpInst::ICMP_SGT) { + /* if we check for > and the op0 positive and op1 negative then the final * result is true. if op0 negative and op1 pos, the cmp must result * in false */ - icmp_inv_sig_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, t_op0, t_op1); + icmp_inv_sig_cmp = + CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, t_op0, t_op1); + } else { + /* just the inverse of the above statement */ - icmp_inv_sig_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, t_op0, t_op1); + icmp_inv_sig_cmp = + CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, t_op0, t_op1); + } + sign_bb->getInstList().push_back(icmp_inv_sig_cmp); BranchInst::Create(end_bb, sign_bb); /* create a new bb which is executed if signedness is equal */ Instruction *icmp_usign_cmp; - BasicBlock* middle_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); + BasicBlock * middle_bb = + BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); /* we can do a normal unsigned compare now */ icmp_usign_cmp = CmpInst::Create(Instruction::ICmp, new_pred, op0, op1); middle_bb->getInstList().push_back(icmp_usign_cmp); @@ -285,7 +314,6 @@ bool SplitComparesTransform::simplifySignedness(Module &M) { BranchInst::Create(middle_bb, sign_bb, icmp_sign_bit, bb); term->eraseFromParent(); - PHINode *PN = PHINode::Create(Int1Ty, 2, ""); PN->addIncoming(icmp_usign_cmp, middle_bb); @@ -293,91 +321,100 @@ bool SplitComparesTransform::simplifySignedness(Module &M) { BasicBlock::iterator ii(IcmpInst); ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); + } return true; + } /* splits icmps of size bitw into two nested icmps with bitw/2 size each */ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { + LLVMContext &C = M.getContext(); IntegerType *Int1Ty = IntegerType::getInt1Ty(C); IntegerType *OldIntType = IntegerType::get(C, bitw); IntegerType *NewIntType = IntegerType::get(C, bitw / 2); - std::vector icomps; + std::vector icomps; - if (bitw % 2) { - return false; - } + if (bitw % 2) { return false; } /* not 
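
The nesting built below is easiest to see on concrete types. A sketch of the EQ case at bitw == 64 (the same halving then recurses down to 16 bits), with illustrative names:

    #include <cstdint>

    // 64-bit equality as two 32-bit hurdles: compare the high halves
    // first (icmp_high), and only then the low halves (icmp_low).
    static bool eq64_split(uint64_t a, uint64_t b) {
      if ((uint32_t)(a >> 32) != (uint32_t)(b >> 32)) return false;
      return (uint32_t)a == (uint32_t)b;
    }
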
supported yet */ - if (bitw > 64) { - return false; - } + if (bitw > 64) { return false; } - /* get all EQ, NE, UGT, and ULT icmps of width bitw. if the other two + /* get all EQ, NE, UGT, and ULT icmps of width bitw. if the other two * unctions were executed only these four predicates should exist */ for (auto &F : M) { + for (auto &BB : F) { - for(auto &IN: BB) { - CmpInst* selectcmpInst = nullptr; + + for (auto &IN : BB) { + + CmpInst *selectcmpInst = nullptr; if ((selectcmpInst = dyn_cast(&IN))) { - if(selectcmpInst->getPredicate() != CmpInst::ICMP_EQ && - selectcmpInst->getPredicate() != CmpInst::ICMP_NE && - selectcmpInst->getPredicate() != CmpInst::ICMP_UGT && - selectcmpInst->getPredicate() != CmpInst::ICMP_ULT - ) { + if (selectcmpInst->getPredicate() != CmpInst::ICMP_EQ && + selectcmpInst->getPredicate() != CmpInst::ICMP_NE && + selectcmpInst->getPredicate() != CmpInst::ICMP_UGT && + selectcmpInst->getPredicate() != CmpInst::ICMP_ULT) { + continue; + } auto op0 = selectcmpInst->getOperand(0); auto op1 = selectcmpInst->getOperand(1); - IntegerType* intTyOp0 = dyn_cast(op0->getType()); - IntegerType* intTyOp1 = dyn_cast(op1->getType()); + IntegerType *intTyOp0 = dyn_cast(op0->getType()); + IntegerType *intTyOp1 = dyn_cast(op1->getType()); - if (!intTyOp0 || !intTyOp1) { - continue; - } + if (!intTyOp0 || !intTyOp1) { continue; } /* check if the bitwidths are the one we are looking for */ - if (intTyOp0->getBitWidth() != bitw || intTyOp1->getBitWidth() != bitw) { + if (intTyOp0->getBitWidth() != bitw || + intTyOp1->getBitWidth() != bitw) { + continue; + } icomps.push_back(selectcmpInst); + } + } + } + } - if (!icomps.size()) { - return false; - } + if (!icomps.size()) { return false; } - for (auto &IcmpInst: icomps) { - BasicBlock* bb = IcmpInst->getParent(); + for (auto &IcmpInst : icomps) { + + BasicBlock *bb = IcmpInst->getParent(); auto op0 = IcmpInst->getOperand(0); auto op1 = IcmpInst->getOperand(1); auto pred = dyn_cast(IcmpInst)->getPredicate(); - BasicBlock* end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); + BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst)); /* create the comparison of the top halves of the original operands */ Instruction *s_op0, *op0_high, *s_op1, *op1_high, *icmp_high; - s_op0 = BinaryOperator::Create(Instruction::LShr, op0, ConstantInt::get(OldIntType, bitw / 2)); + s_op0 = BinaryOperator::Create(Instruction::LShr, op0, + ConstantInt::get(OldIntType, bitw / 2)); bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op0); op0_high = new TruncInst(s_op0, NewIntType); bb->getInstList().insert(bb->getTerminator()->getIterator(), op0_high); - s_op1 = BinaryOperator::Create(Instruction::LShr, op1, ConstantInt::get(OldIntType, bitw / 2)); + s_op1 = BinaryOperator::Create(Instruction::LShr, op1, + ConstantInt::get(OldIntType, bitw / 2)); bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op1); op1_high = new TruncInst(s_op1, NewIntType); bb->getInstList().insert(bb->getTerminator()->getIterator(), op1_high); @@ -387,11 +424,13 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { /* now we have to destinguish between == != and > < */ if (pred == CmpInst::ICMP_EQ || pred == CmpInst::ICMP_NE) { + /* transformation for == and != icmps */ /* create a compare for the lower half of the original operands */ Instruction *op0_low, *op1_low, *icmp_low; - BasicBlock* cmp_low_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); + BasicBlock * cmp_low_bb = + BasicBlock::Create(C, 
"injected", end_bb->getParent(), end_bb); op0_low = new TruncInst(op0, NewIntType); cmp_low_bb->getInstList().push_back(op0_low); @@ -407,21 +446,30 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { * the comparison */ auto term = bb->getTerminator(); if (pred == CmpInst::ICMP_EQ) { + BranchInst::Create(cmp_low_bb, end_bb, icmp_high, bb); + } else { + /* CmpInst::ICMP_NE */ BranchInst::Create(end_bb, cmp_low_bb, icmp_high, bb); + } + term->eraseFromParent(); /* create the PHI and connect the edges accordingly */ PHINode *PN = PHINode::Create(Int1Ty, 2, ""); PN->addIncoming(icmp_low, cmp_low_bb); if (pred == CmpInst::ICMP_EQ) { + PN->addIncoming(ConstantInt::get(Int1Ty, 0), bb); + } else { + /* CmpInst::ICMP_NE */ PN->addIncoming(ConstantInt::get(Int1Ty, 1), bb); + } /* replace the old icmp with the new PHI */ @@ -429,19 +477,28 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); } else { + /* CmpInst::ICMP_UGT and CmpInst::ICMP_ULT */ /* transformations for < and > */ - /* create a basic block which checks for the inverse predicate. + /* create a basic block which checks for the inverse predicate. * if this is true we can go to the end if not we have to got to the * bb which checks the lower half of the operands */ Instruction *icmp_inv_cmp, *op0_low, *op1_low, *icmp_low; - BasicBlock* inv_cmp_bb = BasicBlock::Create(C, "inv_cmp", end_bb->getParent(), end_bb); + BasicBlock * inv_cmp_bb = + BasicBlock::Create(C, "inv_cmp", end_bb->getParent(), end_bb); if (pred == CmpInst::ICMP_UGT) { - icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, op0_high, op1_high); + + icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, + op0_high, op1_high); + } else { - icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, op0_high, op1_high); + + icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, + op0_high, op1_high); + } + inv_cmp_bb->getInstList().push_back(icmp_inv_cmp); auto term = bb->getTerminator(); @@ -449,7 +506,8 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { BranchInst::Create(end_bb, inv_cmp_bb, icmp_high, bb); /* create a bb which handles the cmp of the lower halves */ - BasicBlock* cmp_low_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); + BasicBlock *cmp_low_bb = + BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb); op0_low = new TruncInst(op0, NewIntType); cmp_low_bb->getInstList().push_back(op0_low); op1_low = new TruncInst(op1, NewIntType); @@ -468,57 +526,64 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) { BasicBlock::iterator ii(IcmpInst); ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); + } + } - return true; + + return true; + } bool SplitComparesTransform::runOnModule(Module &M) { + int bitw = 64; - char* bitw_env = getenv("LAF_SPLIT_COMPARES_BITW"); - if (!bitw_env) - bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW"); - if (bitw_env) { - bitw = atoi(bitw_env); - } + char *bitw_env = getenv("LAF_SPLIT_COMPARES_BITW"); + if (!bitw_env) bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW"); + if (bitw_env) { bitw = atoi(bitw_env); } simplifyCompares(M); simplifySignedness(M); if (getenv("AFL_QUIET") == NULL) - errs() << "Split-compare-pass by laf.intel@gmail.com\n"; + errs() << "Split-compare-pass by laf.intel@gmail.com\n"; switch (bitw) { + case 64: - errs() << "Running split-compare-pass " << 64 << "\n"; + 
errs() << "Running split-compare-pass " << 64 << "\n"; splitCompares(M, 64); - [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ + [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ case 32: - errs() << "Running split-compare-pass " << 32 << "\n"; + errs() << "Running split-compare-pass " << 32 << "\n"; splitCompares(M, 32); - [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ + [[clang::fallthrough]]; /*FALLTHRU*/ /* FALLTHROUGH */ case 16: - errs() << "Running split-compare-pass " << 16 << "\n"; + errs() << "Running split-compare-pass " << 16 << "\n"; splitCompares(M, 16); break; default: - errs() << "NOT Running split-compare-pass \n"; + errs() << "NOT Running split-compare-pass \n"; return false; break; + } verifyModule(M); return true; + } static void registerSplitComparesPass(const PassManagerBuilder &, - legacy::PassManagerBase &PM) { + legacy::PassManagerBase &PM) { + PM.add(new SplitComparesTransform()); + } static RegisterStandardPasses RegisterSplitComparesPass( @@ -526,3 +591,4 @@ static RegisterStandardPasses RegisterSplitComparesPass( static RegisterStandardPasses RegisterSplitComparesTransPass0( PassManagerBuilder::EP_EnabledOnOptLevel0, registerSplitComparesPass); + diff --git a/llvm_mode/split-switches-pass.so.cc b/llvm_mode/split-switches-pass.so.cc index 1ace3185..2743a71a 100644 --- a/llvm_mode/split-switches-pass.so.cc +++ b/llvm_mode/split-switches-pass.so.cc @@ -36,54 +36,65 @@ using namespace llvm; namespace { - class SplitSwitchesTransform : public ModulePass { +class SplitSwitchesTransform : public ModulePass { - public: - static char ID; - SplitSwitchesTransform() : ModulePass(ID) { - } + public: + static char ID; + SplitSwitchesTransform() : ModulePass(ID) { - bool runOnModule(Module &M) override; + } + + bool runOnModule(Module &M) override; #if LLVM_VERSION_MAJOR >= 4 - StringRef getPassName() const override { + StringRef getPassName() const override { + #else - const char * getPassName() const override { + const char *getPassName() const override { + #endif - return "splits switch constructs"; - } - struct CaseExpr { - ConstantInt* Val; - BasicBlock* BB; + return "splits switch constructs"; - CaseExpr(ConstantInt *val = nullptr, BasicBlock *bb = nullptr) : - Val(val), BB(bb) { } - }; + } - typedef std::vector CaseVector; + struct CaseExpr { + + ConstantInt *Val; + BasicBlock * BB; + + CaseExpr(ConstantInt *val = nullptr, BasicBlock *bb = nullptr) + : Val(val), BB(bb) { + + } - private: - bool splitSwitches(Module &M); - bool transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp); - BasicBlock* switchConvert(CaseVector Cases, std::vector bytesChecked, - BasicBlock* OrigBlock, BasicBlock* NewDefault, - Value* Val, unsigned level); }; -} + typedef std::vector CaseVector; + + private: + bool splitSwitches(Module &M); + bool transformCmps(Module &M, const bool processStrcmp, + const bool processMemcmp); + BasicBlock *switchConvert(CaseVector Cases, std::vector bytesChecked, + BasicBlock *OrigBlock, BasicBlock *NewDefault, + Value *Val, unsigned level); + +}; + +} // namespace char SplitSwitchesTransform::ID = 0; - /* switchConvert - Transform simple list of Cases into list of CaseRange's */ -BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector bytesChecked, - BasicBlock* OrigBlock, BasicBlock* NewDefault, - Value* Val, unsigned level) { +BasicBlock *SplitSwitchesTransform::switchConvert( + CaseVector Cases, std::vector bytesChecked, BasicBlock *OrigBlock, + BasicBlock *NewDefault, Value *Val, unsigned 
level) { - unsigned ValTypeBitWidth = Cases[0].Val->getBitWidth(); - IntegerType *ValType = IntegerType::get(OrigBlock->getContext(), ValTypeBitWidth); - IntegerType *ByteType = IntegerType::get(OrigBlock->getContext(), 8); - unsigned BytesInValue = bytesChecked.size(); + unsigned ValTypeBitWidth = Cases[0].Val->getBitWidth(); + IntegerType *ValType = + IntegerType::get(OrigBlock->getContext(), ValTypeBitWidth); + IntegerType * ByteType = IntegerType::get(OrigBlock->getContext(), 8); + unsigned BytesInValue = bytesChecked.size(); std::vector setSizes; std::vector> byteSets(BytesInValue, std::set()); @@ -91,43 +102,54 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector< /* for each of the possible cases we iterate over all bytes of the values * build a set of possible values at each byte position in byteSets */ - for (CaseExpr& Case: Cases) { + for (CaseExpr &Case : Cases) { + for (unsigned i = 0; i < BytesInValue; i++) { - uint8_t byte = (Case.Val->getZExtValue() >> (i*8)) & 0xFF; + uint8_t byte = (Case.Val->getZExtValue() >> (i * 8)) & 0xFF; byteSets[i].insert(byte); + } + } /* find the index of the first byte position that was not yet checked. then * save the number of possible values at that byte position */ unsigned smallestIndex = 0; unsigned smallestSize = 257; - for(unsigned i = 0; i < byteSets.size(); i++) { - if (bytesChecked[i]) - continue; + for (unsigned i = 0; i < byteSets.size(); i++) { + + if (bytesChecked[i]) continue; if (byteSets[i].size() < smallestSize) { + smallestIndex = i; smallestSize = byteSets[i].size(); + } + } + assert(bytesChecked[smallestIndex] == false); /* there are only smallestSize different bytes at index smallestIndex */ - + Instruction *Shift, *Trunc; - Function* F = OrigBlock->getParent(); - BasicBlock* NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock", F); - Shift = BinaryOperator::Create(Instruction::LShr, Val, ConstantInt::get(ValType, smallestIndex * 8)); + Function * F = OrigBlock->getParent(); + BasicBlock * NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock", F); + Shift = BinaryOperator::Create(Instruction::LShr, Val, + ConstantInt::get(ValType, smallestIndex * 8)); NewNode->getInstList().push_back(Shift); if (ValTypeBitWidth > 8) { + Trunc = new TruncInst(Shift, ByteType); NewNode->getInstList().push_back(Trunc); - } - else { + + } else { + /* not necessary to trunc */ Trunc = Shift; + } /* this is a trivial case, we can directly check for the byte, @@ -135,118 +157,155 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector< * mark the byte as checked. 
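
The selection just performed can be restated compactly. A sketch over 32-bit case constants (the pass handles arbitrary widths through bytesChecked); the helper name is illustrative:

    #include <cstdint>
    #include <set>
    #include <vector>

    // Pick the unchecked byte position where the case constants take
    // the fewest distinct values; that byte splits the tree best.
    static unsigned pick_split_byte(const std::vector<uint32_t> &cases,
                                    const std::vector<bool> &checked) {
      unsigned best = 0, best_size = 257;  // 257 exceeds any byte set size
      for (unsigned i = 0; i < checked.size(); i++) {
        if (checked[i]) continue;
        std::set<uint8_t> seen;
        for (uint32_t v : cases)
          seen.insert((uint8_t)((v >> (i * 8)) & 0xFF));
        if (seen.size() < best_size) {
          best = i;
          best_size = (unsigned)seen.size();
        }
      }
      return best;
    }
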
if this was the last byte to check * we can finally execute the block belonging to this case */ - if (smallestSize == 1) { + uint8_t byte = *(byteSets[smallestIndex].begin()); - /* insert instructions to check whether the value we are switching on is equal to byte */ - ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_EQ, Trunc, ConstantInt::get(ByteType, byte), "byteMatch"); + /* insert instructions to check whether the value we are switching on is + * equal to byte */ + ICmpInst *Comp = + new ICmpInst(ICmpInst::ICMP_EQ, Trunc, ConstantInt::get(ByteType, byte), + "byteMatch"); NewNode->getInstList().push_back(Comp); bytesChecked[smallestIndex] = true; - if (std::all_of(bytesChecked.begin(), bytesChecked.end(), [](bool b){return b;} )) { + if (std::all_of(bytesChecked.begin(), bytesChecked.end(), + [](bool b) { return b; })) { + assert(Cases.size() == 1); BranchInst::Create(Cases[0].BB, NewDefault, Comp, NewNode); /* we have to update the phi nodes! */ - for (BasicBlock::iterator I = Cases[0].BB->begin(); I != Cases[0].BB->end(); ++I) { - if (!isa(&*I)) { - continue; - } + for (BasicBlock::iterator I = Cases[0].BB->begin(); + I != Cases[0].BB->end(); ++I) { + + if (!isa(&*I)) { continue; } PHINode *PN = cast(I); /* Only update the first occurrence. */ unsigned Idx = 0, E = PN->getNumIncomingValues(); for (; Idx != E; ++Idx) { + if (PN->getIncomingBlock(Idx) == OrigBlock) { + PN->setIncomingBlock(Idx, NewNode); break; + } + } + } - } - else { - BasicBlock* BB = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, level + 1); + + } else { + + BasicBlock *BB = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, + Val, level + 1); BranchInst::Create(BB, NewDefault, Comp, NewNode); + } + } + /* there is no byte which we can directly check on, split the tree */ else { std::vector byteVector; - std::copy(byteSets[smallestIndex].begin(), byteSets[smallestIndex].end(), std::back_inserter(byteVector)); + std::copy(byteSets[smallestIndex].begin(), byteSets[smallestIndex].end(), + std::back_inserter(byteVector)); std::sort(byteVector.begin(), byteVector.end()); uint8_t pivot = byteVector[byteVector.size() / 2]; - /* we already chose to divide the cases based on the value of byte at index smallestIndex - * the pivot value determines the threshold for the decicion; if a case value - * is smaller at this byte index move it to the LHS vector, otherwise to the RHS vector */ + /* we already chose to divide the cases based on the value of byte at index + * smallestIndex the pivot value determines the threshold for the decicion; + * if a case value + * is smaller at this byte index move it to the LHS vector, otherwise to the + * RHS vector */ CaseVector LHSCases, RHSCases; - for (CaseExpr& Case: Cases) { - uint8_t byte = (Case.Val->getZExtValue() >> (smallestIndex*8)) & 0xFF; + for (CaseExpr &Case : Cases) { + + uint8_t byte = (Case.Val->getZExtValue() >> (smallestIndex * 8)) & 0xFF; if (byte < pivot) { - LHSCases.push_back(Case); - } - else { - RHSCases.push_back(Case); - } - } - BasicBlock *LBB, *RBB; - LBB = switchConvert(LHSCases, bytesChecked, OrigBlock, NewDefault, Val, level + 1); - RBB = switchConvert(RHSCases, bytesChecked, OrigBlock, NewDefault, Val, level + 1); - /* insert instructions to check whether the value we are switching on is equal to byte */ - ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_ULT, Trunc, ConstantInt::get(ByteType, pivot), "byteMatch"); + LHSCases.push_back(Case); + + } else { + + RHSCases.push_back(Case); + + } + + } + + BasicBlock *LBB, *RBB; + LBB = 
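When no byte position is trivially decidable, the recursion below splits on the median of the observed byte values. A self-contained model of that partitioning step (illustrative only, not the pass itself):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u8(const void *a, const void *b) {
  return (int)*(const uint8_t *)a - (int)*(const uint8_t *)b;
}

int main(void) {
  /* Distinct byte values observed at the chosen index, as in byteVector. */
  uint8_t bytes[] = {0x01, 0x20, 0x7F, 0xBE, 0xEF};
  size_t n = sizeof(bytes) / sizeof(bytes[0]);
  qsort(bytes, n, 1, cmp_u8);
  uint8_t pivot = bytes[n / 2];  /* median byte, as in the pass */
  /* Cases whose byte is < pivot go left, the rest go right; each side
     is converted recursively and one ICMP_ULT selects the subtree. */
  for (size_t i = 0; i < n; i++)
    printf("0x%02x -> %s\n", bytes[i], bytes[i] < pivot ? "LHS" : "RHS");
  return 0;
}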
switchConvert(LHSCases, bytesChecked, OrigBlock, NewDefault, Val, + level + 1); + RBB = switchConvert(RHSCases, bytesChecked, OrigBlock, NewDefault, Val, + level + 1); + + /* insert instructions to check whether the value we are switching on is + * equal to byte */ + ICmpInst *Comp = + new ICmpInst(ICmpInst::ICMP_ULT, Trunc, + ConstantInt::get(ByteType, pivot), "byteMatch"); NewNode->getInstList().push_back(Comp); BranchInst::Create(LBB, RBB, Comp, NewNode); } return NewNode; + } bool SplitSwitchesTransform::splitSwitches(Module &M) { - std::vector switches; + std::vector switches; /* iterate over all functions, bbs and instruction and add * all switches to switches vector for later processing */ for (auto &F : M) { + for (auto &BB : F) { - SwitchInst* switchInst = nullptr; + + SwitchInst *switchInst = nullptr; if ((switchInst = dyn_cast(BB.getTerminator()))) { - if (switchInst->getNumCases() < 1) - continue; - switches.push_back(switchInst); + + if (switchInst->getNumCases() < 1) continue; + switches.push_back(switchInst); + } + } + } - if (!switches.size()) - return false; - errs() << "Rewriting " << switches.size() << " switch statements " << "\n"; + if (!switches.size()) return false; + errs() << "Rewriting " << switches.size() << " switch statements " + << "\n"; - for (auto &SI: switches) { + for (auto &SI : switches) { BasicBlock *CurBlock = SI->getParent(); BasicBlock *OrigBlock = CurBlock; - Function *F = CurBlock->getParent(); + Function * F = CurBlock->getParent(); /* this is the value we are switching on */ - Value *Val = SI->getCondition(); - BasicBlock* Default = SI->getDefaultDest(); - unsigned bitw = Val->getType()->getIntegerBitWidth(); + Value * Val = SI->getCondition(); + BasicBlock *Default = SI->getDefaultDest(); + unsigned bitw = Val->getType()->getIntegerBitWidth(); errs() << "switch: " << SI->getNumCases() << " cases " << bitw << " bit\n"; - /* If there is only the default destination or the condition checks 8 bit or less, don't bother with the code below. */ + /* If there is only the default destination or the condition checks 8 bit or + * less, don't bother with the code below. */ if (!SI->getNumCases() || bitw <= 8) { - if (getenv("AFL_QUIET") == NULL) - errs() << "skip trivial switch..\n"; + + if (getenv("AFL_QUIET") == NULL) errs() << "skip trivial switch..\n"; continue; + } /* Create a new, empty default block so that the new hierarchy of @@ -258,10 +317,10 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) { NewDefault->insertInto(F, Default); BranchInst::Create(Default, NewDefault); - /* Prepare cases vector. */ CaseVector Cases; - for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) + for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; + ++i) #if LLVM_VERSION_MAJOR < 5 Cases.push_back(CaseExpr(i.getCaseValue(), i.getCaseSuccessor())); #else @@ -269,8 +328,10 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) { #endif /* bugfix thanks to pbst * round up bytesChecked (in case getBitWidth() % 8 != 0) */ - std::vector bytesChecked((7 + Cases[0].Val->getBitWidth()) / 8, false); - BasicBlock* SwitchBlock = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, 0); + std::vector bytesChecked((7 + Cases[0].Val->getBitWidth()) / 8, + false); + BasicBlock * SwitchBlock = + switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, 0); /* Branch to our shiny new if-then stuff... 
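The rounded byte count computed above matters for conditions whose width is not a multiple of 8; a quick check of the (7 + bitw) / 8 formula, plain arithmetic with no LLVM dependency:

#include <stdio.h>

/* bytesChecked must cover ceil(bitwidth / 8) positions, hence the
   rounding fixed above: i20 needs 3 bytes, i33 needs 5. */
int main(void) {
  unsigned widths[] = {8, 16, 20, 32, 33, 64};
  for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
    printf("i%u -> %u byte(s)\n", widths[i], (7 + widths[i]) / 8);
  return 0;
}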
*/ BranchInst::Create(SwitchBlock, OrigBlock); @@ -278,41 +339,47 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) { /* We are now done with the switch instruction, delete it. */ CurBlock->getInstList().erase(SI); + /* we have to update the phi nodes! */ + for (BasicBlock::iterator I = Default->begin(); I != Default->end(); ++I) { - /* we have to update the phi nodes! */ - for (BasicBlock::iterator I = Default->begin(); I != Default->end(); ++I) { - if (!isa(&*I)) { - continue; - } - PHINode *PN = cast(I); + if (!isa(&*I)) { continue; } + PHINode *PN = cast(I); - /* Only update the first occurrence. */ - unsigned Idx = 0, E = PN->getNumIncomingValues(); - for (; Idx != E; ++Idx) { - if (PN->getIncomingBlock(Idx) == OrigBlock) { - PN->setIncomingBlock(Idx, NewDefault); - break; - } - } - } - } + /* Only update the first occurrence. */ + unsigned Idx = 0, E = PN->getNumIncomingValues(); + for (; Idx != E; ++Idx) { + + if (PN->getIncomingBlock(Idx) == OrigBlock) { + + PN->setIncomingBlock(Idx, NewDefault); + break; + + } + + } + + } + + } + + verifyModule(M); + return true; - verifyModule(M); - return true; } bool SplitSwitchesTransform::runOnModule(Module &M) { if (getenv("AFL_QUIET") == NULL) - llvm::errs() << "Running split-switches-pass by laf.intel@gmail.com\n"; + llvm::errs() << "Running split-switches-pass by laf.intel@gmail.com\n"; splitSwitches(M); verifyModule(M); return true; + } static void registerSplitSwitchesTransPass(const PassManagerBuilder &, - legacy::PassManagerBase &PM) { + legacy::PassManagerBase &PM) { auto p = new SplitSwitchesTransform(); PM.add(p); @@ -324,3 +391,4 @@ static RegisterStandardPasses RegisterSplitSwitchesTransPass( static RegisterStandardPasses RegisterSplitSwitchesTransPass0( PassManagerBuilder::EP_EnabledOnOptLevel0, registerSplitSwitchesTransPass); + diff --git a/qemu_mode/libcompcov/compcovtest.cc b/qemu_mode/libcompcov/compcovtest.cc index fd1fda00..171e4526 100644 --- a/qemu_mode/libcompcov/compcovtest.cc +++ b/qemu_mode/libcompcov/compcovtest.cc @@ -3,13 +3,13 @@ // Author: Mateusz Jurczyk (mjurczyk@google.com) // // Copyright 2019 Google LLC -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,7 +17,8 @@ // limitations under the License. 
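The checks in this test can also be satisfied by generating the input programmatically instead of via the echo line quoted below; a sketch assuming a little-endian host (the output file name is arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Builds the same stdin as the echo -ne solution line. */
int main(void) {
  const char* s = "The quick brown fox jumps over the lazy dog"; /* 43 chars */
  uint64_t x = 0xCAFEBABECAFEBABEULL;
  uint32_t y = 0xDEADC0DE;
  uint16_t z = 0xBEEF;
  FILE* f = fopen("solution.bin", "wb");
  if (!f) return 1;
  fwrite(s, 1, strlen(s), f);
  fwrite(&x, sizeof(x), 1, f);
  fwrite(&y, sizeof(y), 1, f);
  fwrite(&z, sizeof(z), 1, f);
  fclose(f);
  return 0;
}

Feeding solution.bin to ./compcovtest drives it past every comparison down to the final abort().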
// -// solution: echo -ne 'The quick brown fox jumps over the lazy dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest +// solution: echo -ne 'The quick brown fox jumps over the lazy +// dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest #include #include @@ -25,39 +26,40 @@ #include int main() { - char buffer[44] = { /* zero padding */ }; + + char buffer[44] = {/* zero padding */}; fread(buffer, 1, sizeof(buffer) - 1, stdin); if (memcmp(&buffer[0], "The quick brown fox ", 20) != 0 || strncmp(&buffer[20], "jumps over ", 11) != 0 || strcmp(&buffer[31], "the lazy dog") != 0) { + return 1; + } uint64_t x = 0; fread(&x, sizeof(x), 1, stdin); - if (x != 0xCAFEBABECAFEBABE) { - return 2; - } + if (x != 0xCAFEBABECAFEBABE) { return 2; } uint32_t y = 0; fread(&y, sizeof(y), 1, stdin); - if (y != 0xDEADC0DE) { - return 3; - } + if (y != 0xDEADC0DE) { return 3; } uint16_t z = 0; fread(&z, sizeof(z), 1, stdin); switch (z) { - case 0xBEEF: - break; + + case 0xBEEF: break; - default: - return 4; + default: return 4; + } printf("Puzzle solved, congrats!\n"); abort(); return 0; + } + diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c index 9e44067e..e758c034 100644 --- a/qemu_mode/libcompcov/libcompcov.so.c +++ b/qemu_mode/libcompcov/libcompcov.so.c @@ -40,10 +40,9 @@ #define MAX_CMP_LENGTH 32 -static void *__compcov_code_start, - *__compcov_code_end; +static void *__compcov_code_start, *__compcov_code_end; -static u8 *__compcov_afl_map; +static u8* __compcov_afl_map; static u32 __compcov_level; @@ -55,15 +54,11 @@ static int (*__libc_memcmp)(const void*, const void*, size_t); static int debug_fd = -1; - #define MAX_MAPPINGS 1024 -static struct mapping { - void *st, *en; -} __compcov_ro[MAX_MAPPINGS]; - -static u32 __compcov_ro_cnt; +static struct mapping { void *st, *en; } __compcov_ro[MAX_MAPPINGS]; +static u32 __compcov_ro_cnt; /* Check an address against the list of read-only mappings. 
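The read-only mapping table introduced here drives the level-1 gate used by all of the hooks: a comparison is skipped unless one operand lives in read-only memory (i.e. is presumably a constant). Its effect can be modeled in isolation; is_ro below is a stand-in, not the real lookup:

#include <stdio.h>

/* Stand-in for the real mapping table: one fake read-only range. */
static const char ro_blob[] = "constant";

static int is_ro(const void* p) {
  return p >= (const void*)ro_blob &&
         p < (const void*)(ro_blob + sizeof(ro_blob));
}

/* Equivalent of the hook condition
   !(level < 2 && !is_ro(a) && !is_ro(b)): level 2 logs everything,
   level 1 only comparisons against read-only data. */
static int should_log(int level, const void* a, const void* b) {
  if (level >= 2) return 1;
  return is_ro(a) || is_ro(b);
}

int main(void) {
  char stack_buf[9] = "constant";
  printf("%d\n", should_log(1, stack_buf, ro_blob));   /* 1: logged  */
  printf("%d\n", should_log(1, stack_buf, stack_buf)); /* 0: skipped */
  return 0;
}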
*/ @@ -71,42 +66,42 @@ static u8 __compcov_is_ro(const void* ptr) { u32 i; - for (i = 0; i < __compcov_ro_cnt; i++) + for (i = 0; i < __compcov_ro_cnt; i++) if (ptr >= __compcov_ro[i].st && ptr <= __compcov_ro[i].en) return 1; return 0; + } +static size_t __strlen2(const char* s1, const char* s2, size_t max_length) { -static size_t __strlen2(const char *s1, const char *s2, size_t max_length) { // from https://github.com/googleprojectzero/CompareCoverage - + size_t len = 0; - for (; len < max_length && s1[len] != '\0' && s2[len] != '\0'; len++) { } + for (; len < max_length && s1[len] != '\0' && s2[len] != '\0'; len++) {} return len; + } /* Identify the binary boundaries in the memory mapping */ static void __compcov_load(void) { - + __libc_strcmp = dlsym(RTLD_NEXT, "strcmp"); __libc_strncmp = dlsym(RTLD_NEXT, "strncmp"); __libc_strcasecmp = dlsym(RTLD_NEXT, "strcasecmp"); __libc_strncasecmp = dlsym(RTLD_NEXT, "strncasecmp"); __libc_memcmp = dlsym(RTLD_NEXT, "memcmp"); - if (getenv("AFL_QEMU_COMPCOV")) { - - __compcov_level = 1; - } + if (getenv("AFL_QEMU_COMPCOV")) { __compcov_level = 1; } if (getenv("AFL_COMPCOV_LEVEL")) { __compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); + } - - char *id_str = getenv(SHM_ENV_VAR); - int shm_id; + + char* id_str = getenv(SHM_ENV_VAR); + int shm_id; if (id_str) { @@ -114,61 +109,72 @@ static void __compcov_load(void) { __compcov_afl_map = shmat(shm_id, NULL, 0); if (__compcov_afl_map == (void*)-1) exit(1); + } else { - + __compcov_afl_map = calloc(1, MAP_SIZE); + } if (getenv("AFL_INST_LIBS")) { - + __compcov_code_start = (void*)0; __compcov_code_end = (void*)-1; return; + } char* bin_name = getenv("AFL_COMPCOV_BINNAME"); procmaps_iterator* maps = pmparser_parse(-1); - procmaps_struct* maps_tmp = NULL; + procmaps_struct* maps_tmp = NULL; while ((maps_tmp = pmparser_next(maps)) != NULL) { - + /* If AFL_COMPCOV_BINNAME is not set pick the first executable segment */ if (!bin_name || strstr(maps_tmp->pathname, bin_name) != NULL) { - + if (maps_tmp->is_x) { - if (!__compcov_code_start) - __compcov_code_start = maps_tmp->addr_start; - if (!__compcov_code_end) - __compcov_code_end = maps_tmp->addr_end; + + if (!__compcov_code_start) __compcov_code_start = maps_tmp->addr_start; + if (!__compcov_code_end) __compcov_code_end = maps_tmp->addr_end; + } + } - + if ((maps_tmp->is_w && !maps_tmp->is_r) || __compcov_ro_cnt == MAX_MAPPINGS) continue; - + __compcov_ro[__compcov_ro_cnt].st = maps_tmp->addr_start; __compcov_ro[__compcov_ro_cnt].en = maps_tmp->addr_end; + } pmparser_free(maps); -} +} static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) { size_t i; - + if (debug_fd != 1) { + char debugbuf[4096]; - snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc, v0 == NULL ? "(null)" : (char*)v0, v1 == NULL ? "(null)" : (char*)v1, n); + snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc, + v0 == NULL ? "(null)" : (char*)v0, + v1 == NULL ? "(null)" : (char*)v1, n); write(debug_fd, debugbuf, strlen(debugbuf)); + } - + for (i = 0; i < n && v0[i] == v1[i]; ++i) { - - __compcov_afl_map[cur_loc +i]++; + + __compcov_afl_map[cur_loc + i]++; + } + } /* Check an address against the list of read-only mappings. */ @@ -176,8 +182,8 @@ static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) { static u8 __compcov_is_in_bound(const void* ptr) { return ptr >= __compcov_code_start && ptr < __compcov_code_end; -} +} /* Replacements for strcmp(), memcmp(), and so on. 
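Two pieces of the hooks that follow can be exercised standalone: the return-address mangling that picks a bitmap slot, and the prefix-progress logging done by __compcov_trace(). A sketch assuming AFL's default 64 KB map:

#include <stdint.h>
#include <stdio.h>

#define MAP_SIZE 65536  /* assumption: default AFL map size */
static uint8_t afl_map[MAP_SIZE];

/* Slot derivation used by the hooks: mangle the caller's return
   address into a bitmap index. */
static uint64_t loc_to_slot(uint64_t retaddr) {
  return ((retaddr >> 4) ^ (retaddr << 8)) & (MAP_SIZE - 1);
}

/* Model of __compcov_trace(): one counter per matching prefix byte,
   so the fuzzer observes progress as a guess converges. */
static void trace_cmp(uint64_t cur_loc, const char* a, const char* b,
                      size_t n) {
  size_t i;
  for (i = 0; i < n && a[i] == b[i]; i++)
    afl_map[(cur_loc + i) % MAP_SIZE]++;
}

int main(void) {
  uint64_t slot = loc_to_slot(0x400abc);
  trace_cmp(slot, "deadbeef", "deadc0de", 8);  /* shared prefix "dead" */
  printf("slot %llu: first 4 counters set\n", (unsigned long long)slot);
  return 0;
}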
Note that these will be used only if the target is compiled with -fno-builtins and linked dynamically. */ @@ -187,127 +193,145 @@ static u8 __compcov_is_in_bound(const void* ptr) { int strcmp(const char* str1, const char* str2) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { - size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); - + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(str1) && + !__compcov_is_ro(str2))) { + + size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1); + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, str1, str2, n); + } + } return __libc_strcmp(str1, str2); -} +} #undef strncmp int strncmp(const char* str1, const char* str2, size_t len) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { - size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(str1) && + !__compcov_is_ro(str2))) { + + size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1); n = MIN(n, len); - - if (n <= MAX_CMP_LENGTH) { - - u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); - cur_loc &= MAP_SIZE - 1; - - __compcov_trace(cur_loc, str1, str2, n); - } - } - - return __libc_strncmp(str1, str2, len); -} + if (n <= MAX_CMP_LENGTH) { + + u64 cur_loc = (u64)retaddr; + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc &= MAP_SIZE - 1; + + __compcov_trace(cur_loc, str1, str2, n); + + } + + } + + return __libc_strncmp(str1, str2, len); + +} #undef strcasecmp int strcasecmp(const char* str1, const char* str2) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { + + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(str1) && + !__compcov_is_ro(str2))) { + /* Fallback to strcmp, maybe improve in future */ - size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); - + size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1); + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, str1, str2, n); + } + } return __libc_strcasecmp(str1, str2); -} +} #undef strncasecmp int strncasecmp(const char* str1, const char* str2, size_t len) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(str1) && !__compcov_is_ro(str2))) { + + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(str1) && + !__compcov_is_ro(str2))) { + /* Fallback to strncmp, maybe improve in future */ - size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH +1); + size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1); n = MIN(n, len); - + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, str1, str2, n); + } + } return __libc_strncasecmp(str1, str2, len); -} +} #undef memcmp int memcmp(const void* mem1, 
const void* mem2, size_t len) { void* retaddr = __builtin_return_address(0); - - if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 && - !__compcov_is_ro(mem1) && !__compcov_is_ro(mem2))) { + + if (__compcov_is_in_bound(retaddr) && + !(__compcov_level < 2 && !__compcov_is_ro(mem1) && + !__compcov_is_ro(mem2))) { size_t n = len; - + if (n <= MAX_CMP_LENGTH) { - + u64 cur_loc = (u64)retaddr; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; - + __compcov_trace(cur_loc, mem1, mem2, n); + } + } return __libc_memcmp(mem1, mem2, len); + } /* Init code to open init the library. */ @@ -315,9 +339,10 @@ int memcmp(const void* mem1, const void* mem2, size_t len) { __attribute__((constructor)) void __compcov_init(void) { if (getenv("AFL_QEMU_COMPCOV_DEBUG") != NULL) - debug_fd = open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644); + debug_fd = + open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644); __compcov_load(); + } - diff --git a/qemu_mode/libcompcov/pmparser.h b/qemu_mode/libcompcov/pmparser.h index 34d0cd50..91dfd032 100644 --- a/qemu_mode/libcompcov/pmparser.h +++ b/qemu_mode/libcompcov/pmparser.h @@ -13,54 +13,60 @@ implied warranty. */ #ifndef H_PMPARSER -#define H_PMPARSER -#include -#include -#include -#include -#include -#include -#include -#include -#include +# define H_PMPARSER +# include +# include +# include +# include +# include +# include +# include +# include +# include -//maximum line length in a procmaps file -#define PROCMAPS_LINE_MAX_LENGTH (PATH_MAX + 100) +// maximum line length in a procmaps file +# define PROCMAPS_LINE_MAX_LENGTH (PATH_MAX + 100) /** * procmaps_struct * @desc hold all the information about an area in the process's VM */ -typedef struct procmaps_struct{ - void* addr_start; //< start address of the area - void* addr_end; //< end address - unsigned long length; //< size of the range +typedef struct procmaps_struct { - char perm[5]; //< permissions rwxp - short is_r; //< rewrote of perm with short flags - short is_w; - short is_x; - short is_p; + void* addr_start; //< start address of the area + void* addr_end; //< end address + unsigned long length; //< size of the range - long offset; //< offset - char dev[12]; //< dev major:minor - int inode; //< inode of the file that backs the area + char perm[5]; //< permissions rwxp + short is_r; //< rewrote of perm with short flags + short is_w; + short is_x; + short is_p; + + long offset; //< offset + char dev[12]; //< dev major:minor + int inode; //< inode of the file that backs the area + + char pathname[600]; //< the path of the file that backs the area + // chained list + struct procmaps_struct* next; //=0 ){ - sprintf(maps_path,"/proc/%d/maps",pid); - }else{ - sprintf(maps_path,"/proc/self/maps"); - } - FILE* file=fopen(maps_path,"r"); - if(!file){ - fprintf(stderr,"pmparser : cannot open the memory maps, %s\n",strerror(errno)); - return NULL; - } - int ind=0;char buf[PROCMAPS_LINE_MAX_LENGTH]; - //int c; - procmaps_struct* list_maps=NULL; - procmaps_struct* tmp; - procmaps_struct* current_node=list_maps; - char addr1[20],addr2[20], perm[8], offset[20], dev[10],inode[30],pathname[PATH_MAX]; - while( !feof(file) ){ - fgets(buf,PROCMAPS_LINE_MAX_LENGTH,file); - //allocate a node - tmp=(procmaps_struct*)malloc(sizeof(procmaps_struct)); - //fill the node - _pmparser_split_line(buf,addr1,addr2,perm,offset, dev,inode,pathname); - //printf("#%s",buf); - //printf("%s-%s %s %s %s 
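For context, the iterator API being reformatted here is consumed as follows; a minimal example, assuming pmparser.h is on the include path:

#include <stdio.h>
#include "pmparser.h"

/* Walk our own address space and print the executable mappings,
   the same pattern __compcov_load() uses. */
int main(void) {
  procmaps_iterator* maps = pmparser_parse(-1);  /* -1 => /proc/self */
  procmaps_struct* m;
  if (!maps) return 1;
  while ((m = pmparser_next(maps)) != NULL)
    if (m->is_x) printf("%p-%p %s\n", m->addr_start, m->addr_end, m->pathname);
  pmparser_free(maps);
  return 0;
}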
%s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname); - //addr_start & addr_end - //unsigned long l_addr_start; - sscanf(addr1,"%lx",(long unsigned *)&tmp->addr_start ); - sscanf(addr2,"%lx",(long unsigned *)&tmp->addr_end ); - //size - tmp->length=(unsigned long)(tmp->addr_end-tmp->addr_start); - //perm - strcpy(tmp->perm,perm); - tmp->is_r=(perm[0]=='r'); - tmp->is_w=(perm[1]=='w'); - tmp->is_x=(perm[2]=='x'); - tmp->is_p=(perm[3]=='p'); + procmaps_iterator* maps_it = malloc(sizeof(procmaps_iterator)); + char maps_path[500]; + if (pid >= 0) { - //offset - sscanf(offset,"%lx",&tmp->offset ); - //device - strcpy(tmp->dev,dev); - //inode - tmp->inode=atoi(inode); - //pathname - strcpy(tmp->pathname,pathname); - tmp->next=NULL; - //attach the node - if(ind==0){ - list_maps=tmp; - list_maps->next=NULL; - current_node=list_maps; - } - current_node->next=tmp; - current_node=tmp; - ind++; - //printf("%s",buf); - } + sprintf(maps_path, "/proc/%d/maps", pid); - //close file - fclose(file); + } else { + sprintf(maps_path, "/proc/self/maps"); - //g_last_head=list_maps; - maps_it->head = list_maps; - maps_it->current = list_maps; - return maps_it; -} + } + FILE* file = fopen(maps_path, "r"); + if (!file) { -procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it){ - if(p_procmaps_it->current == NULL) - return NULL; - procmaps_struct* p_current = p_procmaps_it->current; - p_procmaps_it->current = p_procmaps_it->current->next; - return p_current; - /* - if(g_current==NULL){ - g_current=g_last_head; - }else - g_current=g_current->next; + fprintf(stderr, "pmparser : cannot open the memory maps, %s\n", + strerror(errno)); + return NULL; - return g_current; - */ -} + } + int ind = 0; + char buf[PROCMAPS_LINE_MAX_LENGTH]; + // int c; + procmaps_struct* list_maps = NULL; + procmaps_struct* tmp; + procmaps_struct* current_node = list_maps; + char addr1[20], addr2[20], perm[8], offset[20], dev[10], inode[30], + pathname[PATH_MAX]; + while (!feof(file)) { + fgets(buf, PROCMAPS_LINE_MAX_LENGTH, file); + // allocate a node + tmp = (procmaps_struct*)malloc(sizeof(procmaps_struct)); + // fill the node + _pmparser_split_line(buf, addr1, addr2, perm, offset, dev, inode, pathname); + // printf("#%s",buf); + // printf("%s-%s %s %s %s + // %s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname); addr_start & + // addr_end unsigned long l_addr_start; + sscanf(addr1, "%lx", (long unsigned*)&tmp->addr_start); + sscanf(addr2, "%lx", (long unsigned*)&tmp->addr_end); + // size + tmp->length = (unsigned long)(tmp->addr_end - tmp->addr_start); + // perm + strcpy(tmp->perm, perm); + tmp->is_r = (perm[0] == 'r'); + tmp->is_w = (perm[1] == 'w'); + tmp->is_x = (perm[2] == 'x'); + tmp->is_p = (perm[3] == 'p'); -void pmparser_free(procmaps_iterator* p_procmaps_it){ - procmaps_struct* maps_list = p_procmaps_it->head; - if(maps_list==NULL) return ; - procmaps_struct* act=maps_list; - procmaps_struct* nxt=act->next; - while(act!=NULL){ - free(act); - act=nxt; - if(nxt!=NULL) - nxt=nxt->next; - } + // offset + sscanf(offset, "%lx", &tmp->offset); + // device + strcpy(tmp->dev, dev); + // inode + tmp->inode = atoi(inode); + // pathname + strcpy(tmp->pathname, pathname); + tmp->next = NULL; + // attach the node + if (ind == 0) { + + list_maps = tmp; + list_maps->next = NULL; + current_node = list_maps; + + } + + current_node->next = tmp; + current_node = tmp; + ind++; + // printf("%s",buf); + + } + + // close file + fclose(file); + + // g_last_head=list_maps; + maps_it->head = list_maps; + maps_it->current = list_maps; + return 
maps_it; } +procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it) { -void _pmparser_split_line( - char*buf,char*addr1,char*addr2, - char*perm,char* offset,char* device,char*inode, - char* pathname){ - // - int orig=0; - int i=0; - //addr1 - while(buf[i]!='-'){ - addr1[i-orig]=buf[i]; - i++; - } - addr1[i]='\0'; - i++; - //addr2 - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - addr2[i-orig]=buf[i]; - i++; - } - addr2[i-orig]='\0'; + if (p_procmaps_it->current == NULL) return NULL; + procmaps_struct* p_current = p_procmaps_it->current; + p_procmaps_it->current = p_procmaps_it->current->next; + return p_current; + /* + if(g_current==NULL){ - //perm - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - perm[i-orig]=buf[i]; - i++; - } - perm[i-orig]='\0'; - //offset - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - offset[i-orig]=buf[i]; - i++; - } - offset[i-orig]='\0'; - //dev - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - device[i-orig]=buf[i]; - i++; - } - device[i-orig]='\0'; - //inode - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' '){ - inode[i-orig]=buf[i]; - i++; - } - inode[i-orig]='\0'; - //pathname - pathname[0]='\0'; - while(buf[i]=='\t' || buf[i]==' ') - i++; - orig=i; - while(buf[i]!='\t' && buf[i]!=' ' && buf[i]!='\n'){ - pathname[i-orig]=buf[i]; - i++; - } - pathname[i-orig]='\0'; + g_current=g_last_head; + + }else + + g_current=g_current->next; + + return g_current; + */ } +void pmparser_free(procmaps_iterator* p_procmaps_it) { + + procmaps_struct* maps_list = p_procmaps_it->head; + if (maps_list == NULL) return; + procmaps_struct* act = maps_list; + procmaps_struct* nxt = act->next; + while (act != NULL) { + + free(act); + act = nxt; + if (nxt != NULL) nxt = nxt->next; + + } + +} + +void _pmparser_split_line(char* buf, char* addr1, char* addr2, char* perm, + char* offset, char* device, char* inode, + char* pathname) { + + // + int orig = 0; + int i = 0; + // addr1 + while (buf[i] != '-') { + + addr1[i - orig] = buf[i]; + i++; + + } + + addr1[i] = '\0'; + i++; + // addr2 + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + addr2[i - orig] = buf[i]; + i++; + + } + + addr2[i - orig] = '\0'; + + // perm + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + perm[i - orig] = buf[i]; + i++; + + } + + perm[i - orig] = '\0'; + // offset + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + offset[i - orig] = buf[i]; + i++; + + } + + offset[i - orig] = '\0'; + // dev + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + device[i - orig] = buf[i]; + i++; + + } + + device[i - orig] = '\0'; + // inode + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ') { + + inode[i - orig] = buf[i]; + i++; + + } + + inode[i - orig] = '\0'; + // pathname + pathname[0] = '\0'; + while (buf[i] == '\t' || buf[i] == ' ') + i++; + orig = i; + while (buf[i] != '\t' && buf[i] != ' ' && buf[i] != '\n') { + + pathname[i - orig] = buf[i]; + i++; + + } + + pathname[i - orig] = '\0'; + +} #endif + diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h index c475cb58..c87bacb6 100644 --- a/qemu_mode/patches/afl-qemu-common.h +++ b/qemu_mode/patches/afl-qemu-common.h @@ -33,19 +33,17 @@ 
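The hand-rolled tokenizer in _pmparser_split_line() can be cross-checked against a single sscanf() over one maps line; a sketch, not the library code (the sample line and buffer sizes are arbitrary):

#include <stdio.h>

int main(void) {
  const char* line =
      "7f3a9c000000-7f3a9c021000 r-xp 00000000 08:01 1048576 /usr/lib/libc.so";
  char addr1[20], addr2[20], perm[8], offset[20], dev[12], inode[30],
      path[256] = "";
  int n = sscanf(line, "%19[^-]-%19s %7s %19s %11s %29s %255s",
                 addr1, addr2, perm, offset, dev, inode, path);
  printf("%d fields: %s %s %s %s\n", n, addr1, addr2, perm, path);
  return 0;
}

Like the original tokenizer, this breaks pathnames at whitespace, which is fine for the binary-matching use above.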
#include "../../config.h" -/* NeverZero */ +/* NeverZero */ #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) -# define INC_AFL_AREA(loc) \ - asm volatile ( \ - "incb (%0, %1, 1)\n" \ - "adcb $0, (%0, %1, 1)\n" \ - : /* no out */ \ - : "r" (afl_area_ptr), "r" (loc) \ - : "memory", "eax" \ - ) +# define INC_AFL_AREA(loc) \ + asm volatile( \ + "incb (%0, %1, 1)\n" \ + "adcb $0, (%0, %1, 1)\n" \ + : /* no out */ \ + : "r"(afl_area_ptr), "r"(loc) \ + : "memory", "eax") #else -# define INC_AFL_AREA(loc) \ - afl_area_ptr[loc]++ +# define INC_AFL_AREA(loc) afl_area_ptr[loc]++ #endif diff --git a/qemu_mode/patches/afl-qemu-cpu-inl.h b/qemu_mode/patches/afl-qemu-cpu-inl.h index 4ad31b60..2a1331cb 100644 --- a/qemu_mode/patches/afl-qemu-cpu-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-inl.h @@ -42,11 +42,16 @@ _start and does the usual forkserver stuff, not very different from regular instrumentation injected via afl-as.h. */ -#define AFL_QEMU_CPU_SNIPPET2 do { \ - if(itb->pc == afl_entry_point) { \ - afl_setup(); \ - afl_forkserver(cpu); \ - } \ +#define AFL_QEMU_CPU_SNIPPET2 \ + do { \ + \ + if (itb->pc == afl_entry_point) { \ + \ + afl_setup(); \ + afl_forkserver(cpu); \ + \ + } \ + \ } while (0) /* We use one additional file descriptor to relay "needs translation" @@ -56,60 +61,71 @@ /* This is equivalent to afl-as.h: */ -static unsigned char dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */ -unsigned char *afl_area_ptr = dummy; /* Exported for afl_gen_trace */ +static unsigned char + dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */ +unsigned char *afl_area_ptr = dummy; /* Exported for afl_gen_trace */ /* Exported variables populated by the code patched into elfload.c: */ -abi_ulong afl_entry_point, /* ELF entry point (_start) */ - afl_start_code, /* .text start pointer */ - afl_end_code; /* .text end pointer */ +abi_ulong afl_entry_point, /* ELF entry point (_start) */ + afl_start_code, /* .text start pointer */ + afl_end_code; /* .text end pointer */ u8 afl_compcov_level; /* Set in the child process in forkserver mode: */ -static int forkserver_installed = 0; +static int forkserver_installed = 0; static unsigned char afl_fork_child; -unsigned int afl_forksrv_pid; +unsigned int afl_forksrv_pid; /* Instrumentation ratio: */ -unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */ +unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */ /* Function declarations. 
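The incb/adcb pair in INC_AFL_AREA has a simple C equivalent: feed the carry back in, so a counter that wraps 255 -> 0 lands on 1 instead and a hit edge never reads as "not taken". A standalone model of that NeverZero behavior:

#include <stdint.h>
#include <stdio.h>

static void inc_never_zero(uint8_t* p) {
  *p = *p + 1 + (*p == 255);  /* +1, plus the carry on wraparound */
}

int main(void) {
  uint8_t v = 255;
  inc_never_zero(&v);
  printf("%u\n", v);  /* 1, not 0 */
  return 0;
}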
*/ static void afl_setup(void); -static void afl_forkserver(CPUState*); +static void afl_forkserver(CPUState *); -static void afl_wait_tsl(CPUState*, int); -static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t, TranslationBlock*, int); +static void afl_wait_tsl(CPUState *, int); +static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t, + TranslationBlock *, int); /* Data structures passed around by the translate handlers: */ struct afl_tb { + target_ulong pc; target_ulong cs_base; - uint32_t flags; - uint32_t cf_mask; + uint32_t flags; + uint32_t cf_mask; + }; struct afl_tsl { + struct afl_tb tb; - char is_chain; + char is_chain; + }; struct afl_chain { + struct afl_tb last_tb; - uint32_t cf_mask; - int tb_exit; + uint32_t cf_mask; + int tb_exit; + }; /* Some forward decls: */ -TranslationBlock *tb_htable_lookup(CPUState*, target_ulong, target_ulong, uint32_t, uint32_t); -static inline TranslationBlock *tb_find(CPUState*, TranslationBlock*, int, uint32_t); -static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb_next); +TranslationBlock *tb_htable_lookup(CPUState *, target_ulong, target_ulong, + uint32_t, uint32_t); +static inline TranslationBlock *tb_find(CPUState *, TranslationBlock *, int, + uint32_t); +static inline void tb_add_jump(TranslationBlock *tb, int n, + TranslationBlock *tb_next); /************************* * ACTUAL IMPLEMENTATION * @@ -119,8 +135,7 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb static void afl_setup(void) { - char *id_str = getenv(SHM_ENV_VAR), - *inst_r = getenv("AFL_INST_RATIO"); + char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO"); int shm_id; @@ -142,7 +157,7 @@ static void afl_setup(void) { shm_id = atoi(id_str); afl_area_ptr = shmat(shm_id, NULL, 0); - if (afl_area_ptr == (void*)-1) exit(1); + if (afl_area_ptr == (void *)-1) exit(1); /* With AFL_INST_RATIO set to a low value, we want to touch the bitmap so that the parent doesn't give up on us. */ @@ -154,18 +169,16 @@ static void afl_setup(void) { if (getenv("AFL_INST_LIBS")) { afl_start_code = 0; - afl_end_code = (abi_ulong)-1; + afl_end_code = (abi_ulong)-1; } - + /* Maintain for compatibility */ - if (getenv("AFL_QEMU_COMPCOV")) { - - afl_compcov_level = 1; - } + if (getenv("AFL_QEMU_COMPCOV")) { afl_compcov_level = 1; } if (getenv("AFL_COMPCOV_LEVEL")) { afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); + } /* pthread_atfork() seems somewhat broken in util/rcu.c, and I'm @@ -176,17 +189,15 @@ static void afl_setup(void) { } - /* Fork server logic, invoked once we hit _start. */ static void afl_forkserver(CPUState *cpu) { static unsigned char tmp[4]; - if (forkserver_installed == 1) - return; + if (forkserver_installed == 1) return; forkserver_installed = 1; - //if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer + // if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer /* Tell the parent that we're alive. If the parent doesn't want to talk, assume that we're not running in forkserver mode. */ @@ -200,7 +211,7 @@ static void afl_forkserver(CPUState *cpu) { while (1) { pid_t child_pid; - int status, t_fd[2]; + int status, t_fd[2]; /* Whoops, parent dead? */ @@ -246,59 +257,60 @@ static void afl_forkserver(CPUState *cpu) { } - /* This code is invoked whenever QEMU decides that it doesn't have a translation of a particular block and needs to compute it, or when it decides to chain two TBs together. 
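The fork server loop being reformatted here follows AFL's usual protocol; a skeleton of just the handshake and fork/report cycle, with error handling and the TSL descriptors left out (FORKSRV_FD as defined in AFL's config.h):

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define FORKSRV_FD 198  /* AFL's fixed descriptor pair */

int main(void) {
  static unsigned char tmp[4];
  int status;
  pid_t child;

  /* Phone home; if nobody listens, we are not under afl-fuzz. */
  if (write(FORKSRV_FD + 1, tmp, 4) != 4) return 0;

  while (1) {
    if (read(FORKSRV_FD, tmp, 4) != 4) break;  /* parent is gone */
    child = fork();
    if (child < 0) break;
    if (!child) return 0;  /* child: would run one target execution */
    write(FORKSRV_FD + 1, &child, 4);   /* report the pid...       */
    waitpid(child, &status, 0);
    write(FORKSRV_FD + 1, &status, 4);  /* ...and the exit status  */
  }
  return 0;
}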
When this happens, we tell the parent to mirror the operation, so that the next fork() has a cached copy. */ -static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, uint32_t cf_mask, - TranslationBlock *last_tb, int tb_exit) { +static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, + uint32_t cf_mask, TranslationBlock *last_tb, + int tb_exit) { - struct afl_tsl t; + struct afl_tsl t; struct afl_chain c; if (!afl_fork_child) return; - t.tb.pc = pc; + t.tb.pc = pc; t.tb.cs_base = cb; - t.tb.flags = flags; + t.tb.flags = flags; t.tb.cf_mask = cf_mask; - t.is_chain = (last_tb != NULL); + t.is_chain = (last_tb != NULL); if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) return; if (t.is_chain) { - c.last_tb.pc = last_tb->pc; + + c.last_tb.pc = last_tb->pc; c.last_tb.cs_base = last_tb->cs_base; - c.last_tb.flags = last_tb->flags; - c.cf_mask = cf_mask; - c.tb_exit = tb_exit; + c.last_tb.flags = last_tb->flags; + c.cf_mask = cf_mask; + c.tb_exit = tb_exit; if (write(TSL_FD, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain)) return; + } } - /* Check if an address is valid in the current mapping */ static inline int is_valid_addr(target_ulong addr) { - int l, flags; - target_ulong page; - void * p; - - page = addr & TARGET_PAGE_MASK; - l = (page + TARGET_PAGE_SIZE) - addr; - - flags = page_get_flags(page); - if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) - return 0; - - return 1; + int l, flags; + target_ulong page; + void * p; + + page = addr & TARGET_PAGE_MASK; + l = (page + TARGET_PAGE_SIZE) - addr; + + flags = page_get_flags(page); + if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) return 0; + + return 1; + } /* This is the other side of the same channel. Since timeouts are handled by @@ -306,8 +318,8 @@ static inline int is_valid_addr(target_ulong addr) { static void afl_wait_tsl(CPUState *cpu, int fd) { - struct afl_tsl t; - struct afl_chain c; + struct afl_tsl t; + struct afl_chain c; TranslationBlock *tb, *last_tb; while (1) { @@ -316,30 +328,33 @@ static void afl_wait_tsl(CPUState *cpu, int fd) { /* Broken pipe means it's time to return to the fork server routine. */ - if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) - break; + if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break; tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask); - if(!tb) { - + if (!tb) { + /* The child may request to transate a block of memory that is not mapped in the parent (e.g. jitted code or dlopened code). This causes a SIGSEV in gen_intermediate_code() and associated subroutines. We simply avoid caching of such blocks. 
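The request/mirror pattern used by afl_request_tsl() and afl_wait_tsl() boils down to fixed-size structs over a pipe; a toy demonstration (the struct mirrors afl_tsl in spirit only):

#include <stdint.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

struct tsl_req {
  uint64_t pc;
  char     is_chain;
};

int main(void) {
  int fd[2];
  struct tsl_req t;

  if (pipe(fd)) return 1;

  if (fork() == 0) {                  /* child: request caching of a TB */
    struct tsl_req req = {0x400000, 0};
    write(fd[1], &req, sizeof(req));
    _exit(0);
  }

  if (read(fd[0], &t, sizeof(t)) == sizeof(t))  /* parent: mirror it */
    printf("parent caches block at 0x%llx\n", (unsigned long long)t.pc);
  wait(NULL);
  return 0;
}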
*/ if (is_valid_addr(t.tb.pc)) { - + mmap_lock(); tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask); mmap_unlock(); + } else { - - invalid_pc = 1; + + invalid_pc = 1; + } + } if (t.is_chain) { + if (read(fd, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain)) break; @@ -347,10 +362,10 @@ static void afl_wait_tsl(CPUState *cpu, int fd) { last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base, c.last_tb.flags, c.cf_mask); - if (last_tb) { - tb_add_jump(last_tb, c.tb_exit, tb); - } + if (last_tb) { tb_add_jump(last_tb, c.tb_exit, tb); } + } + } } @@ -358,3 +373,4 @@ static void afl_wait_tsl(CPUState *cpu, int fd) { close(fd); } + diff --git a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h index 09ecb9d2..3d3c1b6b 100644 --- a/qemu_mode/patches/afl-qemu-cpu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-cpu-translate-inl.h @@ -37,9 +37,9 @@ /* Declared in afl-qemu-cpu-inl.h */ extern unsigned char *afl_area_ptr; -extern unsigned int afl_inst_rms; -extern abi_ulong afl_start_code, afl_end_code; -extern u8 afl_compcov_level; +extern unsigned int afl_inst_rms; +extern abi_ulong afl_start_code, afl_end_code; +extern u8 afl_compcov_level; void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2); @@ -47,81 +47,93 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { - if ((arg1 & 0xff) == (arg2 & 0xff)) { - INC_AFL_AREA(cur_loc); - } + if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); } + } static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { if ((arg1 & 0xff) == (arg2 & 0xff)) { + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - INC_AFL_AREA(cur_loc +1); - if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - INC_AFL_AREA(cur_loc +2); - } + + INC_AFL_AREA(cur_loc + 1); + if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); } + } + } + } static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1, target_ulong arg2) { if ((arg1 & 0xff) == (arg2 & 0xff)) { + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - INC_AFL_AREA(cur_loc +1); - if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - INC_AFL_AREA(cur_loc +2); - if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) { - INC_AFL_AREA(cur_loc +3); - if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) { - INC_AFL_AREA(cur_loc +4); - if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) { - INC_AFL_AREA(cur_loc +5); - if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) { - INC_AFL_AREA(cur_loc +6); - } - } - } - } - } - } - } -} + INC_AFL_AREA(cur_loc + 1); + if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { + + INC_AFL_AREA(cur_loc + 2); + if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) { + + INC_AFL_AREA(cur_loc + 3); + if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) { + + INC_AFL_AREA(cur_loc + 4); + if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) { + + INC_AFL_AREA(cur_loc + 5); + if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) { + + INC_AFL_AREA(cur_loc + 6); + + } + + } + + } + + } + + } + + } + + } + +} static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2, TCGMemOp ot, int is_imm) { void *func; - + if (!afl_compcov_level || cur_loc > afl_end_code || cur_loc < afl_start_code) return; - - if (!is_imm && afl_compcov_level < 2) - return; + + if (!is_imm && 
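The unrolled afl_compcov_log_{16,32,64} ladders above all implement one rule: bump one counter per matching low-order byte prefix of the two operands. The same logic as a loop, using the plain (non-NeverZero) increment for brevity:

#include <stdint.h>
#include <stdio.h>

#define MAP_SIZE 65536
static uint8_t afl_map[MAP_SIZE];
#define INC_AFL_AREA(loc) afl_map[loc]++  /* plain variant */

static void compcov_log(uint64_t cur_loc, uint64_t a, uint64_t b, int size) {
  for (int i = 0; i < size - 1; i++) {
    uint64_t mask = ~0ULL >> (8 * (7 - i));  /* low i+1 bytes */
    if ((a & mask) != (b & mask)) break;
    INC_AFL_AREA(cur_loc + i);
  }
}

int main(void) {
  compcov_log(0, 0xCAFEBABE, 0xC0FEBABE, 4);  /* 3 low bytes match */
  printf("%u %u %u %u\n", afl_map[0], afl_map[1], afl_map[2], afl_map[3]);
  return 0;  /* prints 1 1 1 0 */
}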
afl_compcov_level < 2) return; switch (ot) { - case MO_64: - func = &afl_compcov_log_64; - break; - case MO_32: - func = &afl_compcov_log_32; - break; - case MO_16: - func = &afl_compcov_log_16; - break; - default: - return; + + case MO_64: func = &afl_compcov_log_64; break; + case MO_32: func = &afl_compcov_log_32; break; + case MO_16: func = &afl_compcov_log_16; break; + default: return; + } - - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 7; - + if (cur_loc >= afl_inst_rms) return; - + tcg_gen_afl_compcov_log_call(func, cur_loc, arg1, arg2); + } + diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h index a9c53b8c..d53a1ccf 100644 --- a/qemu_mode/patches/afl-qemu-tcg-inl.h +++ b/qemu_mode/patches/afl-qemu-tcg-inl.h @@ -31,275 +31,343 @@ */ -void afl_maybe_log(void* cur_loc); +void afl_maybe_log(void *cur_loc); /* Note: we convert the 64 bit args to 32 bit and do some alignment and endian swap. Maybe it would be better to do the alignment and endian swap in tcg_reg_alloc_call(). */ -void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) -{ - int real_args, pi; - unsigned sizemask, flags; - TCGOp *op; +void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) { - TCGTemp *arg = tcgv_i64_temp( tcg_const_tl(cur_loc) ); + int real_args, pi; + unsigned sizemask, flags; + TCGOp * op; - flags = 0; - sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1); + TCGTemp *arg = tcgv_i64_temp(tcg_const_tl(cur_loc)); -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* We have 64-bit values in one register, but need to pass as two - separate parameters. Split them. */ - int orig_sizemask = sizemask; - TCGv_i64 retl, reth; - TCGTemp *split_args[MAX_OPC_PARAM]; + flags = 0; + sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1); - retl = NULL; - reth = NULL; - if (sizemask != 0) { - real_args = 0; - int is_64bit = sizemask & (1 << 2); - if (is_64bit) { - TCGv_i64 orig = temp_tcgv_i64(arg); - TCGv_i32 h = tcg_temp_new_i32(); - TCGv_i32 l = tcg_temp_new_i32(); - tcg_gen_extr_i64_i32(l, h, orig); - split_args[real_args++] = tcgv_i32_temp(h); - split_args[real_args++] = tcgv_i32_temp(l); - } else { - split_args[real_args++] = arg; - } - nargs = real_args; - args = split_args; - sizemask = 0; - } -#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 - int is_64bit = sizemask & (1 << 2); - int is_signed = sizemask & (2 << 2); - if (!is_64bit) { - TCGv_i64 temp = tcg_temp_new_i64(); - TCGv_i64 orig = temp_tcgv_i64(arg); - if (is_signed) { - tcg_gen_ext32s_i64(temp, orig); - } else { - tcg_gen_ext32u_i64(temp, orig); - } - arg = tcgv_i64_temp(temp); - } -#endif /* TCG_TARGET_EXTEND_ARGS */ +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* We have 64-bit values in one register, but need to pass as two + separate parameters. Split them. 
*/ + int orig_sizemask = sizemask; + TCGv_i64 retl, reth; + TCGTemp *split_args[MAX_OPC_PARAM]; - op = tcg_emit_op(INDEX_op_call); - - pi = 0; - - TCGOP_CALLO(op) = 0; + retl = NULL; + reth = NULL; + if (sizemask != 0) { real_args = 0; int is_64bit = sizemask & (1 << 2); - if (TCG_TARGET_REG_BITS < 64 && is_64bit) { -#ifdef TCG_TARGET_CALL_ALIGN_ARGS - /* some targets want aligned 64 bit args */ - if (real_args & 1) { - op->args[pi++] = TCG_CALL_DUMMY_ARG; - real_args++; - } -#endif - /* If stack grows up, then we will be placing successive - arguments at lower addresses, which means we need to - reverse the order compared to how we would normally - treat either big or little-endian. For those arguments - that will wind up in registers, this still works for - HPPA (the only current STACK_GROWSUP target) since the - argument registers are *also* allocated in decreasing - order. If another such target is added, this logic may - have to get more complicated to differentiate between - stack arguments and register arguments. */ -#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) - op->args[pi++] = temp_arg(arg + 1); - op->args[pi++] = temp_arg(arg); -#else - op->args[pi++] = temp_arg(arg); - op->args[pi++] = temp_arg(arg + 1); -#endif - real_args += 2; + if (is_64bit) { + + TCGv_i64 orig = temp_tcgv_i64(arg); + TCGv_i32 h = tcg_temp_new_i32(); + TCGv_i32 l = tcg_temp_new_i32(); + tcg_gen_extr_i64_i32(l, h, orig); + split_args[real_args++] = tcgv_i32_temp(h); + split_args[real_args++] = tcgv_i32_temp(l); + + } else { + + split_args[real_args++] = arg; + } + nargs = real_args; + args = split_args; + sizemask = 0; + + } + +#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 + int is_64bit = sizemask & (1 << 2); + int is_signed = sizemask & (2 << 2); + if (!is_64bit) { + + TCGv_i64 temp = tcg_temp_new_i64(); + TCGv_i64 orig = temp_tcgv_i64(arg); + if (is_signed) { + + tcg_gen_ext32s_i64(temp, orig); + + } else { + + tcg_gen_ext32u_i64(temp, orig); + + } + + arg = tcgv_i64_temp(temp); + + } + +#endif /* TCG_TARGET_EXTEND_ARGS */ + + op = tcg_emit_op(INDEX_op_call); + + pi = 0; + + TCGOP_CALLO(op) = 0; + + real_args = 0; + int is_64bit = sizemask & (1 << 2); + if (TCG_TARGET_REG_BITS < 64 && is_64bit) { + +#ifdef TCG_TARGET_CALL_ALIGN_ARGS + /* some targets want aligned 64 bit args */ + if (real_args & 1) { + + op->args[pi++] = TCG_CALL_DUMMY_ARG; + real_args++; + + } + +#endif + /* If stack grows up, then we will be placing successive + arguments at lower addresses, which means we need to + reverse the order compared to how we would normally + treat either big or little-endian. For those arguments + that will wind up in registers, this still works for + HPPA (the only current STACK_GROWSUP target) since the + argument registers are *also* allocated in decreasing + order. If another such target is added, this logic may + have to get more complicated to differentiate between + stack arguments and register arguments. */ +#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) + op->args[pi++] = temp_arg(arg + 1); op->args[pi++] = temp_arg(arg); +#else + op->args[pi++] = temp_arg(arg); + op->args[pi++] = temp_arg(arg + 1); +#endif + real_args += 2; + + } + + op->args[pi++] = temp_arg(arg); + real_args++; + + op->args[pi++] = (uintptr_t)&afl_maybe_log; + op->args[pi++] = flags; + TCGOP_CALLI(op) = real_args; + + /* Make sure the fields didn't overflow. 
*/ + tcg_debug_assert(TCGOP_CALLI(op) == real_args); + tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* Free all of the parts we allocated above. */ + real_args = 0; + int is_64bit = orig_sizemask & (1 << 2); + if (is_64bit) { + + tcg_temp_free_internal(args[real_args++]); + tcg_temp_free_internal(args[real_args++]); + + } else { + real_args++; - op->args[pi++] = (uintptr_t)&afl_maybe_log; - op->args[pi++] = flags; - TCGOP_CALLI(op) = real_args; + } - /* Make sure the fields didn't overflow. */ - tcg_debug_assert(TCGOP_CALLI(op) == real_args); - tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); + if (orig_sizemask & 1) { + + /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. + Note that describing these as TCGv_i64 eliminates an unnecessary + zero-extension that tcg_gen_concat_i32_i64 would create. */ + tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); + tcg_temp_free_i64(retl); + tcg_temp_free_i64(reth); + + } -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* Free all of the parts we allocated above. */ - real_args = 0; - int is_64bit = orig_sizemask & (1 << 2); - if (is_64bit) { - tcg_temp_free_internal(args[real_args++]); - tcg_temp_free_internal(args[real_args++]); - } else { - real_args++; - } - if (orig_sizemask & 1) { - /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. - Note that describing these as TCGv_i64 eliminates an unnecessary - zero-extension that tcg_gen_concat_i32_i64 would create. */ - tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); - tcg_temp_free_i64(retl); - tcg_temp_free_i64(reth); - } #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 - int is_64bit = sizemask & (1 << 2); + int is_64bit = sizemask & (1 << 2); + if (!is_64bit) { tcg_temp_free_internal(arg); } +#endif /* TCG_TARGET_EXTEND_ARGS */ + +} + +void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, + TCGv_i64 arg1, TCGv_i64 arg2) { + + int i, real_args, nb_rets, pi; + unsigned sizemask, flags; + TCGOp * op; + + const int nargs = 3; + TCGTemp *args[3] = {tcgv_i64_temp(tcg_const_tl(cur_loc)), tcgv_i64_temp(arg1), + tcgv_i64_temp(arg2)}; + + flags = 0; + sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | dh_sizemask(i64, 2) | + dh_sizemask(i64, 3); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* We have 64-bit values in one register, but need to pass as two + separate parameters. Split them. 
*/ + int orig_sizemask = sizemask; + int orig_nargs = nargs; + TCGv_i64 retl, reth; + TCGTemp *split_args[MAX_OPC_PARAM]; + + retl = NULL; + reth = NULL; + if (sizemask != 0) { + + for (i = real_args = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (is_64bit) { + + TCGv_i64 orig = temp_tcgv_i64(args[i]); + TCGv_i32 h = tcg_temp_new_i32(); + TCGv_i32 l = tcg_temp_new_i32(); + tcg_gen_extr_i64_i32(l, h, orig); + split_args[real_args++] = tcgv_i32_temp(h); + split_args[real_args++] = tcgv_i32_temp(l); + + } else { + + split_args[real_args++] = args[i]; + + } + + } + + nargs = real_args; + args = split_args; + sizemask = 0; + + } + +#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 + for (i = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + int is_signed = sizemask & (2 << (i + 1) * 2); if (!is_64bit) { - tcg_temp_free_internal(arg); + + TCGv_i64 temp = tcg_temp_new_i64(); + TCGv_i64 orig = temp_tcgv_i64(args[i]); + if (is_signed) { + + tcg_gen_ext32s_i64(temp, orig); + + } else { + + tcg_gen_ext32u_i64(temp, orig); + + } + + args[i] = tcgv_i64_temp(temp); + } -#endif /* TCG_TARGET_EXTEND_ARGS */ -} -void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2) -{ - int i, real_args, nb_rets, pi; - unsigned sizemask, flags; - TCGOp *op; + } - const int nargs = 3; - TCGTemp *args[3] = { tcgv_i64_temp( tcg_const_tl(cur_loc) ), - tcgv_i64_temp(arg1), - tcgv_i64_temp(arg2) }; - - flags = 0; - sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | - dh_sizemask(i64, 2) | dh_sizemask(i64, 3); - -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* We have 64-bit values in one register, but need to pass as two - separate parameters. Split them. 
*/ - int orig_sizemask = sizemask; - int orig_nargs = nargs; - TCGv_i64 retl, reth; - TCGTemp *split_args[MAX_OPC_PARAM]; - - retl = NULL; - reth = NULL; - if (sizemask != 0) { - for (i = real_args = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (is_64bit) { - TCGv_i64 orig = temp_tcgv_i64(args[i]); - TCGv_i32 h = tcg_temp_new_i32(); - TCGv_i32 l = tcg_temp_new_i32(); - tcg_gen_extr_i64_i32(l, h, orig); - split_args[real_args++] = tcgv_i32_temp(h); - split_args[real_args++] = tcgv_i32_temp(l); - } else { - split_args[real_args++] = args[i]; - } - } - nargs = real_args; - args = split_args; - sizemask = 0; - } -#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 - for (i = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - int is_signed = sizemask & (2 << (i+1)*2); - if (!is_64bit) { - TCGv_i64 temp = tcg_temp_new_i64(); - TCGv_i64 orig = temp_tcgv_i64(args[i]); - if (is_signed) { - tcg_gen_ext32s_i64(temp, orig); - } else { - tcg_gen_ext32u_i64(temp, orig); - } - args[i] = tcgv_i64_temp(temp); - } - } #endif /* TCG_TARGET_EXTEND_ARGS */ - op = tcg_emit_op(INDEX_op_call); + op = tcg_emit_op(INDEX_op_call); - pi = 0; - nb_rets = 0; - TCGOP_CALLO(op) = nb_rets; + pi = 0; + nb_rets = 0; + TCGOP_CALLO(op) = nb_rets; + + real_args = 0; + for (i = 0; i < nargs; i++) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (TCG_TARGET_REG_BITS < 64 && is_64bit) { - real_args = 0; - for (i = 0; i < nargs; i++) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (TCG_TARGET_REG_BITS < 64 && is_64bit) { #ifdef TCG_TARGET_CALL_ALIGN_ARGS - /* some targets want aligned 64 bit args */ - if (real_args & 1) { - op->args[pi++] = TCG_CALL_DUMMY_ARG; - real_args++; - } -#endif - /* If stack grows up, then we will be placing successive - arguments at lower addresses, which means we need to - reverse the order compared to how we would normally - treat either big or little-endian. For those arguments - that will wind up in registers, this still works for - HPPA (the only current STACK_GROWSUP target) since the - argument registers are *also* allocated in decreasing - order. If another such target is added, this logic may - have to get more complicated to differentiate between - stack arguments and register arguments. */ -#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) - op->args[pi++] = temp_arg(args[i] + 1); - op->args[pi++] = temp_arg(args[i]); -#else - op->args[pi++] = temp_arg(args[i]); - op->args[pi++] = temp_arg(args[i] + 1); -#endif - real_args += 2; - continue; - } + /* some targets want aligned 64 bit args */ + if (real_args & 1) { - op->args[pi++] = temp_arg(args[i]); + op->args[pi++] = TCG_CALL_DUMMY_ARG; real_args++; - } - op->args[pi++] = (uintptr_t)func; - op->args[pi++] = flags; - TCGOP_CALLI(op) = real_args; - /* Make sure the fields didn't overflow. */ - tcg_debug_assert(TCGOP_CALLI(op) == real_args); - tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); + } + +#endif + /* If stack grows up, then we will be placing successive + arguments at lower addresses, which means we need to + reverse the order compared to how we would normally + treat either big or little-endian. For those arguments + that will wind up in registers, this still works for + HPPA (the only current STACK_GROWSUP target) since the + argument registers are *also* allocated in decreasing + order. If another such target is added, this logic may + have to get more complicated to differentiate between + stack arguments and register arguments. 
*/ +#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) + op->args[pi++] = temp_arg(args[i] + 1); + op->args[pi++] = temp_arg(args[i]); +#else + op->args[pi++] = temp_arg(args[i]); + op->args[pi++] = temp_arg(args[i] + 1); +#endif + real_args += 2; + continue; -#if defined(__sparc__) && !defined(__arch64__) \ - && !defined(CONFIG_TCG_INTERPRETER) - /* Free all of the parts we allocated above. */ - for (i = real_args = 0; i < orig_nargs; ++i) { - int is_64bit = orig_sizemask & (1 << (i+1)*2); - if (is_64bit) { - tcg_temp_free_internal(args[real_args++]); - tcg_temp_free_internal(args[real_args++]); - } else { - real_args++; - } } - if (orig_sizemask & 1) { - /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. - Note that describing these as TCGv_i64 eliminates an unnecessary - zero-extension that tcg_gen_concat_i32_i64 would create. */ - tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); - tcg_temp_free_i64(retl); - tcg_temp_free_i64(reth); + + op->args[pi++] = temp_arg(args[i]); + real_args++; + + } + + op->args[pi++] = (uintptr_t)func; + op->args[pi++] = flags; + TCGOP_CALLI(op) = real_args; + + /* Make sure the fields didn't overflow. */ + tcg_debug_assert(TCGOP_CALLI(op) == real_args); + tcg_debug_assert(pi <= ARRAY_SIZE(op->args)); + +#if defined(__sparc__) && !defined(__arch64__) && \ + !defined(CONFIG_TCG_INTERPRETER) + /* Free all of the parts we allocated above. */ + for (i = real_args = 0; i < orig_nargs; ++i) { + + int is_64bit = orig_sizemask & (1 << (i + 1) * 2); + if (is_64bit) { + + tcg_temp_free_internal(args[real_args++]); + tcg_temp_free_internal(args[real_args++]); + + } else { + + real_args++; + } + + } + + if (orig_sizemask & 1) { + + /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them. + Note that describing these as TCGv_i64 eliminates an unnecessary + zero-extension that tcg_gen_concat_i32_i64 would create. */ + tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth); + tcg_temp_free_i64(retl); + tcg_temp_free_i64(reth); + + } + #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 - for (i = 0; i < nargs; ++i) { - int is_64bit = sizemask & (1 << (i+1)*2); - if (!is_64bit) { - tcg_temp_free_internal(args[i]); - } - } + for (i = 0; i < nargs; ++i) { + + int is_64bit = sizemask & (1 << (i + 1) * 2); + if (!is_64bit) { tcg_temp_free_internal(args[i]); } + + } + #endif /* TCG_TARGET_EXTEND_ARGS */ + } diff --git a/qemu_mode/patches/afl-qemu-translate-inl.h b/qemu_mode/patches/afl-qemu-translate-inl.h index ffe43dba..9abaa961 100644 --- a/qemu_mode/patches/afl-qemu-translate-inl.h +++ b/qemu_mode/patches/afl-qemu-translate-inl.h @@ -36,8 +36,8 @@ /* Declared in afl-qemu-cpu-inl.h */ extern unsigned char *afl_area_ptr; -extern unsigned int afl_inst_rms; -extern abi_ulong afl_start_code, afl_end_code; +extern unsigned int afl_inst_rms; +extern abi_ulong afl_start_code, afl_end_code; void tcg_gen_afl_maybe_log_call(target_ulong cur_loc); @@ -59,14 +59,16 @@ static void afl_gen_trace(target_ulong cur_loc) { /* Optimize for cur_loc > afl_end_code, which is the most likely case on Linux systems. */ - if (cur_loc > afl_end_code || cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of static dummy buffer + if (cur_loc > afl_end_code || + cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of + // static dummy buffer return; /* Looks like QEMU always maps to fixed locations, so ASLR is not a concern. Phew. But instruction addresses may be aligned. 
Let's mangle the value to get something quasi-uniform. */ - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; /* Implement probabilistic instrumentation by looking at scrambled block @@ -75,5 +77,6 @@ static void afl_gen_trace(target_ulong cur_loc) { if (cur_loc >= afl_inst_rms) return; tcg_gen_afl_maybe_log_call(cur_loc); - + } + diff --git a/src/afl-analyze.c b/src/afl-analyze.c index 5bb96154..e3014256 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -22,7 +22,7 @@ #define AFL_MAIN #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -50,61 +50,59 @@ #include #include -static s32 child_pid; /* PID of the tested program */ +static s32 child_pid; /* PID of the tested program */ - u8* trace_bits; /* SHM with instrumentation bitmap */ +u8* trace_bits; /* SHM with instrumentation bitmap */ -static u8 *in_file, /* Analyzer input test case */ - *prog_in, /* Targeted program input file */ - *target_path, /* Path to target binary */ - *doc_path; /* Path to docs */ +static u8 *in_file, /* Analyzer input test case */ + *prog_in, /* Targeted program input file */ + *target_path, /* Path to target binary */ + *doc_path; /* Path to docs */ -static u8 *in_data; /* Input data for analysis */ +static u8* in_data; /* Input data for analysis */ -static u32 in_len, /* Input data length */ - orig_cksum, /* Original checksum */ - total_execs, /* Total number of execs */ - exec_hangs, /* Total number of hangs */ - exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ +static u32 in_len, /* Input data length */ + orig_cksum, /* Original checksum */ + total_execs, /* Total number of execs */ + exec_hangs, /* Total number of hangs */ + exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ -static u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ +static u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ -static s32 dev_null_fd = -1; /* FD to /dev/null */ +static s32 dev_null_fd = -1; /* FD to /dev/null */ -static u8 edges_only, /* Ignore hit counts? */ - use_hex_offsets, /* Show hex offsets? */ - use_stdin = 1; /* Use stdin for program input? */ - -static volatile u8 - stop_soon, /* Ctrl-C pressed? */ - child_timed_out; /* Child timed out? */ +static u8 edges_only, /* Ignore hit counts? */ + use_hex_offsets, /* Show hex offsets? */ + use_stdin = 1; /* Use stdin for program input? */ +static volatile u8 stop_soon, /* Ctrl-C pressed? */ + child_timed_out; /* Child timed out? */ /* Constants used for describing byte behavior. */ -#define RESP_NONE 0x00 /* Changing byte is a no-op. */ -#define RESP_MINOR 0x01 /* Some changes have no effect. */ -#define RESP_VARIABLE 0x02 /* Changes produce variable paths. */ -#define RESP_FIXED 0x03 /* Changes produce fixed patterns. */ +#define RESP_NONE 0x00 /* Changing byte is a no-op. */ +#define RESP_MINOR 0x01 /* Some changes have no effect. */ +#define RESP_VARIABLE 0x02 /* Changes produce variable paths. */ +#define RESP_FIXED 0x03 /* Changes produce fixed patterns. */ -#define RESP_LEN 0x04 /* Potential length field */ -#define RESP_CKSUM 0x05 /* Potential checksum */ -#define RESP_SUSPECT 0x06 /* Potential "suspect" blob */ +#define RESP_LEN 0x04 /* Potential length field */ +#define RESP_CKSUM 0x05 /* Potential checksum */ +#define RESP_SUSPECT 0x06 /* Potential "suspect" blob */ - -/* Classify tuple counts. This is a slow & naive version, but good enough here. */ +/* Classify tuple counts. 
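The lookup table below collapses raw tuple hit counts into power-of-two buckets; written out as an equivalent function (for illustration only, the tool keeps the table form):

  static unsigned char bucket(unsigned char hits) {
    if (hits == 0)   return 0;
    if (hits == 1)   return 1;
    if (hits == 2)   return 2;
    if (hits == 3)   return 4;
    if (hits <= 7)   return 8;
    if (hits <= 15)  return 16;
    if (hits <= 31)  return 32;
    if (hits <= 127) return 64;
    return 128;
  }
  // bucket(5) == bucket(7) == 8: small jitter in hit counts is not
  // mistaken for new program behavior.
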
This is a slow & naive version, but good enough here. + */ static u8 count_class_lookup[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 }; @@ -115,61 +113,62 @@ static void classify_counts(u8* mem) { if (edges_only) { while (i--) { + if (*mem) *mem = 1; mem++; + } } else { while (i--) { + *mem = count_class_lookup[*mem]; mem++; + } } } - /* See if any bytes are set in the bitmap. */ static inline u8 anything_set(void) { u32* ptr = (u32*)trace_bits; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); - while (i--) if (*(ptr++)) return 1; + while (i--) + if (*(ptr++)) return 1; return 0; } - /* Get rid of temp files (atexit handler). */ static void at_exit_handler(void) { - unlink(prog_in); /* Ignore errors */ + unlink(prog_in); /* Ignore errors */ } - /* Read initial file. */ static void read_initial_file(void) { struct stat st; - s32 fd = open(in_file, O_RDONLY); + s32 fd = open(in_file, O_RDONLY); if (fd < 0) PFATAL("Unable to open '%s'", in_file); - if (fstat(fd, &st) || !st.st_size) - FATAL("Zero-sized input file."); + if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file."); if (st.st_size >= TMIN_MAX_FILE) FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024); - in_len = st.st_size; + in_len = st.st_size; in_data = ck_alloc_nozero(in_len); ck_read(fd, in_data, in_len, in_file); @@ -180,14 +179,13 @@ static void read_initial_file(void) { } - /* Write output file. */ static s32 write_to_file(u8* path, u8* mem, u32 len) { s32 ret; - unlink(path); /* Ignore errors */ + unlink(path); /* Ignore errors */ ret = open(path, O_RDWR | O_CREAT | O_EXCL, 0600); @@ -201,7 +199,6 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) { } - /* Handle timeout signal. */ static void handle_timeout(int sig) { @@ -211,14 +208,13 @@ static void handle_timeout(int sig) { } - /* Execute target application. Returns exec checksum, or 0 if program times out. */ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { static struct itimerval it; - int status = 0; + int status = 0; s32 prog_in_fd; u32 cksum; @@ -237,8 +233,7 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { struct rlimit r; if (dup2(use_stdin ? prog_in_fd : dev_null_fd, 0) < 0 || - dup2(dev_null_fd, 1) < 0 || - dup2(dev_null_fd, 2) < 0) { + dup2(dev_null_fd, 1) < 0 || dup2(dev_null_fd, 2) < 0) { *(u32*)trace_bits = EXEC_FAIL_SIG; PFATAL("dup2() failed"); @@ -254,18 +249,18 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ #else - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ #endif /* ^RLIMIT_AS */ } r.rlim_max = r.rlim_cur = 0; - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ execv(target_path, argv); @@ -303,8 +298,10 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { total_execs++; if (stop_soon) { + SAYF(cRST cLRD "\n+++ Analysis aborted by user +++\n" cRST); exit(1); + } /* Always discard inputs that time out. */ @@ -335,7 +332,6 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { } - #ifdef USE_COLOR /* Helper function to display a human-readable character. 
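Backing up to run_target() above for a moment: before execv(), the child caps its own memory, preferring RLIMIT_AS and falling back to RLIMIT_DATA on systems without it (e.g. OpenBSD). The same steps, condensed into a hypothetical helper:

  #include <sys/resource.h>

  static void cap_memory(unsigned long long megs) {
    struct rlimit r;
    if (!megs) return;                    // "-m none"
    r.rlim_max = r.rlim_cur = megs << 20; // MB to bytes
  #ifdef RLIMIT_AS
    setrlimit(RLIMIT_AS, &r);             // errors deliberately ignored
  #else
    setrlimit(RLIMIT_DATA, &r);
  #endif
  }
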
*/ @@ -353,24 +349,25 @@ static void show_char(u8 val) { } - /* Show the legend */ static void show_legend(void) { - SAYF(" " cLGR bgGRA " 01 " cRST " - no-op block " - cBLK bgLGN " 01 " cRST " - suspected length field\n" - " " cBRI bgGRA " 01 " cRST " - superficial content " - cBLK bgYEL " 01 " cRST " - suspected cksum or magic int\n" - " " cBLK bgCYA " 01 " cRST " - critical stream " - cBLK bgLRD " 01 " cRST " - suspected checksummed block\n" + SAYF(" " cLGR bgGRA " 01 " cRST " - no-op block " cBLK bgLGN + " 01 " cRST + " - suspected length field\n" + " " cBRI bgGRA " 01 " cRST " - superficial content " cBLK bgYEL + " 01 " cRST + " - suspected cksum or magic int\n" + " " cBLK bgCYA " 01 " cRST " - critical stream " cBLK bgLRD + " 01 " cRST + " - suspected checksummed block\n" " " cBLK bgMGN " 01 " cRST " - \"magic value\" section\n\n"); } #endif /* USE_COLOR */ - /* Interpret and report a pattern in the input file. */ static void dump_hex(u8* buf, u32 len, u8* b_data) { @@ -385,7 +382,7 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { u32 rlen = 1; #endif /* ^USE_COLOR */ - u8 rtype = b_data[i] & 0x0f; + u8 rtype = b_data[i] & 0x0f; /* Look ahead to determine the length of run. */ @@ -404,51 +401,61 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { case 2: { - u16 val = *(u16*)(in_data + i); + u16 val = *(u16*)(in_data + i); - /* Small integers may be length fields. */ + /* Small integers may be length fields. */ - if (val && (val <= in_len || SWAP16(val) <= in_len)) { - rtype = RESP_LEN; - break; - } - - /* Uniform integers may be checksums. */ - - if (val && abs(in_data[i] - in_data[i + 1]) > 32) { - rtype = RESP_CKSUM; - break; - } + if (val && (val <= in_len || SWAP16(val) <= in_len)) { + rtype = RESP_LEN; break; } + /* Uniform integers may be checksums. */ + + if (val && abs(in_data[i] - in_data[i + 1]) > 32) { + + rtype = RESP_CKSUM; + break; + + } + + break; + + } + case 4: { - u32 val = *(u32*)(in_data + i); + u32 val = *(u32*)(in_data + i); - /* Small integers may be length fields. */ + /* Small integers may be length fields. */ - if (val && (val <= in_len || SWAP32(val) <= in_len)) { - rtype = RESP_LEN; - break; - } - - /* Uniform integers may be checksums. */ - - if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 || - in_data[i] >> 7 != in_data[i + 2] >> 7 || - in_data[i] >> 7 != in_data[i + 3] >> 7)) { - rtype = RESP_CKSUM; - break; - } + if (val && (val <= in_len || SWAP32(val) <= in_len)) { + rtype = RESP_LEN; break; } - case 1: case 3: case 5 ... MAX_AUTO_EXTRA - 1: break; + /* Uniform integers may be checksums. */ + + if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 || + in_data[i] >> 7 != in_data[i + 2] >> 7 || + in_data[i] >> 7 != in_data[i + 3] >> 7)) { + + rtype = RESP_CKSUM; + break; + + } + + break; + + } + + case 1: + case 3: + case 5 ... 
MAX_AUTO_EXTRA - 1: break; default: rtype = RESP_SUSPECT; @@ -477,19 +484,22 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { switch (rtype) { - case RESP_NONE: SAYF(cLGR bgGRA); break; - case RESP_MINOR: SAYF(cBRI bgGRA); break; + case RESP_NONE: SAYF(cLGR bgGRA); break; + case RESP_MINOR: SAYF(cBRI bgGRA); break; case RESP_VARIABLE: SAYF(cBLK bgCYA); break; - case RESP_FIXED: SAYF(cBLK bgMGN); break; - case RESP_LEN: SAYF(cBLK bgLGN); break; - case RESP_CKSUM: SAYF(cBLK bgYEL); break; - case RESP_SUSPECT: SAYF(cBLK bgLRD); break; + case RESP_FIXED: SAYF(cBLK bgMGN); break; + case RESP_LEN: SAYF(cBLK bgLGN); break; + case RESP_CKSUM: SAYF(cBLK bgYEL); break; + case RESP_SUSPECT: SAYF(cBLK bgLRD); break; } show_char(in_data[i + off]); - if (off != rlen - 1 && (i + off + 1) % 16) SAYF(" "); else SAYF(cRST " "); + if (off != rlen - 1 && (i + off + 1) % 16) + SAYF(" "); + else + SAYF(cRST " "); } @@ -502,13 +512,13 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { switch (rtype) { - case RESP_NONE: SAYF("no-op block\n"); break; - case RESP_MINOR: SAYF("superficial content\n"); break; + case RESP_NONE: SAYF("no-op block\n"); break; + case RESP_MINOR: SAYF("superficial content\n"); break; case RESP_VARIABLE: SAYF("critical stream\n"); break; - case RESP_FIXED: SAYF("\"magic value\" section\n"); break; - case RESP_LEN: SAYF("suspected length field\n"); break; - case RESP_CKSUM: SAYF("suspected cksum or magic int\n"); break; - case RESP_SUSPECT: SAYF("suspected checksummed block\n"); break; + case RESP_FIXED: SAYF("\"magic value\" section\n"); break; + case RESP_LEN: SAYF("suspected length field\n"); break; + case RESP_CKSUM: SAYF("suspected cksum or magic int\n"); break; + case RESP_SUSPECT: SAYF("suspected checksummed block\n"); break; } @@ -524,8 +534,6 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { } - - /* Actually analyze! */ static void analyze(char** argv) { @@ -536,7 +544,7 @@ static void analyze(char** argv) { u8* b_data = ck_alloc(in_len + 1); u8 seq_byte = 0; - b_data[in_len] = 0xff; /* Intentional terminator. */ + b_data[in_len] = 0xff; /* Intentional terminator. */ ACTF("Analyzing input file (this may take a while)...\n"); @@ -587,12 +595,15 @@ static void analyze(char** argv) { b_data[i] = RESP_FIXED; - } else b_data[i] = RESP_VARIABLE; + } else + + b_data[i] = RESP_VARIABLE; /* When all checksums change, flip most significant bit of b_data. */ - if (prev_xff != xor_ff && prev_x01 != xor_01 && - prev_s10 != sub_10 && prev_a10 != add_10) seq_byte ^= 0x80; + if (prev_xff != xor_ff && prev_x01 != xor_01 && prev_s10 != sub_10 && + prev_a10 != add_10) + seq_byte ^= 0x80; b_data[i] |= seq_byte; @@ -601,7 +612,7 @@ static void analyze(char** argv) { prev_s10 = sub_10; prev_a10 = add_10; - } + } dump_hex(in_data, in_len, b_data); @@ -618,8 +629,6 @@ static void analyze(char** argv) { } - - /* Handle Ctrl-C and the like. */ static void handle_stop_sig(int sig) { @@ -630,7 +639,6 @@ static void handle_stop_sig(int sig) { } - /* Do basic preparations - persistent fds, filenames, etc. 
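Before the environment setup: the dump_hex() length-field heuristic above is worth restating. A 2-byte run is flagged RESP_LEN when its value, in either byte order, does not exceed the input size; in isolation (hypothetical helper, same test):

  #include <stdint.h>

  #define SWAP16(x) ((uint16_t)(((uint16_t)(x) << 8) | ((uint16_t)(x) >> 8)))

  static int looks_like_len16(uint16_t val, uint32_t in_len) {
    return val && (val <= in_len || SWAP16(val) <= in_len);
  }
  // For a 64-byte input, both 0x0040 and 0x4000 pass (64 in little or
  // big endian), so either encoding of a length field is caught.
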
*/ static void set_up_environment(void) { @@ -674,18 +682,20 @@ static void set_up_environment(void) { if (x) { if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) - FATAL("Custom MSAN_OPTIONS set without exit_code=" - STRINGIFY(MSAN_ERROR) " - please fix!"); + FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY( + MSAN_ERROR) " - please fix!"); if (!strstr(x, "symbolize=0")) FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); } - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" @@ -694,21 +704,22 @@ static void set_up_environment(void) { "msan_track_origins=0", 0); if (getenv("AFL_PRELOAD")) { + setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); + } } - /* Setup signal handlers, duh. */ static void setup_signal_handlers(void) { struct sigaction sa; - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; sa.sa_sigaction = NULL; sigemptyset(&sa.sa_mask); @@ -727,43 +738,42 @@ static void setup_signal_handlers(void) { } - /* Display usage hints. */ static void usage(u8* argv0) { - SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" + SAYF( + "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" - "Required parameters:\n\n" + "Required parameters:\n\n" - " -i file - input test case to be analyzed by the tool\n" + " -i file - input test case to be analyzed by the tool\n" - "Execution control settings:\n\n" + "Execution control settings:\n\n" - " -f file - input file read by the tested program (stdin)\n" - " -t msec - timeout for each run (%d ms)\n" - " -m megs - memory limit for child process (%d MB)\n" - " -Q - use binary-only instrumentation (QEMU mode)\n" - " -U - use unicorn-based instrumentation (Unicorn mode)\n\n" + " -f file - input file read by the tested program (stdin)\n" + " -t msec - timeout for each run (%d ms)\n" + " -m megs - memory limit for child process (%d MB)\n" + " -Q - use binary-only instrumentation (QEMU mode)\n" + " -U - use unicorn-based instrumentation (Unicorn mode)\n\n" - "Analysis settings:\n\n" + "Analysis settings:\n\n" - " -e - look for edge coverage only, ignore hit counts\n\n" + " -e - look for edge coverage only, ignore hit counts\n\n" - "For additional tips, please consult %s/README.\n\n", + "For additional tips, please consult %s/README.\n\n", - argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); + argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); exit(1); } - /* Find binary. */ static void find_binary(u8* fname) { - u8* env_path = 0; + u8* env_path = 0; struct stat st; if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { @@ -786,7 +796,9 @@ static void find_binary(u8* fname) { memcpy(cur_elem, env_path, delim - env_path); delim++; - } else cur_elem = ck_strdup(env_path); + } else + + cur_elem = ck_strdup(env_path); env_path = delim; @@ -798,7 +810,8 @@ static void find_binary(u8* fname) { ck_free(cur_elem); if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && st.st_size >= 4) break; + (st.st_mode & 0111) && st.st_size >= 4) + break; ck_free(target_path); target_path = 0; @@ -811,13 +824,12 @@ static void find_binary(u8* fname) { } - /* Fix up argv for QEMU. 
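An aside on find_binary() above: a PATH candidate is accepted only if it is a regular, executable file at least 4 bytes long (enough to hold a magic number). The test, condensed:

  #include <sys/stat.h>

  static int usable_binary(const char *path) {
    struct stat st;
    return !stat(path, &st) && S_ISREG(st.st_mode) &&
           (st.st_mode & 0111) &&   // any execute bit set
           st.st_size >= 4;         // room for an ELF/shebang magic
  }
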
*/ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; + u8 * tmp, *cp, *rsl, *own_copy; memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); @@ -832,8 +844,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); + if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp); target_path = new_argv[0] = cp; return new_argv; @@ -857,7 +868,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } - } else ck_free(own_copy); + } else + + ck_free(own_copy); if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { @@ -882,7 +895,7 @@ int main(int argc, char** argv) { SAYF(cCYA "afl-analyze" VERSION cRST " by \n"); - while ((opt = getopt(argc,argv,"+i:f:m:t:eQU")) > 0) + while ((opt = getopt(argc, argv, "+i:f:m:t:eQU")) > 0) switch (opt) { @@ -896,7 +909,7 @@ int main(int argc, char** argv) { if (prog_in) FATAL("Multiple -f options not supported"); use_stdin = 0; - prog_in = optarg; + prog_in = optarg; break; case 'e': @@ -907,40 +920,41 @@ int main(int argc, char** argv) { case 'm': { - u8 suffix = 'M'; + u8 suffix = 'M'; - if (mem_limit_given) FATAL("Multiple -m options not supported"); - mem_limit_given = 1; + if (mem_limit_given) FATAL("Multiple -m options not supported"); + mem_limit_given = 1; - if (!strcmp(optarg, "none")) { + if (!strcmp(optarg, "none")) { - mem_limit = 0; - break; - - } - - if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -m"); - - switch (suffix) { - - case 'T': mem_limit *= 1024 * 1024; break; - case 'G': mem_limit *= 1024; break; - case 'k': mem_limit /= 1024; break; - case 'M': break; - - default: FATAL("Unsupported suffix or bad syntax for -m"); - - } - - if (mem_limit < 5) FATAL("Dangerously low value of -m"); - - if (sizeof(rlim_t) == 4 && mem_limit > 2000) - FATAL("Value of -m out of range on 32-bit systems"); + mem_limit = 0; + break; } - break; + if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || + optarg[0] == '-') + FATAL("Bad syntax used for -m"); + + switch (suffix) { + + case 'T': mem_limit *= 1024 * 1024; break; + case 'G': mem_limit *= 1024; break; + case 'k': mem_limit /= 1024; break; + case 'M': break; + + default: FATAL("Unsupported suffix or bad syntax for -m"); + + } + + if (mem_limit < 5) FATAL("Dangerously low value of -m"); + + if (sizeof(rlim_t) == 4 && mem_limit > 2000) + FATAL("Value of -m out of range on 32-bit systems"); + + } + + break; case 't': @@ -970,9 +984,7 @@ int main(int argc, char** argv) { unicorn_mode = 1; break; - default: - - usage(argv[0]); + default: usage(argv[0]); } diff --git a/src/afl-as.c b/src/afl-as.c index 94595f24..57f4c4a3 100644 --- a/src/afl-as.c +++ b/src/afl-as.c @@ -48,39 +48,38 @@ #include #include -static u8** as_params; /* Parameters passed to the real 'as' */ +static u8** as_params; /* Parameters passed to the real 'as' */ -static u8* input_file; /* Originally specified input file */ -static u8* modified_file; /* Instrumented file for the real 'as' */ +static u8* input_file; /* Originally specified input file */ +static u8* modified_file; /* Instrumented file for the real 'as' */ -static u8 be_quiet, /* Quiet mode (no stderr output) */ - clang_mode, /* Running in clang mode? */ - pass_thru, /* Just pass data through? */ - just_version, /* Just show version? 
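A quick note on the -m parsing in afl-analyze's main() above: every suffix normalizes to megabytes. The same arithmetic as a hypothetical helper (the real parser additionally rejects unknown suffixes):

  #include <stdint.h>

  static uint64_t to_megs(uint64_t val, char suffix) {
    switch (suffix) {
      case 'T': return val * 1024 * 1024;  // TB to MB
      case 'G': return val * 1024;         // GB to MB
      case 'k': return val / 1024;         // kB to MB
      default:  return val;                // 'M' or a bare number
    }
  }
  // to_megs(2, 'G') == 2048, so "-m 2G" and "-m 2048" are equivalent.
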
*/ - sanitizer; /* Using ASAN / MSAN */ +static u8 be_quiet, /* Quiet mode (no stderr output) */ + clang_mode, /* Running in clang mode? */ + pass_thru, /* Just pass data through? */ + just_version, /* Just show version? */ + sanitizer; /* Using ASAN / MSAN */ -static u32 inst_ratio = 100, /* Instrumentation probability (%) */ - as_par_cnt = 1; /* Number of params to 'as' */ +static u32 inst_ratio = 100, /* Instrumentation probability (%) */ + as_par_cnt = 1; /* Number of params to 'as' */ -/* If we don't find --32 or --64 in the command line, default to +/* If we don't find --32 or --64 in the command line, default to instrumentation for whichever mode we were compiled with. This is not perfect, but should do the trick for almost all use cases. */ #ifdef __x86_64__ -static u8 use_64bit = 1; +static u8 use_64bit = 1; #else -static u8 use_64bit = 0; +static u8 use_64bit = 0; -#ifdef __APPLE__ -# error "Sorry, 32-bit Apple platforms are not supported." -#endif /* __APPLE__ */ +# ifdef __APPLE__ +# error "Sorry, 32-bit Apple platforms are not supported." +# endif /* __APPLE__ */ #endif /* ^__x86_64__ */ - /* Examine and modify parameters to pass to 'as'. Note that the file name is always the last parameter passed by GCC, so we exploit this property to keep the code simple. */ @@ -134,8 +133,10 @@ static void edit_params(int argc, char** argv) { for (i = 1; i < argc - 1; i++) { - if (!strcmp(argv[i], "--64")) use_64bit = 1; - else if (!strcmp(argv[i], "--32")) use_64bit = 0; + if (!strcmp(argv[i], "--64")) + use_64bit = 1; + else if (!strcmp(argv[i], "--32")) + use_64bit = 0; #ifdef __APPLE__ @@ -143,7 +144,8 @@ static void edit_params(int argc, char** argv) { if (!strcmp(argv[i], "-arch") && i + 1 < argc) { - if (!strcmp(argv[i + 1], "x86_64")) use_64bit = 1; + if (!strcmp(argv[i + 1], "x86_64")) + use_64bit = 1; else if (!strcmp(argv[i + 1], "i386")) FATAL("Sorry, 32-bit Apple platforms are not supported."); @@ -181,13 +183,17 @@ static void edit_params(int argc, char** argv) { if (input_file[0] == '-') { if (!strcmp(input_file + 1, "-version")) { + just_version = 1; modified_file = input_file; goto wrap_things_up; + } - if (input_file[1]) FATAL("Incorrect use (not called through afl-gcc?)"); - else input_file = NULL; + if (input_file[1]) + FATAL("Incorrect use (not called through afl-gcc?)"); + else + input_file = NULL; } else { @@ -197,22 +203,21 @@ static void edit_params(int argc, char** argv) { NSS. */ if (strncmp(input_file, tmp_dir, strlen(tmp_dir)) && - strncmp(input_file, "/var/tmp/", 9) && - strncmp(input_file, "/tmp/", 5)) pass_thru = 1; + strncmp(input_file, "/var/tmp/", 9) && strncmp(input_file, "/tmp/", 5)) + pass_thru = 1; } - modified_file = alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(), - (u32)time(NULL)); + modified_file = + alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(), (u32)time(NULL)); wrap_things_up: as_params[as_par_cnt++] = modified_file; - as_params[as_par_cnt] = NULL; + as_params[as_par_cnt] = NULL; } - /* Process input file, generate modified_file. Insert instrumentation in all the appropriate places. 
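One detail of edit_params() above worth spelling out: input that does not live under a temporary directory is assumed to be a hand-written .s file (the NSS case mentioned in the comment) and is passed through without instrumentation. The decision, isolated:

  #include <string.h>

  static int should_pass_thru(const char *input_file, const char *tmp_dir) {
    return strncmp(input_file, tmp_dir, strlen(tmp_dir)) &&
           strncmp(input_file, "/var/tmp/", 9) &&
           strncmp(input_file, "/tmp/", 5);
  }
  // should_pass_thru("/home/dev/vector.s", "/tmp") != 0, so that file
  // would be emitted unmodified.
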
*/ @@ -222,11 +227,11 @@ static void add_instrumentation(void) { FILE* inf; FILE* outf; - s32 outfd; - u32 ins_lines = 0; + s32 outfd; + u32 ins_lines = 0; - u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0, - skip_intel = 0, skip_app = 0, instrument_next = 0; + u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0, skip_intel = 0, + skip_app = 0, instrument_next = 0; #ifdef __APPLE__ @@ -239,7 +244,9 @@ static void add_instrumentation(void) { inf = fopen(input_file, "r"); if (!inf) PFATAL("Unable to read '%s'", input_file); - } else inf = stdin; + } else + + inf = stdin; outfd = open(modified_file, O_WRONLY | O_EXCL | O_CREAT, 0600); @@ -247,7 +254,7 @@ static void add_instrumentation(void) { outf = fdopen(outfd, "w"); - if (!outf) PFATAL("fdopen() failed"); + if (!outf) PFATAL("fdopen() failed"); while (fgets(line, MAX_LINE, inf)) { @@ -284,22 +291,26 @@ static void add_instrumentation(void) { around them, so we use that as a signal. */ if (!clang_mode && instr_ok && !strncmp(line + 2, "p2align ", 8) && - isdigit(line[10]) && line[11] == '\n') skip_next_label = 1; + isdigit(line[10]) && line[11] == '\n') + skip_next_label = 1; if (!strncmp(line + 2, "text\n", 5) || !strncmp(line + 2, "section\t.text", 13) || !strncmp(line + 2, "section\t__TEXT,__text", 21) || !strncmp(line + 2, "section __TEXT,__text", 21)) { + instr_ok = 1; - continue; + continue; + } if (!strncmp(line + 2, "section\t", 8) || - !strncmp(line + 2, "section ", 8) || - !strncmp(line + 2, "bss\n", 4) || + !strncmp(line + 2, "section ", 8) || !strncmp(line + 2, "bss\n", 4) || !strncmp(line + 2, "data\n", 5)) { + instr_ok = 0; continue; + } } @@ -354,8 +365,9 @@ static void add_instrumentation(void) { */ - if (skip_intel || skip_app || skip_csect || !instr_ok || - line[0] == '#' || line[0] == ' ') continue; + if (skip_intel || skip_app || skip_csect || !instr_ok || line[0] == '#' || + line[0] == ' ') + continue; /* Conditional branch instruction (jnz, etc). We append the instrumentation right after the branch (to instrument the not-taken path) and at the @@ -404,15 +416,16 @@ static void add_instrumentation(void) { /* Apple: L / LBB */ - if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3))) - && R(100) < inst_ratio) { + if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3))) && + R(100) < inst_ratio) { #else /* Apple: .L / .LBB */ - if ((isdigit(line[2]) || (clang_mode && !strncmp(line + 1, "LBB", 3))) - && R(100) < inst_ratio) { + if ((isdigit(line[2]) || + (clang_mode && !strncmp(line + 1, "LBB", 3))) && + R(100) < inst_ratio) { #endif /* __APPLE__ */ @@ -427,7 +440,10 @@ static void add_instrumentation(void) { .Lfunc_begin0-style exception handling calculations (a problem on MacOS X). */ - if (!skip_next_label) instrument_next = 1; else skip_next_label = 0; + if (!skip_next_label) + instrument_next = 1; + else + skip_next_label = 0; } @@ -436,34 +452,34 @@ static void add_instrumentation(void) { /* Function label (always instrumented, deferred mode). */ instrument_next = 1; - + } } } - if (ins_lines) - fputs(use_64bit ? main_payload_64 : main_payload_32, outf); + if (ins_lines) fputs(use_64bit ? main_payload_64 : main_payload_32, outf); if (input_file) fclose(inf); fclose(outf); if (!be_quiet) { - if (!ins_lines) WARNF("No instrumentation targets found%s.", - pass_thru ? " (pass-thru mode)" : ""); - else OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).", - ins_lines, use_64bit ? "64" : "32", - getenv("AFL_HARDEN") ? "hardened" : - (sanitizer ? 
"ASAN/MSAN" : "non-hardened"), - inst_ratio); - + if (!ins_lines) + WARNF("No instrumentation targets found%s.", + pass_thru ? " (pass-thru mode)" : ""); + else + OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).", ins_lines, + use_64bit ? "64" : "32", + getenv("AFL_HARDEN") ? "hardened" + : (sanitizer ? "ASAN/MSAN" : "non-hardened"), + inst_ratio); + } } - /* Main entry point */ int main(int argc, char** argv) { @@ -473,7 +489,7 @@ int main(int argc, char** argv) { int status; u8* inst_ratio_str = getenv("AFL_INST_RATIO"); - struct timeval tv; + struct timeval tv; struct timezone tz; clang_mode = !!getenv(CLANG_ENV_VAR); @@ -481,19 +497,26 @@ int main(int argc, char** argv) { if (isatty(2) && !getenv("AFL_QUIET")) { SAYF(cCYA "afl-as" VERSION cRST " by \n"); - - } else be_quiet = 1; + + } else + + be_quiet = 1; if (argc < 2) { - SAYF("\n" - "This is a helper application for afl-fuzz. It is a wrapper around GNU 'as',\n" - "executed by the toolchain whenever using afl-gcc or afl-clang. You probably\n" - "don't want to run this program directly.\n\n" + SAYF( + "\n" + "This is a helper application for afl-fuzz. It is a wrapper around GNU " + "'as',\n" + "executed by the toolchain whenever using afl-gcc or afl-clang. You " + "probably\n" + "don't want to run this program directly.\n\n" - "Rarely, when dealing with extremely complex projects, it may be advisable to\n" - "set AFL_INST_RATIO to a value less than 100 in order to reduce the odds of\n" - "instrumenting every discovered branch.\n\n"); + "Rarely, when dealing with extremely complex projects, it may be " + "advisable to\n" + "set AFL_INST_RATIO to a value less than 100 in order to reduce the " + "odds of\n" + "instrumenting every discovered branch.\n\n"); exit(1); @@ -509,7 +532,7 @@ int main(int argc, char** argv) { if (inst_ratio_str) { - if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100) + if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || inst_ratio > 100) FATAL("Bad value of AFL_INST_RATIO (must be between 0 and 100)"); } @@ -524,9 +547,10 @@ int main(int argc, char** argv) { that... */ if (getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) { + sanitizer = 1; - if (!getenv("AFL_INST_RATIO")) - inst_ratio /= 3; + if (!getenv("AFL_INST_RATIO")) inst_ratio /= 3; + } if (!just_version) add_instrumentation(); diff --git a/src/afl-common.c b/src/afl-common.c index f3bbdfb4..9f1f45eb 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -13,25 +13,29 @@ /* Detect @@ in args. */ #ifndef __glibc__ -#include +# include #endif - void detect_file_args(char** argv, u8* prog_in) { u32 i = 0; #ifdef __GLIBC__ - u8* cwd = getcwd(NULL, 0); /* non portable glibc extension */ + u8* cwd = getcwd(NULL, 0); /* non portable glibc extension */ #else - u8* cwd; - char *buf; - long size = pathconf(".", _PC_PATH_MAX); - if ((buf = (char *)malloc((size_t)size)) != NULL) { - cwd = getcwd(buf, (size_t)size); /* portable version */ + u8* cwd; + char* buf; + long size = pathconf(".", _PC_PATH_MAX); + if ((buf = (char*)malloc((size_t)size)) != NULL) { + + cwd = getcwd(buf, (size_t)size); /* portable version */ + } else { + PFATAL("getcwd() failed"); - cwd = 0; /* for dumb compilers */ + cwd = 0; /* for dumb compilers */ + } + #endif if (!cwd) PFATAL("getcwd() failed"); @@ -48,8 +52,10 @@ void detect_file_args(char** argv, u8* prog_in) { /* Be sure that we're always using fully-qualified paths. 
*/ - if (prog_in[0] == '/') aa_subst = prog_in; - else aa_subst = alloc_printf("%s/%s", cwd, prog_in); + if (prog_in[0] == '/') + aa_subst = prog_in; + else + aa_subst = alloc_printf("%s/%s", cwd, prog_in); /* Construct a replacement argv value. */ @@ -66,7 +72,7 @@ void detect_file_args(char** argv, u8* prog_in) { } - free(cwd); /* not tracked */ + free(cwd); /* not tracked */ } diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 0051f6b0..152ae802 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -15,34 +15,39 @@ #include /* a program that includes afl-forkserver needs to define these */ -extern u8 uses_asan; +extern u8 uses_asan; extern u8 *trace_bits; extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd; -extern s32 out_fd, out_dir_fd, dev_urandom_fd, dev_null_fd; /* initialize these with -1 */ -extern u32 exec_tmout; -extern u64 mem_limit; -extern u8 *out_file, *target_path, *doc_path; +extern s32 out_fd, out_dir_fd, dev_urandom_fd, + dev_null_fd; /* initialize these with -1 */ +extern u32 exec_tmout; +extern u64 mem_limit; +extern u8 * out_file, *target_path, *doc_path; extern FILE *plot_file; -/* we need this internally but can be defined and read extern in the main source */ +/* we need this internally but can be defined and read extern in the main source + */ u8 child_timed_out; - /* Describe integer as memory size. */ -u8* forkserver_DMS(u64 val) { +u8 *forkserver_DMS(u64 val) { static u8 tmp[12][16]; static u8 cur; -#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \ - if (val < (_divisor) * (_limit_mult)) { \ +#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) \ + do { \ + \ + if (val < (_divisor) * (_limit_mult)) { \ + \ sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \ - return tmp[cur]; \ - } \ + return tmp[cur]; \ + \ + } \ + \ } while (0) - cur = (cur + 1) % 12; /* 0-9999 */ @@ -86,20 +91,23 @@ u8* forkserver_DMS(u64 val) { } - - /* the timeout handler */ void handle_timeout(int sig) { - if (child_pid > 0) { - child_timed_out = 1; - kill(child_pid, SIGKILL); - } else if (child_pid == -1 && forksrv_pid > 0) { - child_timed_out = 1; - kill(forksrv_pid, SIGKILL); - } -} + if (child_pid > 0) { + + child_timed_out = 1; + kill(child_pid, SIGKILL); + + } else if (child_pid == -1 && forksrv_pid > 0) { + + child_timed_out = 1; + kill(forksrv_pid, SIGKILL); + + } + +} /* Spin up fork server (instrumented mode only). The idea is explained here: @@ -112,20 +120,18 @@ void handle_timeout(int sig) { void init_forkserver(char **argv) { static struct itimerval it; - int st_pipe[2], ctl_pipe[2]; - int status; - s32 rlen; + int st_pipe[2], ctl_pipe[2]; + int status; + s32 rlen; ACTF("Spinning up the fork server..."); - if (pipe(st_pipe) || pipe(ctl_pipe)) - PFATAL("pipe() failed"); + if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed"); child_timed_out = 0; forksrv_pid = fork(); - if (forksrv_pid < 0) - PFATAL("fork() failed"); + if (forksrv_pid < 0) PFATAL("fork() failed"); if (!forksrv_pid) { @@ -137,29 +143,33 @@ void init_forkserver(char **argv) { soft 128. Let's try to fix that... 
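The fix below only needs the soft limit raised. The same adjustment in isolation, assuming FORKSRV_FD is 198 as in AFL's config.h:

  #include <sys/resource.h>

  #define FORKSRV_FD 198                 // assumed, per config.h

  static void ensure_fd_headroom(void) {
    struct rlimit r;
    if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {
      r.rlim_cur = FORKSRV_FD + 2;       // need FORKSRV_FD and FORKSRV_FD + 1
      setrlimit(RLIMIT_NOFILE, &r);      // errors deliberately ignored
    }
  }
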
*/ if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) { + r.rlim_cur = FORKSRV_FD + 2; - setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ + setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ + } if (mem_limit) { + r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ #else /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but according to reliable sources, RLIMIT_DATA covers anonymous maps - so we should be getting good protection against OOM bugs. */ - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ #endif /* ^RLIMIT_AS */ + } /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered before the dump is complete. */ -// r.rlim_max = r.rlim_cur = 0; -// setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + // r.rlim_max = r.rlim_cur = 0; + // setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ /* Isolate the process and configure standard descriptors. If out_file is specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ @@ -167,23 +177,27 @@ void init_forkserver(char **argv) { setsid(); if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) { + dup2(dev_null_fd, 1); dup2(dev_null_fd, 2); + } if (out_file) { + dup2(dev_null_fd, 0); + } else { + dup2(out_fd, 0); close(out_fd); + } /* Set up control and status pipes, close the unneeded original fds. */ - if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) - PFATAL("dup2() failed"); - if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) - PFATAL("dup2() failed"); + if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed"); + if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed"); close(ctl_pipe[0]); close(ctl_pipe[1]); @@ -198,8 +212,7 @@ void init_forkserver(char **argv) { /* This should improve performance a bit, since it stops the linker from doing extra work post-fork(). */ - if (!getenv("LD_BIND_LAZY")) - setenv("LD_BIND_NOW", "1", 0); + if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0); /* Set sane defaults for ASAN if nothing else specified. */ @@ -228,6 +241,7 @@ void init_forkserver(char **argv) { *(u32 *)trace_bits = EXEC_FAIL_SIG; exit(0); + } /* PARENT PROCESS */ @@ -243,8 +257,10 @@ void init_forkserver(char **argv) { /* Wait for the fork server to come up, but don't wait too long. */ if (exec_tmout) { + it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000); it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000; + } setitimer(ITIMER_REAL, &it, NULL); @@ -260,22 +276,24 @@ void init_forkserver(char **argv) { Otherwise, try to figure out what went wrong. */ if (rlen == 4) { + OKF("All right - fork server is up."); return; + } if (child_timed_out) FATAL("Timeout while initializing fork server (adjusting -t may help)"); - if (waitpid(forksrv_pid, &status, 0) <= 0) - PFATAL("waitpid() failed"); + if (waitpid(forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed"); if (WIFSIGNALED(status)) { if (mem_limit && mem_limit < 500 && uses_asan) { - SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " - "before receiving any input\n" + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" " from the fuzzer! 
Since it seems to be built with ASAN and you " "have a\n" " restrictive memory limit configured, this is expected; please " @@ -285,8 +303,9 @@ void init_forkserver(char **argv) { } else if (!mem_limit) { - SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " - "before receiving any input\n" + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" " from the fuzzer! There are several probable explanations:\n\n" " - The binary is just buggy and explodes entirely on its own. " @@ -303,8 +322,9 @@ void init_forkserver(char **argv) { } else { - SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, " - "before receiving any input\n" + SAYF("\n" cLRD "[-] " cRST + "Whoops, the target binary crashed suddenly, " + "before receiving any input\n" " from the fuzzer! There are several probable explanations:\n\n" " - The current memory limit (%s) is too restrictive, causing " @@ -315,7 +335,8 @@ void init_forkserver(char **argv) { "way confirm\n" " this diagnosis would be:\n\n" - MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" + MSG_ULIMIT_USAGE + " /path/to/fuzzed_app )\n\n" " Tip: you can use http://jwilk.net/software/recidivm to " "quickly\n" @@ -334,9 +355,11 @@ void init_forkserver(char **argv) { " fail, poke for troubleshooting " "tips.\n", forkserver_DMS(mem_limit << 20), mem_limit - 1); + } FATAL("Fork server crashed with signal %d", WTERMSIG(status)); + } if (*(u32 *)trace_bits == EXEC_FAIL_SIG) @@ -344,8 +367,9 @@ void init_forkserver(char **argv) { if (mem_limit && mem_limit < 500 && uses_asan) { - SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " - "before we could complete a\n" + SAYF("\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" " handshake with the injected code. Since it seems to be built " "with ASAN and\n" " you have a restrictive memory limit configured, this is " @@ -355,8 +379,9 @@ void init_forkserver(char **argv) { } else if (!mem_limit) { - SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " - "before we could complete a\n" + SAYF("\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" " handshake with the injected code. Perhaps there is a horrible " "bug in the\n" " fuzzer. Poke for troubleshooting " @@ -365,8 +390,9 @@ void init_forkserver(char **argv) { } else { SAYF( - "\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated " - "before we could complete a\n" + "\n" cLRD "[-] " cRST + "Hmm, looks like the target binary terminated " + "before we could complete a\n" " handshake with the injected code. There are %s probable " "explanations:\n\n" @@ -377,7 +403,8 @@ void init_forkserver(char **argv) { "option. A\n" " simple way to confirm the diagnosis may be:\n\n" - MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n" + MSG_ULIMIT_USAGE + " /path/to/fuzzed_app )\n\n" " Tip: you can use http://jwilk.net/software/recidivm to quickly\n" " estimate the required amount of virtual memory for the " @@ -394,8 +421,10 @@ void init_forkserver(char **argv) { " reached before the program terminates.\n\n" : "", forkserver_DMS(mem_limit << 20), mem_limit - 1); + } FATAL("Fork server handshake failed"); + } diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 1a77dc13..be187fff 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -46,7 +46,6 @@ void write_bitmap(void) { } - /* Read bitmap from file. 
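A note ahead of has_new_bits() further down: its contract is 0 (nothing new), 1 (only a hit-count change) and 2 (a brand-new tuple). A byte-granular sketch of the same logic; the real code walks u64 or u32 words for speed:

  static unsigned char has_new(unsigned char *trace, unsigned char *virgin,
                               unsigned map_size) {
    unsigned char ret = 0;
    unsigned      i;
    for (i = 0; i < map_size; i++) {
      if (trace[i] & virgin[i]) {        // a still-virgin bit was hit
        if (virgin[i] == 0xff) ret = 2;  // tuple never seen before
        else if (!ret) ret = 1;          // only a new hit-count bucket
        virgin[i] &= ~trace[i];          // retire those bits
      }
    }
    return ret;
  }
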
This is for the -B option again. */ void read_bitmap(u8* fname) { @@ -61,10 +60,9 @@ void read_bitmap(u8* fname) { } - /* Check if the current execution path brings anything new to the table. Update virgin bits to reflect the finds. Returns 1 if the only change is - the hit-count for a particular tuple; 2 if there are new tuples seen. + the hit-count for a particular tuple; 2 if there are new tuples seen. Updates the map, so subsequent calls will always return 0. This function is called after every exec() on a fairly large buffer, so @@ -75,20 +73,20 @@ u8 has_new_bits(u8* virgin_map) { #ifdef __x86_64__ u64* current = (u64*)trace_bits; - u64* virgin = (u64*)virgin_map; + u64* virgin = (u64*)virgin_map; - u32 i = (MAP_SIZE >> 3); + u32 i = (MAP_SIZE >> 3); #else u32* current = (u32*)trace_bits; - u32* virgin = (u32*)virgin_map; + u32* virgin = (u32*)virgin_map; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); #endif /* ^__x86_64__ */ - u8 ret = 0; + u8 ret = 0; while (i--) { @@ -111,14 +109,18 @@ u8 has_new_bits(u8* virgin_map) { if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) || (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) || - (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2; - else ret = 1; + (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) + ret = 2; + else + ret = 1; #else if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || - (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2; - else ret = 1; + (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) + ret = 2; + else + ret = 1; #endif /* ^__x86_64__ */ @@ -139,14 +141,13 @@ u8 has_new_bits(u8* virgin_map) { } - /* Count the number of bits set in the provided bitmap. Used for the status screen several times every second, does not have to be fast. */ u32 count_bits(u8* mem) { u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); u32 ret = 0; while (i--) { @@ -157,8 +158,10 @@ u32 count_bits(u8* mem) { data. */ if (v == 0xffffffff) { + ret += 32; continue; + } v -= ((v >> 1) & 0x55555555); @@ -171,8 +174,7 @@ u32 count_bits(u8* mem) { } - -#define FF(_b) (0xff << ((_b) << 3)) +#define FF(_b) (0xff << ((_b) << 3)) /* Count the number of bytes set in the bitmap. Called fairly sporadically, mostly to update the status screen or calibrate and examine confirmed @@ -181,7 +183,7 @@ u32 count_bits(u8* mem) { u32 count_bytes(u8* mem) { u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); u32 ret = 0; while (i--) { @@ -200,14 +202,13 @@ u32 count_bytes(u8* mem) { } - /* Count the number of non-255 bytes set in the bitmap. Used strictly for the status screen, several calls per second or so. */ u32 count_non_255_bytes(u8* mem) { u32* ptr = (u32*)mem; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); u32 ret = 0; while (i--) { @@ -229,16 +230,14 @@ u32 count_non_255_bytes(u8* mem) { } - /* Destructively simplify trace by eliminating hit count information and replacing it with 0x80 or 0x01 depending on whether the tuple is hit or not. Called on every new crash or timeout, should be reasonably fast. */ -const u8 simplify_lookup[256] = { +const u8 simplify_lookup[256] = { - [0] = 1, - [1 ... 255] = 128 + [0] = 1, [1 ... 
255] = 128 }; @@ -265,7 +264,9 @@ void simplify_trace(u64* mem) { mem8[6] = simplify_lookup[mem8[6]]; mem8[7] = simplify_lookup[mem8[7]]; - } else *mem = 0x0101010101010101ULL; + } else + + *mem = 0x0101010101010101ULL; ++mem; @@ -292,50 +293,49 @@ void simplify_trace(u32* mem) { mem8[2] = simplify_lookup[mem8[2]]; mem8[3] = simplify_lookup[mem8[3]]; - } else *mem = 0x01010101; + } else + + *mem = 0x01010101; ++mem; + } } #endif /* ^__x86_64__ */ - /* Destructively classify execution counts in a trace. This is used as a preprocessing step for any newly acquired traces. Called on every exec, must be fast. */ static const u8 count_class_lookup8[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 }; static u16 count_class_lookup16[65536]; - void init_count_class16(void) { u32 b1, b2; - for (b1 = 0; b1 < 256; b1++) + for (b1 = 0; b1 < 256; b1++) for (b2 = 0; b2 < 256; b2++) - count_class_lookup16[(b1 << 8) + b2] = - (count_class_lookup8[b1] << 8) | - count_class_lookup8[b2]; + count_class_lookup16[(b1 << 8) + b2] = + (count_class_lookup8[b1] << 8) | count_class_lookup8[b2]; } - #ifdef __x86_64__ void classify_counts(u64* mem) { @@ -390,7 +390,6 @@ void classify_counts(u32* mem) { #endif /* ^__x86_64__ */ - /* Compact trace bytes into a smaller bitmap. We effectively just drop the count information here. This is called only sporadically, for some new paths. */ @@ -408,7 +407,6 @@ void minimize_bits(u8* dst, u8* src) { } - #ifndef SIMPLE_FILES /* Construct a file name for a new test case, capturing the operation @@ -428,8 +426,7 @@ u8* describe_op(u8 hnb) { sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time); - if (splicing_with >= 0) - sprintf(ret + strlen(ret), "+%06d", splicing_with); + if (splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", splicing_with); sprintf(ret + strlen(ret), ",op:%s", stage_short); @@ -438,11 +435,12 @@ u8* describe_op(u8 hnb) { sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte); if (stage_val_type != STAGE_VAL_NONE) - sprintf(ret + strlen(ret), ",val:%s%+d", - (stage_val_type == STAGE_VAL_BE) ? "be:" : "", - stage_cur_val); + sprintf(ret + strlen(ret), ",val:%s%+d", + (stage_val_type == STAGE_VAL_BE) ? "be:" : "", stage_cur_val); - } else sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val); + } else + + sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val); } @@ -454,13 +452,12 @@ u8* describe_op(u8 hnb) { #endif /* !SIMPLE_FILES */ - /* Write a message accompanying the crash directory :-) */ static void write_crash_readme(void) { - u8* fn = alloc_printf("%s/crashes/README.txt", out_dir); - s32 fd; + u8* fn = alloc_printf("%s/crashes/README.txt", out_dir); + s32 fd; FILE* f; fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); @@ -473,32 +470,38 @@ static void write_crash_readme(void) { f = fdopen(fd, "w"); if (!f) { + close(fd); return; + } - fprintf(f, "Command line used to find this crash:\n\n" + fprintf( + f, + "Command line used to find this crash:\n\n" - "%s\n\n" + "%s\n\n" - "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n" - "memory limit. The limit used for this fuzzing session was %s.\n\n" + "If you can't reproduce a bug outside of afl-fuzz, be sure to set the " + "same\n" + "memory limit. 
The limit used for this fuzzing session was %s.\n\n" - "Need a tool to minimize test cases before investigating the crashes or sending\n" - "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n" + "Need a tool to minimize test cases before investigating the crashes or " + "sending\n" + "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n" - "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n" - "an mail at once the issues are fixed\n\n" + "Found any cool bugs in open-source tools using afl-fuzz? If yes, please " + "drop\n" + "an mail at once the issues are fixed\n\n" - " https://github.com/vanhauser-thc/AFLplusplus\n\n", + " https://github.com/vanhauser-thc/AFLplusplus\n\n", - orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */ + orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */ fclose(f); } - /* Check if the result of an execve() during routine fuzzing is interesting, save or queue the input test case for further analysis if so. Returns 1 if entry is saved, 0 otherwise. */ @@ -507,7 +510,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { if (len == 0) return 0; - u8 *fn = ""; + u8* fn = ""; u8 hnb; s32 fd; u8 keeping = 0, res; @@ -517,8 +520,8 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { struct queue_entry* q = queue; while (q) { - if (q->exec_cksum == cksum) - q->n_fuzz = q->n_fuzz + 1; + + if (q->exec_cksum == cksum) q->n_fuzz = q->n_fuzz + 1; q = q->next; @@ -530,9 +533,11 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { future fuzzing, etc. */ if (!(hnb = has_new_bits(virgin_bits))) { + if (crash_mode) ++total_crashes; return 0; - } + + } #ifndef SIMPLE_FILES @@ -548,8 +553,10 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { add_to_queue(fn, len, 0); if (hnb == 2) { + queue_top->has_new_cov = 1; ++queued_with_cov; + } queue_top->exec_cksum = cksum; @@ -559,8 +566,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0); - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", fn); @@ -620,13 +626,12 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { #ifndef SIMPLE_FILES - fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, - unique_hangs, describe_op(0)); + fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, unique_hangs, + describe_op(0)); #else - fn = alloc_printf("%s/hangs/id_%06llu", out_dir, - unique_hangs); + fn = alloc_printf("%s/hangs/id_%06llu", out_dir, unique_hangs); #endif /* ^!SIMPLE_FILES */ @@ -638,7 +643,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { case FAULT_CRASH: -keep_as_crash: + keep_as_crash: /* This is handled in a manner roughly similar to timeouts, except for slightly different limits and no need to re-run test diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 1f52181d..f43c86f4 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -22,32 +22,32 @@ #include "afl-fuzz.h" - /* Helper function for load_extras. 
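Both comparators below plug straight into qsort(). A self-contained demonstration with hypothetical tokens:

  #include <stdio.h>
  #include <stdlib.h>

  struct extra { const char *data; unsigned len; };

  static int cmp_len(const void *p1, const void *p2) {
    const struct extra *e1 = p1, *e2 = p2;
    return (int)e1->len - (int)e2->len;  // ascending by token size
  }

  int main(void) {
    struct extra ex[] = {{"GIF89a", 6}, {"%%", 2}, {"\x7f" "ELF", 4}};
    qsort(ex, 3, sizeof(struct extra), cmp_len);
    printf("%s\n", ex[0].data);          // shortest token, "%%", first
    return 0;
  }
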
*/ static int compare_extras_len(const void* p1, const void* p2) { - struct extra_data *e1 = (struct extra_data*)p1, - *e2 = (struct extra_data*)p2; + + struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2; return e1->len - e2->len; + } static int compare_extras_use_d(const void* p1, const void* p2) { - struct extra_data *e1 = (struct extra_data*)p1, - *e2 = (struct extra_data*)p2; + + struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2; return e2->hit_cnt - e1->hit_cnt; -} +} /* Read extras from a file, sort by size. */ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { FILE* f; - u8 buf[MAX_LINE]; - u8 *lptr; - u32 cur_line = 0; + u8 buf[MAX_LINE]; + u8* lptr; + u32 cur_line = 0; f = fopen(fname, "r"); @@ -62,10 +62,12 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { /* Trim on left and right. */ - while (isspace(*lptr)) ++lptr; + while (isspace(*lptr)) + ++lptr; rptr = lptr + strlen(lptr) - 1; - while (rptr >= lptr && isspace(*rptr)) --rptr; + while (rptr >= lptr && isspace(*rptr)) + --rptr; ++rptr; *rptr = 0; @@ -84,7 +86,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { /* Skip alphanumerics and dashes (label). */ - while (isalnum(*lptr) || *lptr == '_') ++lptr; + while (isalnum(*lptr) || *lptr == '_') + ++lptr; /* If @number follows, parse that. */ @@ -92,13 +95,15 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { ++lptr; if (atoi(lptr) > dict_level) continue; - while (isdigit(*lptr)) ++lptr; + while (isdigit(*lptr)) + ++lptr; } /* Skip whitespace and = signs. */ - while (isspace(*lptr) || *lptr == '=') ++lptr; + while (isspace(*lptr) || *lptr == '=') + ++lptr; /* Consume opening '"'. */ @@ -112,8 +117,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { /* Okay, let's allocate memory and copy data between "...", handling \xNN escaping, \\, and \". */ - extras = ck_realloc_block(extras, (extras_cnt + 1) * - sizeof(struct extra_data)); + extras = + ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data)); wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr); @@ -132,27 +137,25 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { ++lptr; if (*lptr == '\\' || *lptr == '"') { + *(wptr++) = *(lptr++); klen++; break; + } if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2])) FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line); - *(wptr++) = - ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) | - (strchr(hexdigits, tolower(lptr[2])) - hexdigits); + *(wptr++) = ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) | + (strchr(hexdigits, tolower(lptr[2])) - hexdigits); lptr += 3; ++klen; break; - default: - - *(wptr++) = *(lptr++); - ++klen; + default: *(wptr++) = *(lptr++); ++klen; } @@ -161,8 +164,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { extras[extras_cnt].len = klen; if (extras[extras_cnt].len > MAX_DICT_FILE) - FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, - DMS(klen), DMS(MAX_DICT_FILE)); + FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, DMS(klen), + DMS(MAX_DICT_FILE)); if (*min_len > klen) *min_len = klen; if (*max_len < klen) *max_len = klen; @@ -175,15 +178,14 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) { } - /* Read extras from the extras directory and sort them by size. 
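Worth isolating from load_extras_file() above: the \xNN decoder is just two lookups into a hex digit table (assumed here to be "0123456789abcdef", matching its use above):

  #include <ctype.h>
  #include <string.h>

  static const char hexdigits[] = "0123456789abcdef";  // assumed definition

  static unsigned char decode_xx(const char *p) {      // p points at "NN"
    return ((strchr(hexdigits, tolower((unsigned char)p[0])) - hexdigits) << 4) |
            (strchr(hexdigits, tolower((unsigned char)p[1])) - hexdigits);
  }
  // decode_xx("7f") == 0x7f; callers must validate with isxdigit() first,
  // exactly as the parser above does.
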
*/ void load_extras(u8* dir) { - DIR* d; + DIR* d; struct dirent* de; - u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0; - u8* x; + u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0; + u8* x; /* If the name ends with @, extract level and continue. */ @@ -201,8 +203,10 @@ void load_extras(u8* dir) { if (!d) { if (errno == ENOTDIR) { + load_extras_file(dir, &min_len, &max_len, dict_level); goto check_and_sort; + } PFATAL("Unable to open '%s'", dir); @@ -214,11 +218,10 @@ void load_extras(u8* dir) { while ((de = readdir(d))) { struct stat st; - u8* fn = alloc_printf("%s/%s", dir, de->d_name); - s32 fd; + u8* fn = alloc_printf("%s/%s", dir, de->d_name); + s32 fd; - if (lstat(fn, &st) || access(fn, R_OK)) - PFATAL("Unable to access '%s'", fn); + if (lstat(fn, &st) || access(fn, R_OK)) PFATAL("Unable to access '%s'", fn); /* This also takes care of . and .. */ if (!S_ISREG(st.st_mode) || !st.st_size) { @@ -229,17 +232,17 @@ void load_extras(u8* dir) { } if (st.st_size > MAX_DICT_FILE) - FATAL("Extra '%s' is too big (%s, limit is %s)", fn, - DMS(st.st_size), DMS(MAX_DICT_FILE)); + FATAL("Extra '%s' is too big (%s, limit is %s)", fn, DMS(st.st_size), + DMS(MAX_DICT_FILE)); if (min_len > st.st_size) min_len = st.st_size; if (max_len < st.st_size) max_len = st.st_size; - extras = ck_realloc_block(extras, (extras_cnt + 1) * - sizeof(struct extra_data)); + extras = + ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data)); extras[extras_cnt].data = ck_alloc(st.st_size); - extras[extras_cnt].len = st.st_size; + extras[extras_cnt].len = st.st_size; fd = open(fn, O_RDONLY); @@ -262,8 +265,8 @@ check_and_sort: qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len); - OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, - DMS(min_len), DMS(max_len)); + OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, DMS(min_len), + DMS(max_len)); if (max_len > 32) WARNF("Some tokens are relatively large (%s) - consider trimming.", @@ -275,18 +278,16 @@ check_and_sort: } - - /* Helper function for maybe_add_auto() */ static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) { - while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1; + while (len--) + if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1; return 0; } - /* Maybe add automatic extra. 
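maybe_add_auto() below dedupes candidate tokens case-insensitively through the helper just above; its behavior on a pair of tokens, restated with const char pointers:

  #include <ctype.h>

  static unsigned char nocase_differs(const char *m1, const char *m2,
                                      unsigned len) {
    while (len--)
      if (tolower((unsigned char)*(m1++)) ^ tolower((unsigned char)*(m2++)))
        return 1;
    return 0;
  }
  // nocase_differs("PNG", "png", 3) == 0, so the new token counts as a
  // duplicate and only the existing entry's hit counter is bumped.
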
*/ void maybe_add_auto(u8* mem, u32 len) { @@ -310,9 +311,10 @@ void maybe_add_auto(u8* mem, u32 len) { i = sizeof(interesting_16) >> 1; - while (i--) + while (i--) if (*((u16*)mem) == interesting_16[i] || - *((u16*)mem) == SWAP16(interesting_16[i])) return; + *((u16*)mem) == SWAP16(interesting_16[i])) + return; } @@ -320,9 +322,10 @@ void maybe_add_auto(u8* mem, u32 len) { i = sizeof(interesting_32) >> 2; - while (i--) + while (i--) if (*((u32*)mem) == interesting_32[i] || - *((u32*)mem) == SWAP32(interesting_32[i])) return; + *((u32*)mem) == SWAP32(interesting_32[i])) + return; } @@ -358,22 +361,21 @@ void maybe_add_auto(u8* mem, u32 len) { if (a_extras_cnt < MAX_AUTO_EXTRAS) { - a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) * - sizeof(struct extra_data)); + a_extras = ck_realloc_block(a_extras, + (a_extras_cnt + 1) * sizeof(struct extra_data)); a_extras[a_extras_cnt].data = ck_memdup(mem, len); - a_extras[a_extras_cnt].len = len; + a_extras[a_extras_cnt].len = len; ++a_extras_cnt; } else { - i = MAX_AUTO_EXTRAS / 2 + - UR((MAX_AUTO_EXTRAS + 1) / 2); + i = MAX_AUTO_EXTRAS / 2 + UR((MAX_AUTO_EXTRAS + 1) / 2); ck_free(a_extras[i].data); - a_extras[i].data = ck_memdup(mem, len); - a_extras[i].len = len; + a_extras[i].data = ck_memdup(mem, len); + a_extras[i].len = len; a_extras[i].hit_cnt = 0; } @@ -387,12 +389,11 @@ sort_a_extras: /* Then, sort the top USE_AUTO_EXTRAS entries by size. */ - qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), - sizeof(struct extra_data), compare_extras_len); + qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), sizeof(struct extra_data), + compare_extras_len); } - /* Save automatically generated extras. */ void save_auto(void) { @@ -420,7 +421,6 @@ void save_auto(void) { } - /* Load automatically generated extras. */ void load_auto(void) { @@ -458,24 +458,25 @@ void load_auto(void) { } - if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i); - else OKF("No auto-generated dictionary tokens to reuse."); + if (i) + OKF("Loaded %u auto-discovered dictionary tokens.", i); + else + OKF("No auto-generated dictionary tokens to reuse."); } - /* Destroy extras. */ void destroy_extras(void) { u32 i; - for (i = 0; i < extras_cnt; ++i) + for (i = 0; i < extras_cnt; ++i) ck_free(extras[i].data); ck_free(extras); - for (i = 0; i < a_extras_cnt; ++i) + for (i = 0; i < a_extras_cnt; ++i) ck_free(a_extras[i].data); ck_free(a_extras); diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c index e28c3099..8fded173 100644 --- a/src/afl-fuzz-globals.c +++ b/src/afl-fuzz-globals.c @@ -25,27 +25,13 @@ /* MOpt: Lots of globals, but mostly for the status UI and other things where it really makes no sense to haul them around as function parameters. 
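Back in maybe_add_auto() above: when the auto-extras list is full, the victim slot is drawn only from the upper half of the list, so older, frequently hit tokens in the lower half survive. With UR(n) uniform over [0, n) and a hypothetical MAX_AUTO_EXTRAS of 500 (AFL's usual USE_AUTO_EXTRAS * 10):

  #define MAX_AUTO_EXTRAS 500  // assumed for the example

  static unsigned pick_victim(unsigned (*ur)(unsigned)) {
    return MAX_AUTO_EXTRAS / 2 + ur((MAX_AUTO_EXTRAS + 1) / 2);
  }
  // Results fall in [250, 499]; slots 0 through 249 are never evicted.
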
*/ -u64 limit_time_puppet, - orig_hit_cnt_puppet, - last_limit_time_start, - tmp_pilot_time, - total_pacemaker_time, - total_puppet_find, - temp_puppet_find, - most_time_key, - most_time, - most_execs_key, - most_execs, - old_hit_count; +u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start, + tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find, + most_time_key, most_time, most_execs_key, most_execs, old_hit_count; -s32 SPLICE_CYCLES_puppet, - limit_time_sig, - key_puppet, - key_module; +s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module; -double w_init = 0.9, - w_end = 0.3, - w_now; +double w_init = 0.9, w_end = 0.3, w_now; s32 g_now; s32 g_max = 5000; @@ -53,15 +39,13 @@ s32 g_max = 5000; u64 tmp_core_time; s32 swarm_now; -double x_now[swarm_num][operator_num], - L_best[swarm_num][operator_num], - eff_best[swarm_num][operator_num], - G_best[operator_num], - v_now[swarm_num][operator_num], - probability_now[swarm_num][operator_num], - swarm_fitness[swarm_num]; +double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num], + eff_best[swarm_num][operator_num], G_best[operator_num], + v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num], + swarm_fitness[swarm_num]; -u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */ +u64 stage_finds_puppet[swarm_num] + [operator_num], /* Patterns found per fuzz stage */ stage_finds_puppet_v2[swarm_num][operator_num], stage_cycles_puppet_v2[swarm_num][operator_num], stage_cycles_puppet_v3[swarm_num][operator_num], @@ -71,207 +55,197 @@ u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per core_operator_finds_puppet_v2[operator_num], core_operator_cycles_puppet[operator_num], core_operator_cycles_puppet_v2[operator_num], - core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ + core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */ double period_pilot_tmp = 5000.0; -s32 key_lv; +s32 key_lv; -u8 *in_dir, /* Input directory with test cases */ - *out_dir, /* Working & output directory */ - *tmp_dir , /* Temporary directory for input */ - *sync_dir, /* Synchronization directory */ - *sync_id, /* Fuzzer ID */ - *power_name, /* Power schedule name */ - *use_banner, /* Display banner */ - *in_bitmap, /* Input bitmap */ - *file_extension, /* File extension */ - *orig_cmdline; /* Original command line */ -u8 *doc_path, /* Path to documentation dir */ - *target_path, /* Path to target binary */ - *out_file; /* File to fuzz, if any */ +u8 *in_dir, /* Input directory with test cases */ + *out_dir, /* Working & output directory */ + *tmp_dir, /* Temporary directory for input */ + *sync_dir, /* Synchronization directory */ + *sync_id, /* Fuzzer ID */ + *power_name, /* Power schedule name */ + *use_banner, /* Display banner */ + *in_bitmap, /* Input bitmap */ + *file_extension, /* File extension */ + *orig_cmdline; /* Original command line */ +u8 *doc_path, /* Path to documentation dir */ + *target_path, /* Path to target binary */ + *out_file; /* File to fuzz, if any */ -u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */ -u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */ +u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */ +u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */ -u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ +u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ -u8 cal_cycles = CAL_CYCLES, /* Calibration cycles 
defaults */ - cal_cycles_long = CAL_CYCLES_LONG, - debug, /* Debug mode */ - python_only; /* Python-only mode */ +u8 cal_cycles = CAL_CYCLES, /* Calibration cycles defaults */ + cal_cycles_long = CAL_CYCLES_LONG, debug, /* Debug mode */ + python_only; /* Python-only mode */ -u32 stats_update_freq = 1; /* Stats update frequency (execs) */ +u32 stats_update_freq = 1; /* Stats update frequency (execs) */ -char *power_names[POWER_SCHEDULES_NUM] = { - "explore", - "fast", - "coe", - "lin", - "quad", - "exploit" -}; +char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe", + "lin", "quad", "exploit"}; -u8 schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/ +u8 schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/ u8 havoc_max_mult = HAVOC_MAX_MULT; -u8 skip_deterministic, /* Skip deterministic stages? */ - force_deterministic, /* Force deterministic stages? */ - use_splicing, /* Recombine input files? */ - dumb_mode, /* Run in non-instrumented mode? */ - score_changed, /* Scoring for favorites changed? */ - kill_signal, /* Signal that killed the child */ - resuming_fuzz, /* Resuming an older fuzzing job? */ - timeout_given, /* Specific timeout given? */ - not_on_tty, /* stdout is not a tty */ - term_too_small, /* terminal dimensions too small */ - no_forkserver, /* Disable forkserver? */ - crash_mode, /* Crash mode! Yeah! */ - in_place_resume, /* Attempt in-place resume? */ - auto_changed, /* Auto-generated tokens changed? */ - no_cpu_meter_red, /* Feng shui on the status screen */ - no_arith, /* Skip most arithmetic ops */ - shuffle_queue, /* Shuffle input queue? */ - bitmap_changed = 1, /* Time to update bitmap? */ - qemu_mode, /* Running in QEMU mode? */ - unicorn_mode, /* Running in Unicorn mode? */ - skip_requested, /* Skip request, via SIGUSR1 */ - run_over10m, /* Run time over 10 minutes? */ - persistent_mode, /* Running in persistent mode? */ - deferred_mode, /* Deferred forkserver mode? */ - fixed_seed, /* do not reseed */ - fast_cal, /* Try to calibrate faster? */ - uses_asan; /* Target uses ASAN? */ +u8 skip_deterministic, /* Skip deterministic stages? */ + force_deterministic, /* Force deterministic stages? */ + use_splicing, /* Recombine input files? */ + dumb_mode, /* Run in non-instrumented mode? */ + score_changed, /* Scoring for favorites changed? */ + kill_signal, /* Signal that killed the child */ + resuming_fuzz, /* Resuming an older fuzzing job? */ + timeout_given, /* Specific timeout given? */ + not_on_tty, /* stdout is not a tty */ + term_too_small, /* terminal dimensions too small */ + no_forkserver, /* Disable forkserver? */ + crash_mode, /* Crash mode! Yeah! */ + in_place_resume, /* Attempt in-place resume? */ + auto_changed, /* Auto-generated tokens changed? */ + no_cpu_meter_red, /* Feng shui on the status screen */ + no_arith, /* Skip most arithmetic ops */ + shuffle_queue, /* Shuffle input queue? */ + bitmap_changed = 1, /* Time to update bitmap? */ + qemu_mode, /* Running in QEMU mode? */ + unicorn_mode, /* Running in Unicorn mode? */ + skip_requested, /* Skip request, via SIGUSR1 */ + run_over10m, /* Run time over 10 minutes? */ + persistent_mode, /* Running in persistent mode? */ + deferred_mode, /* Deferred forkserver mode? */ + fixed_seed, /* do not reseed */ + fast_cal, /* Try to calibrate faster? */ + uses_asan; /* Target uses ASAN? 
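
The power_names table above is ordered to mirror the schedule enum, so resolving a -p argument can be a plain index scan. A sketch of such a lookup (hypothetical helper; the real option parsing happens in afl-fuzz's getopt loop):

  #include <string.h>

  /* Map a schedule name to its enum index; -1 if unknown. */
  static int lookup_schedule(const char* name, char* names[], int count) {

    int i;
    for (i = 0; i < count; ++i)
      if (!strcmp(name, names[i])) return i;
    return -1;

  }

  /* e.g. lookup_schedule("fast", power_names, POWER_SCHEDULES_NUM) == 1,
     assuming the enum order matches the table as above. */
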
*/ -s32 out_fd, /* Persistent fd for out_file */ +s32 out_fd, /* Persistent fd for out_file */ #ifndef HAVE_ARC4RANDOM - dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ + dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ #endif - dev_null_fd = -1, /* Persistent fd for /dev/null */ - fsrv_ctl_fd, /* Fork server control pipe (write) */ - fsrv_st_fd; /* Fork server status pipe (read) */ + dev_null_fd = -1, /* Persistent fd for /dev/null */ + fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ - s32 forksrv_pid, /* PID of the fork server */ - child_pid = -1, /* PID of the fuzzed program */ - out_dir_fd = -1; /* FD of the lock file */ +s32 forksrv_pid, /* PID of the fork server */ + child_pid = -1, /* PID of the fuzzed program */ + out_dir_fd = -1; /* FD of the lock file */ - u8* trace_bits; /* SHM with instrumentation bitmap */ +u8 *trace_bits; /* SHM with instrumentation bitmap */ -u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ - virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ - virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ +u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ + virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */ + virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ -u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ +u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ -volatile u8 stop_soon, /* Ctrl-C pressed? */ - clear_screen = 1, /* Window resized? */ - child_timed_out; /* Traced process timed out? */ +volatile u8 stop_soon, /* Ctrl-C pressed? */ + clear_screen = 1, /* Window resized? */ + child_timed_out; /* Traced process timed out? */ -u32 queued_paths, /* Total number of queued testcases */ - queued_variable, /* Testcases with variable behavior */ - queued_at_start, /* Total number of initial inputs */ - queued_discovered, /* Items discovered during this run */ - queued_imported, /* Items imported via -S */ - queued_favored, /* Paths deemed favorable */ - queued_with_cov, /* Paths with new coverage bytes */ - pending_not_fuzzed, /* Queued but not done yet */ - pending_favored, /* Pending favored paths */ - cur_skipped_paths, /* Abandoned inputs in cur cycle */ - cur_depth, /* Current path depth */ - max_depth, /* Max path depth */ - useless_at_start, /* Number of useless starting paths */ - var_byte_count, /* Bitmap bytes with var behavior */ - current_entry, /* Current queue entry ID */ - havoc_div = 1; /* Cycle count divisor for havoc */ +u32 queued_paths, /* Total number of queued testcases */ + queued_variable, /* Testcases with variable behavior */ + queued_at_start, /* Total number of initial inputs */ + queued_discovered, /* Items discovered during this run */ + queued_imported, /* Items imported via -S */ + queued_favored, /* Paths deemed favorable */ + queued_with_cov, /* Paths with new coverage bytes */ + pending_not_fuzzed, /* Queued but not done yet */ + pending_favored, /* Pending favored paths */ + cur_skipped_paths, /* Abandoned inputs in cur cycle */ + cur_depth, /* Current path depth */ + max_depth, /* Max path depth */ + useless_at_start, /* Number of useless starting paths */ + var_byte_count, /* Bitmap bytes with var behavior */ + current_entry, /* Current queue entry ID */ + havoc_div = 1; /* Cycle count divisor for havoc */ -u64 total_crashes, /* Total number of crashes */ - unique_crashes, /* Crashes with unique signatures */ - total_tmouts, /* Total number of timeouts */ - 
unique_tmouts, /* Timeouts with unique signatures */
- unique_hangs, /* Hangs with unique signatures */
- total_execs, /* Total execve() calls */
- slowest_exec_ms, /* Slowest testcase non hang in ms */
- start_time, /* Unix start time (ms) */
- last_path_time, /* Time for most recent path (ms) */
- last_crash_time, /* Time for most recent crash (ms) */
- last_hang_time, /* Time for most recent hang (ms) */
- last_crash_execs, /* Exec counter at last crash */
- queue_cycle, /* Queue round counter */
- cycles_wo_finds, /* Cycles without any new paths */
- trim_execs, /* Execs done to trim input files */
- bytes_trim_in, /* Bytes coming into the trimmer */
- bytes_trim_out, /* Bytes coming outa the trimmer */
- blocks_eff_total, /* Blocks subject to effector maps */
- blocks_eff_select; /* Blocks selected as fuzzable */
+u64 total_crashes, /* Total number of crashes */
+ unique_crashes, /* Crashes with unique signatures */
+ total_tmouts, /* Total number of timeouts */
+ unique_tmouts, /* Timeouts with unique signatures */
+ unique_hangs, /* Hangs with unique signatures */
+ total_execs, /* Total execve() calls */
+ slowest_exec_ms, /* Slowest non-hang testcase in ms */
+ start_time, /* Unix start time (ms) */
+ last_path_time, /* Time for most recent path (ms) */
+ last_crash_time, /* Time for most recent crash (ms) */
+ last_hang_time, /* Time for most recent hang (ms) */
+ last_crash_execs, /* Exec counter at last crash */
+ queue_cycle, /* Queue round counter */
+ cycles_wo_finds, /* Cycles without any new paths */
+ trim_execs, /* Execs done to trim input files */
+ bytes_trim_in, /* Bytes coming into the trimmer */
+ bytes_trim_out, /* Bytes coming out of the trimmer */
+ blocks_eff_total, /* Blocks subject to effector maps */
+ blocks_eff_select; /* Blocks selected as fuzzable */

-u32 subseq_tmouts; /* Number of timeouts in a row */
+u32 subseq_tmouts; /* Number of timeouts in a row */

-u8 *stage_name = "init", /* Name of the current fuzz stage */
- *stage_short, /* Short stage name */
- *syncing_party; /* Currently syncing with... */
+u8 *stage_name = "init", /* Name of the current fuzz stage */
+ *stage_short, /* Short stage name */
+ *syncing_party; /* Currently syncing with... */

-s32 stage_cur, stage_max; /* Stage progression */
-s32 splicing_with = -1; /* Splicing with which test case? */
+s32 stage_cur, stage_max; /* Stage progression */
+s32 splicing_with = -1; /* Splicing with which test case? */

-u32 master_id, master_max; /* Master instance job splitting */
+u32 master_id, master_max; /* Master instance job splitting */

-u32 syncing_case; /* Syncing with case #... */
+u32 syncing_case; /* Syncing with case #...
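
bytes_trim_in and bytes_trim_out above feed the status screen's trimming statistic; the interesting number is the percentage shaved off, not the raw byte counts. A sketch of that computation (illustrative; the division needs a guard, since nothing has been trimmed at startup):

  /* Percentage of input bytes removed by the trimmer. */
  static double trim_pct(unsigned long long bytes_in,
                         unsigned long long bytes_out) {

    if (!bytes_in) return 0.0;
    return (double)(bytes_in - bytes_out) * 100.0 / (double)bytes_in;

  }
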
*/ -s32 stage_cur_byte, /* Byte offset of current stage op */ - stage_cur_val; /* Value used for stage op */ +s32 stage_cur_byte, /* Byte offset of current stage op */ + stage_cur_val; /* Value used for stage op */ -u8 stage_val_type; /* Value type (STAGE_VAL_*) */ +u8 stage_val_type; /* Value type (STAGE_VAL_*) */ -u64 stage_finds[32], /* Patterns found per fuzz stage */ - stage_cycles[32]; /* Execs per fuzz stage */ +u64 stage_finds[32], /* Patterns found per fuzz stage */ + stage_cycles[32]; /* Execs per fuzz stage */ #ifndef HAVE_ARC4RANDOM -u32 rand_cnt; /* Random number counter */ +u32 rand_cnt; /* Random number counter */ #endif -u64 total_cal_us, /* Total calibration time (us) */ - total_cal_cycles; /* Total calibration cycles */ +u64 total_cal_us, /* Total calibration time (us) */ + total_cal_cycles; /* Total calibration cycles */ -u64 total_bitmap_size, /* Total bit count for all bitmaps */ - total_bitmap_entries; /* Number of bitmaps counted */ +u64 total_bitmap_size, /* Total bit count for all bitmaps */ + total_bitmap_entries; /* Number of bitmaps counted */ -s32 cpu_core_count; /* CPU core count */ +s32 cpu_core_count; /* CPU core count */ #ifdef HAVE_AFFINITY -s32 cpu_aff = -1; /* Selected CPU core */ +s32 cpu_aff = -1; /* Selected CPU core */ #endif /* HAVE_AFFINITY */ -FILE* plot_file; /* Gnuplot output file */ +FILE *plot_file; /* Gnuplot output file */ +struct queue_entry *queue, /* Fuzzing queue (linked list) */ + *queue_cur, /* Current offset within the queue */ + *queue_top, /* Top of the list */ + *q_prev100; /* Previous 100 marker */ +struct queue_entry *top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ -struct queue_entry *queue, /* Fuzzing queue (linked list) */ - *queue_cur, /* Current offset within the queue */ - *queue_top, /* Top of the list */ - *q_prev100; /* Previous 100 marker */ +struct extra_data *extras; /* Extra tokens to fuzz with */ +u32 extras_cnt; /* Total number of tokens read */ -struct queue_entry* - top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ +struct extra_data *a_extras; /* Automatically selected extras */ +u32 a_extras_cnt; /* Total number of tokens available */ -struct extra_data* extras; /* Extra tokens to fuzz with */ -u32 extras_cnt; /* Total number of tokens read */ - -struct extra_data* a_extras; /* Automatically selected extras */ -u32 a_extras_cnt; /* Total number of tokens available */ - -u8* (*post_handler)(u8* buf, u32* len); +u8 *(*post_handler)(u8 *buf, u32 *len); /* hooks for the custom mutator function */ -size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed); +size_t (*custom_mutator)(u8 *data, size_t size, u8 *mutated_out, + size_t max_size, unsigned int seed); size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data); - /* Interesting values, as per config.h */ -s8 interesting_8[] = { INTERESTING_8 }; -s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 }; -s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 }; +s8 interesting_8[] = {INTERESTING_8}; +s16 interesting_16[] = {INTERESTING_8, INTERESTING_16}; +s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32}; /* Python stuff */ #ifdef USE_PYTHON diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index f66db74c..8a3ee6fa 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -22,7 +22,6 @@ #include "afl-fuzz.h" - #ifdef HAVE_AFFINITY /* Build a list of processes bound to specific cores. 
Returns -1 if nothing @@ -30,11 +29,11 @@ void bind_to_free_cpu(void) { - DIR* d; + DIR* d; struct dirent* de; - cpu_set_t c; + cpu_set_t c; - u8 cpu_used[4096] = { 0 }; + u8 cpu_used[4096] = {0}; u32 i; if (cpu_core_count < 2) return; @@ -69,18 +68,20 @@ void bind_to_free_cpu(void) { while ((de = readdir(d))) { - u8* fn; + u8* fn; FILE* f; - u8 tmp[MAX_LINE]; - u8 has_vmsize = 0; + u8 tmp[MAX_LINE]; + u8 has_vmsize = 0; if (!isdigit(de->d_name[0])) continue; fn = alloc_printf("/proc/%s/status", de->d_name); if (!(f = fopen(fn, "r"))) { + ck_free(fn); continue; + } while (fgets(tmp, MAX_LINE, f)) { @@ -91,10 +92,9 @@ void bind_to_free_cpu(void) { if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1; - if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && - !strchr(tmp, '-') && !strchr(tmp, ',') && - sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) && - has_vmsize) { + if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && !strchr(tmp, '-') && + !strchr(tmp, ',') && sscanf(tmp + 19, "%u", &hval) == 1 && + hval < sizeof(cpu_used) && has_vmsize) { cpu_used[hval] = 1; break; @@ -110,14 +110,17 @@ void bind_to_free_cpu(void) { closedir(d); - for (i = 0; i < cpu_core_count; ++i) if (!cpu_used[i]) break; + for (i = 0; i < cpu_core_count; ++i) + if (!cpu_used[i]) break; if (i == cpu_core_count) { SAYF("\n" cLRD "[-] " cRST "Uh-oh, looks like all %d CPU cores on your system are allocated to\n" - " other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n" - " another fuzzer on this machine is probably a bad plan, but if you are\n" + " other instances of afl-fuzz (or similar CPU-locked tasks). " + "Starting\n" + " another fuzzer on this machine is probably a bad plan, but if " + "you are\n" " absolutely sure, you can set AFL_NO_AFFINITY and try again.\n", cpu_core_count); @@ -132,8 +135,7 @@ void bind_to_free_cpu(void) { CPU_ZERO(&c); CPU_SET(i, &c); - if (sched_setaffinity(0, sizeof(c), &c)) - PFATAL("sched_setaffinity failed"); + if (sched_setaffinity(0, sizeof(c), &c)) PFATAL("sched_setaffinity failed"); } @@ -144,8 +146,8 @@ void bind_to_free_cpu(void) { void setup_post(void) { void* dh; - u8* fn = getenv("AFL_POST_LIBRARY"); - u32 tlen = 6; + u8* fn = getenv("AFL_POST_LIBRARY"); + u32 tlen = 6; if (!fn) return; @@ -166,8 +168,9 @@ void setup_post(void) { } void setup_custom_mutator(void) { + void* dh; - u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY"); + u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY"); if (!fn) return; @@ -180,11 +183,11 @@ void setup_custom_mutator(void) { if (!custom_mutator) FATAL("Symbol 'afl_custom_mutator' not found."); pre_save_handler = dlsym(dh, "afl_pre_save_handler"); -// if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found."); + // if (!pre_save_handler) WARNF("Symbol 'afl_pre_save_handler' not found."); OKF("Custom mutator installed successfully."); -} +} /* Shuffle an array of pointers. Might be slightly biased. */ @@ -194,8 +197,8 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) { for (i = 0; i < cnt - 2; ++i) { - u32 j = i + UR(cnt - i); - void *s = ptrs[i]; + u32 j = i + UR(cnt - i); + void* s = ptrs[i]; ptrs[i] = ptrs[j]; ptrs[j] = s; @@ -208,15 +211,18 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) { void read_testcases(void) { - struct dirent **nl; - s32 nl_cnt; - u32 i; - u8* fn1; + struct dirent** nl; + s32 nl_cnt; + u32 i; + u8* fn1; /* Auto-detect non-in-place resumption attempts. 
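
shuffle_ptrs() above is essentially a Fisher-Yates pass; the "slightly biased" caveat in its comment comes from stopping the loop at cnt - 2 and from the modulo behavior of UR(). For comparison, the textbook unbiased form looks like this (sketch; rand() stands in for a proper uniform source):

  #include <stdlib.h>

  /* Textbook Fisher-Yates: each i swaps with a uniform j in [i, cnt). */
  static void shuffle_ptrs_unbiased(void** ptrs, unsigned int cnt) {

    unsigned int i;
    for (i = 0; i + 1 < cnt; ++i) {

      unsigned int j = i + (unsigned int)(rand() % (cnt - i)); /* modulo bias aside */
      void* s = ptrs[i];
      ptrs[i] = ptrs[j];
      ptrs[j] = s;

    }

  }
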
*/ fn1 = alloc_printf("%s/queue", in_dir); - if (!access(fn1, F_OK)) in_dir = fn1; else ck_free(fn1); + if (!access(fn1, F_OK)) + in_dir = fn1; + else + ck_free(fn1); ACTF("Scanning '%s'...", in_dir); @@ -231,9 +237,12 @@ void read_testcases(void) { if (errno == ENOENT || errno == ENOTDIR) SAYF("\n" cLRD "[-] " cRST - "The input directory does not seem to be valid - try again. The fuzzer needs\n" - " one or more test case to start with - ideally, a small file under 1 kB\n" - " or so. The cases must be stored as regular files directly in the input\n" + "The input directory does not seem to be valid - try again. The " + "fuzzer needs\n" + " one or more test case to start with - ideally, a small file " + "under 1 kB\n" + " or so. The cases must be stored as regular files directly in " + "the input\n" " directory.\n"); PFATAL("Unable to open '%s'", in_dir); @@ -252,12 +261,13 @@ void read_testcases(void) { struct stat st; u8* fn2 = alloc_printf("%s/%s", in_dir, nl[i]->d_name); - u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name); + u8* dfn = + alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name); - u8 passed_det = 0; + u8 passed_det = 0; + + free(nl[i]); /* not tracked */ - free(nl[i]); /* not tracked */ - if (lstat(fn2, &st) || access(fn2, R_OK)) PFATAL("Unable to access '%s'", fn2); @@ -271,9 +281,9 @@ void read_testcases(void) { } - if (st.st_size > MAX_FILE) - FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, - DMS(st.st_size), DMS(MAX_FILE)); + if (st.st_size > MAX_FILE) + FATAL("Test case '%s' is too big (%s, limit is %s)", fn2, DMS(st.st_size), + DMS(MAX_FILE)); /* Check for metadata that indicates that deterministic fuzzing is complete for this entry. We don't want to repeat deterministic @@ -287,14 +297,17 @@ void read_testcases(void) { } - free(nl); /* not tracked */ + free(nl); /* not tracked */ if (!queued_paths) { SAYF("\n" cLRD "[-] " cRST - "Looks like there are no valid test cases in the input directory! The fuzzer\n" - " needs one or more test case to start with - ideally, a small file under\n" - " 1 kB or so. The cases must be stored as regular files directly in the\n" + "Looks like there are no valid test cases in the input directory! The " + "fuzzer\n" + " needs one or more test case to start with - ideally, a small " + "file under\n" + " 1 kB or so. The cases must be stored as regular files directly " + "in the\n" " input directory.\n"); FATAL("No usable test cases in '%s'", in_dir); @@ -306,7 +319,6 @@ void read_testcases(void) { } - /* Examine map coverage. Called once, for first test case. */ static void check_map_coverage(void) { @@ -322,15 +334,14 @@ static void check_map_coverage(void) { } - /* Perform dry run of all test cases to confirm that the app is working as expected. This is done only for the initial inputs, and only once. */ void perform_dry_run(char** argv) { struct queue_entry* q = queue; - u32 cal_failures = 0; - u8* skip_crashes = getenv("AFL_SKIP_CRASHES"); + u32 cal_failures = 0; + u8* skip_crashes = getenv("AFL_SKIP_CRASHES"); while (q) { @@ -358,7 +369,7 @@ void perform_dry_run(char** argv) { if (stop_soon) return; if (res == crash_mode || res == FAULT_NOBITS) - SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST, + SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST, q->len, q->bitmap_size, q->exec_us); switch (res) { @@ -380,90 +391,119 @@ void perform_dry_run(char** argv) { out. 
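
The timeout_given > 1 test below encodes the '-t nn+' convention: a timeout value with a trailing '+' means "skip test cases that time out" rather than "abort the dry run". A sketch of how that suffix could be decoded (hypothetical parser; the real one sits in the -t option handling):

  #include <stdlib.h>

  /* Returns the timeout in ms; *given becomes 2 for "50+", 1 for plain "50". */
  static unsigned int parse_timeout(const char* arg, unsigned char* given) {

    char* end = NULL;
    unsigned long val = strtoul(arg, &end, 10);
    *given = (end && *end == '+') ? 2 : 1;
    return (unsigned int)val;

  }
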
*/ if (timeout_given > 1) { + WARNF("Test case results in a timeout (skipping)"); q->cal_failed = CAL_CHANCES; ++cal_failures; break; + } SAYF("\n" cLRD "[-] " cRST - "The program took more than %u ms to process one of the initial test cases.\n" - " Usually, the right thing to do is to relax the -t option - or to delete it\n" - " altogether and allow the fuzzer to auto-calibrate. That said, if you know\n" - " what you are doing and want to simply skip the unruly test cases, append\n" - " '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout, - exec_tmout); + "The program took more than %u ms to process one of the initial " + "test cases.\n" + " Usually, the right thing to do is to relax the -t option - " + "or to delete it\n" + " altogether and allow the fuzzer to auto-calibrate. That " + "said, if you know\n" + " what you are doing and want to simply skip the unruly test " + "cases, append\n" + " '+' at the end of the value passed to -t ('-t %u+').\n", + exec_tmout, exec_tmout); FATAL("Test case '%s' results in a timeout", fn); } else { SAYF("\n" cLRD "[-] " cRST - "The program took more than %u ms to process one of the initial test cases.\n" - " This is bad news; raising the limit with the -t option is possible, but\n" + "The program took more than %u ms to process one of the initial " + "test cases.\n" + " This is bad news; raising the limit with the -t option is " + "possible, but\n" " will probably make the fuzzing process extremely slow.\n\n" - " If this test case is just a fluke, the other option is to just avoid it\n" - " altogether, and find one that is less of a CPU hog.\n", exec_tmout); + " If this test case is just a fluke, the other option is to " + "just avoid it\n" + " altogether, and find one that is less of a CPU hog.\n", + exec_tmout); FATAL("Test case '%s' results in a timeout", fn); } - case FAULT_CRASH: + case FAULT_CRASH: if (crash_mode) break; if (skip_crashes) { + WARNF("Test case results in a crash (skipping)"); q->cal_failed = CAL_CHANCES; ++cal_failures; break; + } if (mem_limit) { SAYF("\n" cLRD "[-] " cRST - "Oops, the program crashed with one of the test cases provided. There are\n" + "Oops, the program crashed with one of the test cases provided. " + "There are\n" " several possible explanations:\n\n" - " - The test case causes known crashes under normal working conditions. If\n" - " so, please remove it. The fuzzer should be seeded with interesting\n" + " - The test case causes known crashes under normal working " + "conditions. If\n" + " so, please remove it. The fuzzer should be seeded with " + "interesting\n" " inputs - but not ones that cause an outright crash.\n\n" - " - The current memory limit (%s) is too low for this program, causing\n" - " it to die due to OOM when parsing valid files. To fix this, try\n" - " bumping it up with the -m setting in the command line. If in doubt,\n" + " - The current memory limit (%s) is too low for this " + "program, causing\n" + " it to die due to OOM when parsing valid files. To fix " + "this, try\n" + " bumping it up with the -m setting in the command line. " + "If in doubt,\n" " try something along the lines of:\n\n" - MSG_ULIMIT_USAGE " /path/to/binary [...] for troubleshooting tips.\n", + " - Least likely, there is a horrible bug in the fuzzer. If " + "other options\n" + " fail, poke for " + "troubleshooting tips.\n", DMS(mem_limit << 20), mem_limit - 1, doc_path); } else { SAYF("\n" cLRD "[-] " cRST - "Oops, the program crashed with one of the test cases provided. 
There are\n" + "Oops, the program crashed with one of the test cases provided. " + "There are\n" " several possible explanations:\n\n" - " - The test case causes known crashes under normal working conditions. If\n" - " so, please remove it. The fuzzer should be seeded with interesting\n" + " - The test case causes known crashes under normal working " + "conditions. If\n" + " so, please remove it. The fuzzer should be seeded with " + "interesting\n" " inputs - but not ones that cause an outright crash.\n\n" MSG_FORK_ON_APPLE - " - Least likely, there is a horrible bug in the fuzzer. If other options\n" - " fail, poke for troubleshooting tips.\n"); + " - Least likely, there is a horrible bug in the fuzzer. If " + "other options\n" + " fail, poke for " + "troubleshooting tips.\n"); } + #undef MSG_ULIMIT_USAGE #undef MSG_FORK_ON_APPLE @@ -473,11 +513,9 @@ void perform_dry_run(char** argv) { FATAL("Unable to execute target application ('%s')", argv[0]); - case FAULT_NOINST: + case FAULT_NOINST: FATAL("No instrumentation detected"); - FATAL("No instrumentation detected"); - - case FAULT_NOBITS: + case FAULT_NOBITS: ++useless_at_start; @@ -513,7 +551,6 @@ void perform_dry_run(char** argv) { } - /* Helper function: link() if possible, copy otherwise. */ static void link_or_copy(u8* old_path, u8* new_path) { @@ -532,7 +569,7 @@ static void link_or_copy(u8* old_path, u8* new_path) { tmp = ck_alloc(64 * 1024); - while ((i = read(sfd, tmp, 64 * 1024)) > 0) + while ((i = read(sfd, tmp, 64 * 1024)) > 0) ck_write(dfd, tmp, i, new_path); if (i < 0) PFATAL("read() failed"); @@ -543,23 +580,25 @@ static void link_or_copy(u8* old_path, u8* new_path) { } - /* Create hard links for input test cases in the output directory, choosing good names and pivoting accordingly. */ void pivot_inputs(void) { struct queue_entry* q = queue; - u32 id = 0; + u32 id = 0; ACTF("Creating hard links for all input files..."); while (q) { - u8 *nfn, *rsl = strrchr(q->fname, '/'); + u8 *nfn, *rsl = strrchr(q->fname, '/'); u32 orig_id; - if (!rsl) rsl = q->fname; else ++rsl; + if (!rsl) + rsl = q->fname; + else + ++rsl; /* If the original file name conforms to the syntax and the recorded ID matches the one we'd assign, just use the original file name. @@ -582,7 +621,8 @@ void pivot_inputs(void) { if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) { struct queue_entry* s = queue; - while (src_id-- && s) s = s->next; + while (src_id-- && s) + s = s->next; if (s) q->depth = s->depth + 1; if (max_depth < q->depth) max_depth = q->depth; @@ -598,7 +638,10 @@ void pivot_inputs(void) { u8* use_name = strstr(rsl, ",orig:"); - if (use_name) use_name += 6; else use_name = rsl; + if (use_name) + use_name += 6; + else + use_name = rsl; nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name); #else @@ -628,29 +671,31 @@ void pivot_inputs(void) { } - /* When resuming, try to find the queue position to start from. This makes sense only when resuming, and when we can find the original fuzzer_stats. */ u32 find_start_position(void) { - static u8 tmp[4096]; /* Ought to be enough for anybody. */ + static u8 tmp[4096]; /* Ought to be enough for anybody. 
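
find_start_position() continues below by scanning the fuzzer_stats text for its cur_path field, the strstr-plus-atoi way. The same pattern in isolation (sketch; the exact key includes the column padding used when fuzzer_stats is written):

  #include <stdlib.h>
  #include <string.h>

  /* Pull an unsigned value out of a "key : value" stats blob; 0 if absent. */
  static unsigned int stats_field(const char* buf, const char* key) {

    const char* off = strstr(buf, key);
    if (!off) return 0;
    return (unsigned int)atoi(off + strlen(key));

  }
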
*/ - u8 *fn, *off; + u8 *fn, *off; s32 fd, i; u32 ret; if (!resuming_fuzz) return 0; - if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); - else fn = alloc_printf("%s/../fuzzer_stats", in_dir); + if (in_place_resume) + fn = alloc_printf("%s/fuzzer_stats", out_dir); + else + fn = alloc_printf("%s/../fuzzer_stats", in_dir); fd = open(fn, O_RDONLY); ck_free(fn); if (fd < 0) return 0; - i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ + i = read(fd, tmp, sizeof(tmp) - 1); + (void)i; /* Ignore errors */ close(fd); off = strstr(tmp, "cur_path : "); @@ -662,30 +707,32 @@ u32 find_start_position(void) { } - /* The same, but for timeouts. The idea is that when resuming sessions without -t given, we don't want to keep auto-scaling the timeout over and over again to prevent it from growing due to random flukes. */ void find_timeout(void) { - static u8 tmp[4096]; /* Ought to be enough for anybody. */ + static u8 tmp[4096]; /* Ought to be enough for anybody. */ - u8 *fn, *off; + u8 *fn, *off; s32 fd, i; u32 ret; if (!resuming_fuzz) return; - if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); - else fn = alloc_printf("%s/../fuzzer_stats", in_dir); + if (in_place_resume) + fn = alloc_printf("%s/fuzzer_stats", out_dir); + else + fn = alloc_printf("%s/../fuzzer_stats", in_dir); fd = open(fn, O_RDONLY); ck_free(fn); if (fd < 0) return; - i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ + i = read(fd, tmp, sizeof(tmp) - 1); + (void)i; /* Ignore errors */ close(fd); off = strstr(tmp, "exec_timeout : "); @@ -699,14 +746,12 @@ void find_timeout(void) { } - - /* A helper function for maybe_delete_out_dir(), deleting all prefixed files in a directory. */ static u8 delete_files(u8* path, u8* prefix) { - DIR* d; + DIR* d; struct dirent* d_ent; d = opendir(path); @@ -715,8 +760,8 @@ static u8 delete_files(u8* path, u8* prefix) { while ((d_ent = readdir(d))) { - if (d_ent->d_name[0] != '.' && (!prefix || - !strncmp(d_ent->d_name, prefix, strlen(prefix)))) { + if (d_ent->d_name[0] != '.' && + (!prefix || !strncmp(d_ent->d_name, prefix, strlen(prefix)))) { u8* fname = alloc_printf("%s/%s", path, d_ent->d_name); if (unlink(fname)) PFATAL("Unable to delete '%s'", fname); @@ -732,14 +777,13 @@ static u8 delete_files(u8* path, u8* prefix) { } - /* Get the number of runnable processes, with some simple smoothing. */ double get_runnable_processes(void) { static double res; -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* I don't see any portable sysctl or so that would quickly give us the number of runnable processes; the 1-minute load average can be a @@ -762,10 +806,11 @@ double get_runnable_processes(void) { while (fgets(tmp, sizeof(tmp), f)) { if (!strncmp(tmp, "procs_running ", 14) || - !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14); + !strncmp(tmp, "procs_blocked ", 14)) + val += atoi(tmp + 14); } - + fclose(f); if (!res) { @@ -785,7 +830,6 @@ double get_runnable_processes(void) { } - /* Delete the temporary directory used for in-place session resume. */ void nuke_resume_dir(void) { @@ -824,14 +868,13 @@ dir_cleanup_failed: } - /* Delete fuzzer output directory if we recognize it as ours, if the fuzzer is not currently running, and if the last run time isn't too great. 
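
get_runnable_processes() above smooths the raw procs_running/procs_blocked sample instead of reporting every spike; the update is a plain exponential moving average. Isolated (sketch; the real smoothing constant is a config.h detail):

  /* Exponential moving average; the first sample seeds the estimate. */
  static double ema_update(double prev, double sample, double alpha) {

    if (prev == 0.0) return sample;
    return prev * (1.0 - alpha) + sample * alpha;

  }

  /* alpha = 1.0 / 16 keeps roughly 94% of the history per update. */
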
*/ void maybe_delete_out_dir(void) { FILE* f; - u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir); + u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); /* See if the output directory is locked. If yes, bail out. If not, create a lock that will persist for the lifetime of the process @@ -845,7 +888,8 @@ void maybe_delete_out_dir(void) { if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) { SAYF("\n" cLRD "[-] " cRST - "Looks like the job output directory is being actively used by another\n" + "Looks like the job output directory is being actively used by " + "another\n" " instance of afl-fuzz. You will need to choose a different %s\n" " or stop the other process first.\n", sync_id ? "fuzzer ID" : "output location"); @@ -862,8 +906,10 @@ void maybe_delete_out_dir(void) { u64 start_time2, last_update; - if (fscanf(f, "start_time : %llu\n" - "last_update : %llu\n", &start_time2, &last_update) != 2) + if (fscanf(f, + "start_time : %llu\n" + "last_update : %llu\n", + &start_time2, &last_update) != 2) FATAL("Malformed data in '%s'", fn); fclose(f); @@ -873,16 +919,22 @@ void maybe_delete_out_dir(void) { if (!in_place_resume && last_update - start_time2 > OUTPUT_GRACE * 60) { SAYF("\n" cLRD "[-] " cRST - "The job output directory already exists and contains the results of more\n" - " than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n" + "The job output directory already exists and contains the results " + "of more\n" + " than %d minutes worth of fuzzing. To avoid data loss, afl-fuzz " + "will *NOT*\n" " automatically delete this data for you.\n\n" - " If you wish to start a new session, remove or rename the directory manually,\n" - " or specify a different output location for this job. To resume the old\n" - " session, put '-' as the input directory in the command line ('-i -') and\n" - " try again.\n", OUTPUT_GRACE); + " If you wish to start a new session, remove or rename the " + "directory manually,\n" + " or specify a different output location for this job. To resume " + "the old\n" + " session, put '-' as the input directory in the command line " + "('-i -') and\n" + " try again.\n", + OUTPUT_GRACE); - FATAL("At-risk data found in '%s'", out_dir); + FATAL("At-risk data found in '%s'", out_dir); } @@ -902,7 +954,7 @@ void maybe_delete_out_dir(void) { in_dir = alloc_printf("%s/_resume", out_dir); - rename(orig_q, in_dir); /* Ignore errors */ + rename(orig_q, in_dir); /* Ignore errors */ OKF("Output directory exists, will attempt session resume."); @@ -961,7 +1013,7 @@ void maybe_delete_out_dir(void) { if (!in_place_resume) { fn = alloc_printf("%s/crashes/README.txt", out_dir); - unlink(fn); /* Ignore errors */ + unlink(fn); /* Ignore errors */ ck_free(fn); } @@ -973,7 +1025,7 @@ void maybe_delete_out_dir(void) { if (in_place_resume && rmdir(fn)) { - time_t cur_t = time(0); + time_t cur_t = time(0); struct tm* t = localtime(&cur_t); #ifndef SIMPLE_FILES @@ -984,13 +1036,13 @@ void maybe_delete_out_dir(void) { #else - u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); + u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900, + t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, + t->tm_sec); #endif /* ^!SIMPLE_FILES */ - rename(fn, nfn); /* Ignore errors. */ + rename(fn, nfn); /* Ignore errors. 
*/ ck_free(nfn); } @@ -1004,7 +1056,7 @@ void maybe_delete_out_dir(void) { if (in_place_resume && rmdir(fn)) { - time_t cur_t = time(0); + time_t cur_t = time(0); struct tm* t = localtime(&cur_t); #ifndef SIMPLE_FILES @@ -1015,13 +1067,13 @@ void maybe_delete_out_dir(void) { #else - u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, - t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); + u8* nfn = alloc_printf("%s_%04d%02d%02d%02d%02d%02d", fn, t->tm_year + 1900, + t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, + t->tm_sec); #endif /* ^!SIMPLE_FILES */ - rename(fn, nfn); /* Ignore errors. */ + rename(fn, nfn); /* Ignore errors. */ ck_free(nfn); } @@ -1032,9 +1084,13 @@ void maybe_delete_out_dir(void) { /* And now, for some finishing touches. */ if (file_extension) { + fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); + } else { + fn = alloc_printf("%s/.cur_input", out_dir); + } if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; @@ -1045,9 +1101,11 @@ void maybe_delete_out_dir(void) { ck_free(fn); if (!in_place_resume) { - fn = alloc_printf("%s/fuzzer_stats", out_dir); + + fn = alloc_printf("%s/fuzzer_stats", out_dir); if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; ck_free(fn); + } fn = alloc_printf("%s/plot_data", out_dir); @@ -1067,19 +1125,22 @@ void maybe_delete_out_dir(void) { dir_cleanup_failed: SAYF("\n" cLRD "[-] " cRST - "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n" - " some files that shouldn't be there or that couldn't be removed - so it\n" + "Whoops, the fuzzer tried to reuse your output directory, but bumped " + "into\n" + " some files that shouldn't be there or that couldn't be removed - " + "so it\n" " decided to abort! This happened while processing this path:\n\n" " %s\n\n" - " Please examine and manually delete the files, or specify a different\n" - " output location for the tool.\n", fn); + " Please examine and manually delete the files, or specify a " + "different\n" + " output location for the tool.\n", + fn); FATAL("Output directory cleanup failed"); } - /* Prepare output directories and fds. */ void setup_dirs_fds(void) { @@ -1090,7 +1151,7 @@ void setup_dirs_fds(void) { ACTF("Setting up output directories..."); if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST) - PFATAL("Unable to create '%s'", sync_dir); + PFATAL("Unable to create '%s'", sync_dir); if (mkdir(out_dir, 0700)) { @@ -1197,14 +1258,16 @@ void setup_dirs_fds(void) { plot_file = fdopen(fd, "w"); if (!plot_file) PFATAL("fdopen() failed"); - fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, " - "pending_total, pending_favs, map_size, unique_crashes, " - "unique_hangs, max_depth, execs_per_sec\n"); - /* ignore errors */ + fprintf(plot_file, + "# unix_time, cycles_done, cur_path, paths_total, " + "pending_total, pending_favs, map_size, unique_crashes, " + "unique_hangs, max_depth, execs_per_sec\n"); + /* ignore errors */ } void setup_cmdline_file(char** argv) { + u8* tmp; s32 fd; u32 i = 0; @@ -1221,13 +1284,15 @@ void setup_cmdline_file(char** argv) { if (!cmdline_file) PFATAL("fdopen() failed"); while (argv[i]) { + fprintf(cmdline_file, "%s\n", argv[i]); ++i; + } fclose(cmdline_file); -} +} /* Setup the output file for fuzzed data, if not using -f. 
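
setup_stdio_file() continues below: it unlinks any stale .cur_input and recreates it with O_EXCL, so a leftover file, or a symlink planted in out_dir, fails loudly instead of being reused. The same create-fresh idiom in isolation (sketch):

  #include <fcntl.h>
  #include <unistd.h>

  /* Replace 'path' with a brand-new file; O_EXCL turns an existing entry
     (including a symlink) into an error rather than something to follow. */
  static int create_fresh(const char* path) {

    unlink(path); /* ENOENT here is fine */
    return open(path, O_RDWR | O_CREAT | O_EXCL, 0600);

  }
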
*/ @@ -1235,12 +1300,16 @@ void setup_stdio_file(void) { u8* fn; if (file_extension) { + fn = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); + } else { + fn = alloc_printf("%s/.cur_input", out_dir); + } - unlink(fn); /* Ignore errors */ + unlink(fn); /* Ignore errors */ out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600); @@ -1250,32 +1319,34 @@ void setup_stdio_file(void) { } - /* Make sure that core dumps don't go to a program. */ void check_crash_handling(void) { #ifdef __APPLE__ - /* Yuck! There appears to be no simple C API to query for the state of + /* Yuck! There appears to be no simple C API to query for the state of loaded daemons on MacOS X, and I'm a bit hesitant to do something more sophisticated, such as disabling crash reporting via Mach ports, until I get a box to test the code. So, for now, we check for crash reporting the awful way. */ - + if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return; - SAYF("\n" cLRD "[-] " cRST - "Whoops, your system is configured to forward crash notifications to an\n" - " external crash reporting utility. This will cause issues due to the\n" - " extended delay between the fuzzed binary malfunctioning and this fact\n" - " being relayed to the fuzzer via the standard waitpid() API.\n\n" - " To avoid having crashes misinterpreted as timeouts, please run the\n" - " following commands:\n\n" + SAYF( + "\n" cLRD "[-] " cRST + "Whoops, your system is configured to forward crash notifications to an\n" + " external crash reporting utility. This will cause issues due to " + "the\n" + " extended delay between the fuzzed binary malfunctioning and this " + "fact\n" + " being relayed to the fuzzer via the standard waitpid() API.\n\n" + " To avoid having crashes misinterpreted as timeouts, please run the\n" + " following commands:\n\n" - " SL=/System/Library; PL=com.apple.ReportCrash\n" - " launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n" - " sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n"); + " SL=/System/Library; PL=com.apple.ReportCrash\n" + " launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n" + " sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n"); if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) FATAL("Crash reporter detected"); @@ -1283,10 +1354,10 @@ void check_crash_handling(void) { #else /* This is Linux specific, but I don't think there's anything equivalent on - *BSD, so we can just let it slide for now. */ + *BSD, so we can just let it slide for now. */ s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY); - u8 fchar; + u8 fchar; if (fd < 0) return; @@ -1294,54 +1365,68 @@ void check_crash_handling(void) { if (read(fd, &fchar, 1) == 1 && fchar == '|') { - SAYF("\n" cLRD "[-] " cRST - "Hmm, your system is configured to send core dump notifications to an\n" - " external utility. This will cause issues: there will be an extended delay\n" - " between stumbling upon a crash and having this information relayed to the\n" - " fuzzer via the standard waitpid() API.\n\n" + SAYF( + "\n" cLRD "[-] " cRST + "Hmm, your system is configured to send core dump notifications to an\n" + " external utility. 
This will cause issues: there will be an " + "extended delay\n" + " between stumbling upon a crash and having this information " + "relayed to the\n" + " fuzzer via the standard waitpid() API.\n\n" - " To avoid having crashes misinterpreted as timeouts, please log in as root\n" - " and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n" + " To avoid having crashes misinterpreted as timeouts, please log in " + "as root\n" + " and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n" - " echo core >/proc/sys/kernel/core_pattern\n"); + " echo core >/proc/sys/kernel/core_pattern\n"); if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) FATAL("Pipe at the beginning of 'core_pattern'"); } - + close(fd); #endif /* ^__APPLE__ */ } - /* Check CPU governor. */ void check_cpu_governor(void) { + #ifdef __linux__ FILE* f; - u8 tmp[128]; - u64 min = 0, max = 0; + u8 tmp[128]; + u64 min = 0, max = 0; if (getenv("AFL_SKIP_CPUFREQ")) return; if (cpu_aff > 0) - snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff, "/cpufreq/scaling_governor"); + snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpu", cpu_aff, + "/cpufreq/scaling_governor"); else - snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"); + snprintf(tmp, sizeof(tmp), "%s", + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"); f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r"); if (!f) { + if (cpu_aff > 0) - snprintf(tmp, sizeof(tmp), "%s%d%s", "/sys/devices/system/cpu/cpufreq/policy", cpu_aff, "/scaling_governor"); + snprintf(tmp, sizeof(tmp), "%s%d%s", + "/sys/devices/system/cpu/cpufreq/policy", cpu_aff, + "/scaling_governor"); else - snprintf(tmp, sizeof(tmp), "%s", "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor"); + snprintf(tmp, sizeof(tmp), "%s", + "/sys/devices/system/cpu/cpufreq/policy0/scaling_governor"); f = fopen(tmp, "r"); + } + if (!f) { + WARNF("Could not check CPU scaling governor"); return; + } ACTF("Checking CPU scaling governor..."); @@ -1355,71 +1440,79 @@ void check_cpu_governor(void) { f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r"); if (f) { + if (fscanf(f, "%llu", &min) != 1) min = 0; fclose(f); + } f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r"); if (f) { + if (fscanf(f, "%llu", &max) != 1) max = 0; fclose(f); + } if (min == max) return; SAYF("\n" cLRD "[-] " cRST "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n" - " between %llu and %llu MHz. Unfortunately, the scaling algorithm in the\n" - " kernel is imperfect and can miss the short-lived processes spawned by\n" + " between %llu and %llu MHz. Unfortunately, the scaling algorithm in " + "the\n" + " kernel is imperfect and can miss the short-lived processes spawned " + "by\n" " afl-fuzz. To keep things moving, run these commands as root:\n\n" " cd /sys/devices/system/cpu\n" " echo performance | tee cpu*/cpufreq/scaling_governor\n\n" - " You can later go back to the original state by replacing 'performance' with\n" - " 'ondemand'. If you don't want to change the settings, set AFL_SKIP_CPUFREQ\n" - " to make afl-fuzz skip this check - but expect some performance drop.\n", + " You can later go back to the original state by replacing " + "'performance' with\n" + " 'ondemand'. 
If you don't want to change the settings, set " + "AFL_SKIP_CPUFREQ\n" + " to make afl-fuzz skip this check - but expect some performance " + "drop.\n", min / 1024, max / 1024); FATAL("Suboptimal CPU scaling governor"); #endif -} +} /* Count the number of logical CPU cores. */ void get_core_count(void) { -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) size_t s = sizeof(cpu_core_count); /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */ -#ifdef __APPLE__ +# ifdef __APPLE__ - if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) - return; + if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) return; -#else +# else - int s_name[2] = { CTL_HW, HW_NCPU }; + int s_name[2] = {CTL_HW, HW_NCPU}; if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; -#endif /* ^__APPLE__ */ +# endif /* ^__APPLE__ */ #else -#ifdef HAVE_AFFINITY +# ifdef HAVE_AFFINITY cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN); -#else +# else FILE* f = fopen("/proc/stat", "r"); - u8 tmp[1024]; + u8 tmp[1024]; if (!f) return; @@ -1428,7 +1521,7 @@ void get_core_count(void) { fclose(f); -#endif /* ^HAVE_AFFINITY */ +# endif /* ^HAVE_AFFINITY */ #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ @@ -1438,7 +1531,7 @@ void get_core_count(void) { cur_runnable = (u32)get_runnable_processes(); -#if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) +#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) /* Add ourselves, since the 1-minute average doesn't include that yet. */ @@ -1447,8 +1540,8 @@ void get_core_count(void) { #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).", - cpu_core_count, cpu_core_count > 1 ? "s" : "", - cur_runnable, cur_runnable * 100.0 / cpu_core_count); + cpu_core_count, cpu_core_count > 1 ? "s" : "", cur_runnable, + cur_runnable * 100.0 / cpu_core_count); if (cpu_core_count > 1) { @@ -1459,7 +1552,7 @@ void get_core_count(void) { } else if (cur_runnable + 1 <= cpu_core_count) { OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path); - + } } @@ -1473,21 +1566,18 @@ void get_core_count(void) { } - /* Validate and fix up out_dir and sync_dir when using -S. */ void fix_up_sync(void) { u8* x = sync_id; - if (dumb_mode) - FATAL("-S / -M and -n are mutually exclusive"); + if (dumb_mode) FATAL("-S / -M and -n are mutually exclusive"); if (skip_deterministic) { - if (force_deterministic) - FATAL("use -S instead of -M -d"); - //else + if (force_deterministic) FATAL("use -S instead of -M -d"); + // else // FATAL("-S already implies -d"); } @@ -1506,26 +1596,29 @@ void fix_up_sync(void) { x = alloc_printf("%s/%s", out_dir, sync_id); sync_dir = out_dir; - out_dir = x; + out_dir = x; if (!force_deterministic) { + skip_deterministic = 1; use_splicing = 1; + } } - /* Handle screen resize (SIGWINCH). */ static void handle_resize(int sig) { - clear_screen = 1; -} + clear_screen = 1; + +} /* Check ASAN options. 
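
check_asan_opts() below rejects custom sanitizer settings that would hide crashes from the fuzzer: MSAN must exit with the agreed-on MSAN_ERROR code and skip symbolization, and the (elided) ASAN checks expect abort-on-error behavior. For illustration, a configuration that passes both might look like this (values are assumptions; the exit code must match MSAN_ERROR in config.h):

  export ASAN_OPTIONS=abort_on_error=1:detect_leaks=0:symbolize=0
  export MSAN_OPTIONS=exit_code=86:symbolize=0
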
*/ void check_asan_opts(void) { + u8* x = getenv("ASAN_OPTIONS"); if (x) { @@ -1543,29 +1636,27 @@ void check_asan_opts(void) { if (x) { if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) - FATAL("Custom MSAN_OPTIONS set without exit_code=" - STRINGIFY(MSAN_ERROR) " - please fix!"); + FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY( + MSAN_ERROR) " - please fix!"); if (!strstr(x, "symbolize=0")) FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); } -} - +} /* Handle stop signal (Ctrl-C, etc). */ static void handle_stop_sig(int sig) { - stop_soon = 1; + stop_soon = 1; if (child_pid > 0) kill(child_pid, SIGKILL); if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL); } - /* Handle skip request (SIGUSR1). */ static void handle_skipreq(int sig) { @@ -1574,14 +1665,13 @@ static void handle_skipreq(int sig) { } - /* Do a PATH search and find target binary to see that it exists and isn't a shell script - a common and painful mistake. We also check for a valid ELF header and for evidence of AFL instrumentation. */ void check_binary(u8* fname) { - u8* env_path = 0; + u8* env_path = 0; struct stat st; s32 fd; @@ -1609,7 +1699,9 @@ void check_binary(u8* fname) { memcpy(cur_elem, env_path, delim - env_path); ++delim; - } else cur_elem = ck_strdup(env_path); + } else + + cur_elem = ck_strdup(env_path); env_path = delim; @@ -1621,7 +1713,8 @@ void check_binary(u8* fname) { ck_free(cur_elem); if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break; + (st.st_mode & 0111) && (f_len = st.st_size) >= 4) + break; ck_free(target_path); target_path = 0; @@ -1638,7 +1731,7 @@ void check_binary(u8* fname) { if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) || (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/'))) - FATAL("Please don't keep binaries in /tmp or /var/tmp"); + FATAL("Please don't keep binaries in /tmp or /var/tmp"); fd = open(target_path, O_RDONLY); @@ -1653,13 +1746,19 @@ void check_binary(u8* fname) { if (f_data[0] == '#' && f_data[1] == '!') { SAYF("\n" cLRD "[-] " cRST - "Oops, the target binary looks like a shell script. Some build systems will\n" - " sometimes generate shell stubs for dynamically linked programs; try static\n" - " library mode (./configure --disable-shared) if that's the case.\n\n" + "Oops, the target binary looks like a shell script. Some build " + "systems will\n" + " sometimes generate shell stubs for dynamically linked programs; " + "try static\n" + " library mode (./configure --disable-shared) if that's the " + "case.\n\n" - " Another possible cause is that you are actually trying to use a shell\n" - " wrapper around the fuzzed component. Invoking shell can slow down the\n" - " fuzzing process by a factor of 20x or more; it's best to write the wrapper\n" + " Another possible cause is that you are actually trying to use a " + "shell\n" + " wrapper around the fuzzed component. 
Invoking shell can slow " + "down the\n" + " fuzzing process by a factor of 20x or more; it's best to write " + "the wrapper\n" " in a compiled language instead.\n"); FATAL("Program '%s' is a shell script", target_path); @@ -1673,28 +1772,35 @@ void check_binary(u8* fname) { #else -#if !defined(__arm__) && !defined(__arm64__) +# if !defined(__arm__) && !defined(__arm64__) if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED) FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path); -#endif +# endif #endif /* ^!__APPLE__ */ if (!qemu_mode && !unicorn_mode && !dumb_mode && !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { - SAYF("\n" cLRD "[-] " cRST - "Looks like the target binary is not instrumented! The fuzzer depends on\n" - " compile-time instrumentation to isolate interesting test cases while\n" - " mutating the input data. For more information, and for tips on how to\n" - " instrument binaries, please see %s/README.\n\n" + SAYF( + "\n" cLRD "[-] " cRST + "Looks like the target binary is not instrumented! The fuzzer depends " + "on\n" + " compile-time instrumentation to isolate interesting test cases " + "while\n" + " mutating the input data. For more information, and for tips on " + "how to\n" + " instrument binaries, please see %s/README.\n\n" - " When source code is not available, you may be able to leverage QEMU\n" - " mode support. Consult the README for tips on how to enable this.\n" + " When source code is not available, you may be able to leverage " + "QEMU\n" + " mode support. Consult the README for tips on how to enable this.\n" - " (It is also possible to use afl-fuzz as a traditional, \"dumb\" fuzzer.\n" - " For that, you can use the -n option - but expect much worse results.)\n", - doc_path); + " (It is also possible to use afl-fuzz as a traditional, \"dumb\" " + "fuzzer.\n" + " For that, you can use the -n option - but expect much worse " + "results.)\n", + doc_path); FATAL("No instrumentation detected"); @@ -1704,8 +1810,10 @@ void check_binary(u8* fname) { memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { SAYF("\n" cLRD "[-] " cRST - "This program appears to be instrumented with afl-gcc, but is being run in\n" - " QEMU or Unicorn mode (-Q or -U). This is probably not what you want -\n" + "This program appears to be instrumented with afl-gcc, but is being " + "run in\n" + " QEMU or Unicorn mode (-Q or -U). This is probably not what you " + "want -\n" " this setup will be slow and offer no practical benefits.\n"); FATAL("Instrumentation found in -Q or -U mode"); @@ -1713,7 +1821,8 @@ void check_binary(u8* fname) { } if (memmem(f_data, f_len, "libasan.so", 10) || - memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1; + memmem(f_data, f_len, "__msan_init", 11)) + uses_asan = 1; /* Detect persistent & deferred init signatures in the binary. */ @@ -1745,7 +1854,6 @@ void check_binary(u8* fname) { } - /* Trim and possibly create a banner for the run. */ void fix_up_banner(u8* name) { @@ -1759,7 +1867,10 @@ void fix_up_banner(u8* name) { } else { u8* trim = strrchr(name, '/'); - if (!trim) use_banner = name; else use_banner = trim + 1; + if (!trim) + use_banner = name; + else + use_banner = trim + 1; } @@ -1775,7 +1886,6 @@ void fix_up_banner(u8* name) { } - /* Check if we're on TTY. 
*/ void check_if_tty(void) { @@ -1783,24 +1893,29 @@ void check_if_tty(void) { struct winsize ws; if (getenv("AFL_NO_UI")) { + OKF("Disabling the UI because AFL_NO_UI is set."); not_on_tty = 1; return; + } if (ioctl(1, TIOCGWINSZ, &ws)) { if (errno == ENOTTY) { - OKF("Looks like we're not running on a tty, so I'll be a bit less verbose."); + + OKF("Looks like we're not running on a tty, so I'll be a bit less " + "verbose."); not_on_tty = 1; + } return; + } } - /* Set up signal handlers. More complicated that needs to be, because libc on Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you call siginterrupt(), and does other stupid things. */ @@ -1809,8 +1924,8 @@ void setup_signal_handlers(void) { struct sigaction sa; - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; sa.sa_sigaction = NULL; sigemptyset(&sa.sa_mask); @@ -1845,13 +1960,12 @@ void setup_signal_handlers(void) { } - /* Rewrite argv for QEMU. */ char** get_qemu_argv(u8* own_loc, char** argv, int argc) { char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; + u8 * tmp, *cp, *rsl, *own_copy; memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); @@ -1866,8 +1980,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) { cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); + if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp); target_path = new_argv[0] = cp; return new_argv; @@ -1891,7 +2004,9 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } - } else ck_free(own_copy); + } else + + ck_free(own_copy); if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { @@ -1901,14 +2016,20 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } SAYF("\n" cLRD "[-] " cRST - "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n" - " separately by following the instructions in qemu_mode/README.qemu. If you\n" - " already have the binary installed, you may need to specify AFL_PATH in the\n" + "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be " + "built\n" + " separately by following the instructions in qemu_mode/README.qemu. " + "If you\n" + " already have the binary installed, you may need to specify " + "AFL_PATH in the\n" " environment.\n\n" - " Of course, even without QEMU, afl-fuzz can still work with binaries that are\n" - " instrumented at compile time with afl-gcc. It is also possible to use it as a\n" - " traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n"); + " Of course, even without QEMU, afl-fuzz can still work with " + "binaries that are\n" + " instrumented at compile time with afl-gcc. 
It is also possible to " + "use it as a\n" + " traditional \"dumb\" fuzzer by specifying '-n' in the command " + "line.\n"); FATAL("Failed to locate 'afl-qemu-trace'."); @@ -1923,7 +2044,7 @@ void save_cmdline(u32 argc, char** argv) { for (i = 0; i < argc; ++i) len += strlen(argv[i]) + 1; - + buf = orig_cmdline = ck_alloc(len); for (i = 0; i < argc; ++i) { diff --git a/src/afl-fuzz-misc.c b/src/afl-fuzz-misc.c index 69ff2f6b..eb0cc187 100644 --- a/src/afl-fuzz-misc.c +++ b/src/afl-fuzz-misc.c @@ -33,11 +33,16 @@ u8* DI(u64 val) { cur = (cur + 1) % 12; -#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \ - if (val < (_divisor) * (_limit_mult)) { \ +#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) \ + do { \ + \ + if (val < (_divisor) * (_limit_mult)) { \ + \ sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \ - return tmp[cur]; \ - } \ + return tmp[cur]; \ + \ + } \ + \ } while (0) /* 0-9999 */ @@ -79,8 +84,7 @@ u8* DI(u64 val) { } - -/* Describe float. Similar to the above, except with a single +/* Describe float. Similar to the above, except with a single static buffer. */ u8* DF(double val) { @@ -88,20 +92,23 @@ u8* DF(double val) { static u8 tmp[16]; if (val < 99.995) { + sprintf(tmp, "%0.02f", val); return tmp; + } if (val < 999.95) { + sprintf(tmp, "%0.01f", val); return tmp; + } return DI((u64)val); } - /* Describe integer as memory size. */ u8* DMS(u64 val) { @@ -152,14 +159,13 @@ u8* DMS(u64 val) { } - /* Describe time delta. Returns one static buffer, 34 chars of less. */ u8* DTD(u64 cur_ms, u64 event_ms) { static u8 tmp[64]; - u64 delta; - s32 t_d, t_h, t_m, t_s; + u64 delta; + s32 t_d, t_h, t_m, t_s; if (!event_ms) return "none seen yet"; @@ -174,3 +180,4 @@ u8* DTD(u64 cur_ms, u64 event_ms) { return tmp; } + diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 59370c3d..1b7abedd 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -28,22 +28,31 @@ int select_algorithm(void) { int i_puppet, j_puppet; - double sele = ((double)(UR(10000))*0.0001); + double sele = ((double)(UR(10000)) * 0.0001); j_puppet = 0; for (i_puppet = 0; i_puppet < operator_num; ++i_puppet) { - if (unlikely(i_puppet == 0)) { - if (sele < probability_now[swarm_now][i_puppet]) - break; - } else { - if (sele < probability_now[swarm_now][i_puppet]) { - j_puppet =1; - break; - } + + if (unlikely(i_puppet == 0)) { + + if (sele < probability_now[swarm_now][i_puppet]) break; + + } else { + + if (sele < probability_now[swarm_now][i_puppet]) { + + j_puppet = 1; + break; + } + + } + } - if (j_puppet ==1 && sele < probability_now[swarm_now][i_puppet-1]) + + if (j_puppet == 1 && sele < probability_now[swarm_now][i_puppet - 1]) FATAL("error select_algorithm"); return i_puppet; + } /* Helper to choose random block len for block operations in fuzz_one(). 
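
select_algorithm() above is a roulette-wheel draw: probability_now[swarm_now][]
holds a cumulative distribution over the MOpt mutation operators, a uniform
draw in [0, 1) is derived from UR(10000), and the first bucket whose cumulative
mass exceeds the draw wins; the j_puppet check merely asserts that the walk
really stopped at the first such bucket. A self-contained sketch, with a
hypothetical five-operator table and rand() standing in for UR():

#include <stdio.h>
#include <stdlib.h>

#define OPERATOR_NUM 5

/* Hypothetical cumulative table: entry i is P(operator <= i), so the
   array is nondecreasing and ends at 1.0. */
static double cumulative[OPERATOR_NUM] = { 0.10, 0.35, 0.60, 0.85, 1.00 };

static int roulette_pick(void) {

  double sele = (rand() % 10000) * 0.0001;  /* Mirrors UR(10000) * 0.0001. */
  int    i;

  for (i = 0; i < OPERATOR_NUM; ++i)
    if (sele < cumulative[i]) return i;

  return OPERATOR_NUM - 1;  /* Guard against rounding at the top end. */

}

int main(void) {

  int hits[OPERATOR_NUM] = { 0 };

  for (int i = 0; i < 100000; ++i) ++hits[roulette_pick()];
  for (int i = 0; i < OPERATOR_NUM; ++i) printf("op %d: %d\n", i, hits[i]);
  return 0;

}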
@@ -58,27 +67,29 @@ static u32 choose_block_len(u32 limit) { switch (UR(rlim)) { - case 0: min_value = 1; - max_value = HAVOC_BLK_SMALL; - break; + case 0: + min_value = 1; + max_value = HAVOC_BLK_SMALL; + break; - case 1: min_value = HAVOC_BLK_SMALL; - max_value = HAVOC_BLK_MEDIUM; - break; + case 1: + min_value = HAVOC_BLK_SMALL; + max_value = HAVOC_BLK_MEDIUM; + break; - default: + default: - if (UR(10)) { + if (UR(10)) { - min_value = HAVOC_BLK_MEDIUM; - max_value = HAVOC_BLK_LARGE; + min_value = HAVOC_BLK_MEDIUM; + max_value = HAVOC_BLK_LARGE; - } else { + } else { - min_value = HAVOC_BLK_LARGE; - max_value = HAVOC_BLK_XL; + min_value = HAVOC_BLK_LARGE; + max_value = HAVOC_BLK_XL; - } + } } @@ -88,7 +99,6 @@ static u32 choose_block_len(u32 limit) { } - /* Helper function to see if a particular change (xor_val = old ^ new) could be a product of deterministic bit flips with the lengths and stepovers attempted by afl-fuzz. This is used to avoid dupes in some of the @@ -104,7 +114,12 @@ static u8 could_be_bitflip(u32 xor_val) { /* Shift left until first bit set. */ - while (!(xor_val & 1)) { ++sh; xor_val >>= 1; } + while (!(xor_val & 1)) { + + ++sh; + xor_val >>= 1; + + } /* 1-, 2-, and 4-bit patterns are OK anywhere. */ @@ -115,14 +130,12 @@ static u8 could_be_bitflip(u32 xor_val) { if (sh & 7) return 0; - if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) - return 1; + if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1; return 0; } - /* Helper function to see if a particular value is reachable through arithmetic operations. Used for similar purposes. */ @@ -136,10 +149,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { for (i = 0; i < blen; ++i) { - u8 a = old_val >> (8 * i), - b = new_val >> (8 * i); + u8 a = old_val >> (8 * i), b = new_val >> (8 * i); - if (a != b) { ++diffs; ov = a; nv = b; } + if (a != b) { + + ++diffs; + ov = a; + nv = b; + + } } @@ -147,8 +165,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (diffs == 1) { - if ((u8)(ov - nv) <= ARITH_MAX || - (u8)(nv - ov) <= ARITH_MAX) return 1; + if ((u8)(ov - nv) <= ARITH_MAX || (u8)(nv - ov) <= ARITH_MAX) return 1; } @@ -160,10 +177,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { for (i = 0; i < blen / 2; ++i) { - u16 a = old_val >> (16 * i), - b = new_val >> (16 * i); + u16 a = old_val >> (16 * i), b = new_val >> (16 * i); - if (a != b) { ++diffs; ov = a; nv = b; } + if (a != b) { + + ++diffs; + ov = a; + nv = b; + + } } @@ -171,13 +193,12 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (diffs == 1) { - if ((u16)(ov - nv) <= ARITH_MAX || - (u16)(nv - ov) <= ARITH_MAX) return 1; + if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1; - ov = SWAP16(ov); nv = SWAP16(nv); + ov = SWAP16(ov); + nv = SWAP16(nv); - if ((u16)(ov - nv) <= ARITH_MAX || - (u16)(nv - ov) <= ARITH_MAX) return 1; + if ((u16)(ov - nv) <= ARITH_MAX || (u16)(nv - ov) <= ARITH_MAX) return 1; } @@ -186,13 +207,15 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { if (blen == 4) { if ((u32)(old_val - new_val) <= ARITH_MAX || - (u32)(new_val - old_val) <= ARITH_MAX) return 1; + (u32)(new_val - old_val) <= ARITH_MAX) + return 1; new_val = SWAP32(new_val); old_val = SWAP32(old_val); if ((u32)(old_val - new_val) <= ARITH_MAX || - (u32)(new_val - old_val) <= ARITH_MAX) return 1; + (u32)(new_val - old_val) <= ARITH_MAX) + return 1; } @@ -200,8 +223,7 @@ static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { } 
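
could_be_bitflip() and could_be_arith() exist purely to deduplicate work: a
value that the earlier walking-bit stages already produced must not be executed
again by the arithmetic stage. The bitflip test normalizes xor_val by shifting
out trailing zero bits, then accepts exactly the 1-, 2- and 4-bit runs (1, 3,
15) at any offset plus the byte-aligned 8-, 16- and 32-bit runs. The helper is
reproduced below in compact form with two worked inputs, purely for
experimentation:

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int  u32;

static u8 could_be_bitflip(u32 xor_val) {

  u32 sh = 0;

  if (!xor_val) return 1;

  /* Shift left until first bit set. */
  while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }

  /* 1-, 2-, and 4-bit patterns are OK anywhere. */
  if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

  /* 8+-bit patterns must be byte-aligned. */
  if (sh & 7) return 0;

  if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1;

  return 0;

}

int main(void) {

  /* 0x41 ^ 0x40 = 0x01: a one-bit flip, so arith would skip this value. */
  printf("%d\n", could_be_bitflip(0x41 ^ 0x40));  /* prints 1 */

  /* 0x41 ^ 0x36 = 0x77: not a flip pattern, so arith must try it. */
  printf("%d\n", could_be_bitflip(0x41 ^ 0x36));  /* prints 0 */

  return 0;

}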
- -/* Last but not least, a similar helper to see if insertion of an +/* Last but not least, a similar helper to see if insertion of an interesting integer is redundant given the insertions done for shorter blen. The last param (check_le) is set if the caller already executed LE insertion for current blen and wants to see @@ -220,8 +242,8 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { for (j = 0; j < sizeof(interesting_8); ++j) { - u32 tval = (old_val & ~(0xff << (i * 8))) | - (((u8)interesting_8[j]) << (i * 8)); + u32 tval = + (old_val & ~(0xff << (i * 8))) | (((u8)interesting_8[j]) << (i * 8)); if (new_val == tval) return 1; @@ -274,11 +296,10 @@ static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { } - #ifndef IGNORE_FINDS -/* Helper function to compare buffers; returns first and last differing offset. We - use this to find reasonable locations for splicing two files. */ +/* Helper function to compare buffers; returns first and last differing offset. + We use this to find reasonable locations for splicing two files. */ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { @@ -313,11 +334,11 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { u8 fuzz_one_original(char** argv) { s32 len, fd, temp_len, i, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt; u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - u8 ret_val = 1, doing_det = 0; + u8 ret_val = 1, doing_det = 0; u8 a_collect[MAX_AUTO_EXTRA]; u32 a_len = 0; @@ -337,8 +358,10 @@ u8 fuzz_one_original(char** argv) { possibly skip to them at the expense of already-fuzzed or non-favored cases. */ - if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + if (((queue_cur->was_fuzzed > 0 || queue_cur->fuzz_level > 0) || + !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { @@ -346,7 +369,8 @@ u8 fuzz_one_original(char** argv) { The odds of skipping stuff are higher for already-fuzzed inputs and lower for never-fuzzed entries. */ - if (queue_cycle > 1 && (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) { + if (queue_cycle > 1 && + (queue_cur->fuzz_level == 0 || queue_cur->was_fuzzed)) { if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; @@ -361,9 +385,11 @@ u8 fuzz_one_original(char** argv) { #endif /* ^IGNORE_FINDS */ if (not_on_tty) { + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", current_entry, queued_paths, unique_crashes); fflush(stdout); + } /* Map the test case into memory. 
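*/

The skip logic at the top of fuzz_one_original() is a biased coin: while
favored, never-fuzzed entries are pending, anything already fuzzed or
non-favored is passed over 99% of the time; otherwise non-favored entries are
skipped with probability 75% (never fuzzed) or 95% (seen before). A minimal
sketch of that decision with the config.h constants inlined (the dumb_mode and
queue-size guards of the real code are omitted here):

#include <stdlib.h>

/* From config.h. */
#define SKIP_TO_NEW_PROB   99
#define SKIP_NFAV_NEW_PROB 75
#define SKIP_NFAV_OLD_PROB 95

static int UR(int limit) { return rand() % limit; }

/* Returns 1 if the entry should be skipped this cycle. */
int should_skip(int pending_favored, int favored, int was_fuzzed,
                int queue_cycle) {

  if (pending_favored) {

    if ((was_fuzzed || !favored) && UR(100) < SKIP_TO_NEW_PROB) return 1;

  } else if (!favored) {

    if (queue_cycle > 1 && !was_fuzzed) {

      if (UR(100) < SKIP_NFAV_NEW_PROB) return 1;

    } else {

      if (UR(100) < SKIP_NFAV_OLD_PROB) return 1;

    }

  }

  return 0;

}

/*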
*/ @@ -376,7 +402,8 @@ u8 fuzz_one_original(char** argv) { orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); + if (orig_in == MAP_FAILED) + PFATAL("Unable to mmap '%s' with len %d", queue_cur->fname, len); close(fd); @@ -402,14 +429,15 @@ u8 fuzz_one_original(char** argv) { res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); } if (stop_soon || res != crash_mode) { + ++cur_skipped_paths; goto abandon_entry; + } } @@ -422,12 +450,13 @@ u8 fuzz_one_original(char** argv) { u8 res = trim_case(argv, queue_cur, in_buf); - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); if (stop_soon) { + ++cur_skipped_paths; goto abandon_entry; + } /* Don't retry trimming, even if it failed. */ @@ -449,49 +478,56 @@ u8 fuzz_one_original(char** argv) { if (perf_score == 0) goto abandon_entry; if (custom_mutator) { + stage_short = "custom"; stage_name = "custom mutator"; stage_max = len << 3; stage_val_type = STAGE_VAL_NONE; - const u32 max_seed_size = 4096*4096; - u8* mutated_buf = ck_alloc(max_seed_size); + const u32 max_seed_size = 4096 * 4096; + u8* mutated_buf = ck_alloc(max_seed_size); orig_hit_cnt = queued_paths + unique_crashes; - for (stage_cur = 0 ; stage_cur < stage_max ; ++stage_cur) { - size_t orig_size = (size_t) len; - size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, max_seed_size, UR(UINT32_MAX)); + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + + size_t orig_size = (size_t)len; + size_t mutated_size = custom_mutator(out_buf, orig_size, mutated_buf, + max_seed_size, UR(UINT32_MAX)); if (mutated_size > 0) { + out_buf = ck_realloc(out_buf, mutated_size); memcpy(out_buf, mutated_buf, mutated_size); - if (common_fuzz_stuff(argv, out_buf, (u32) mutated_size)) { + if (common_fuzz_stuff(argv, out_buf, (u32)mutated_size)) { + goto abandon_entry; + } + } + } ck_free(mutated_buf); new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_CUSTOM_MUTATOR] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_CUSTOM_MUTATOR] += stage_max; goto abandon_entry; - } + } /* Skip right away if -d is given, if it has not been chosen sufficiently often to warrant the expensive deterministic stage (fuzz_level), or if it has gone through deterministic testing in earlier, resumed runs (passed_det). */ - if (skip_deterministic - || ((!queue_cur->passed_det) - && perf_score < ( - queue_cur->depth * 30 <= havoc_max_mult * 100 - ? queue_cur->depth * 30 - : havoc_max_mult * 100)) - || queue_cur->passed_det) + if (skip_deterministic || + ((!queue_cur->passed_det) && + perf_score < (queue_cur->depth * 30 <= havoc_max_mult * 100 + ? 
queue_cur->depth * 30 + : havoc_max_mult * 100)) || + queue_cur->passed_det) #ifdef USE_PYTHON goto python_stage; #else @@ -514,17 +550,20 @@ u8 fuzz_one_original(char** argv) { * SIMPLE BITFLIP (+dictionary construction) * *********************************************/ -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ } while (0) /* Single walking bit. */ stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; stage_val_type = STAGE_VAL_NONE; @@ -556,7 +595,7 @@ u8 fuzz_one_original(char** argv) { We do this here, rather than as a separate stage, because it's a nice way to keep the operation approximately "free" (i.e., no extra execs). - + Empirically, performing the check when flipping the least significant bit is advantageous, compared to doing it at the time of more disruptive changes, where the program flow may be affected in more violent ways. @@ -602,7 +641,7 @@ u8 fuzz_one_original(char** argv) { if (cksum != queue_cur->exec_cksum) { - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; ++a_len; } @@ -613,14 +652,14 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP1] += stage_max; /* Two walking bits. */ - stage_name = "bitflip 2/1"; + stage_name = "bitflip 2/1"; stage_short = "flip2"; - stage_max = (len << 3) - 1; + stage_max = (len << 3) - 1; orig_hit_cnt = new_hit_cnt; @@ -640,14 +679,14 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP2] += stage_max; /* Four walking bits. */ - stage_name = "bitflip 4/1"; + stage_name = "bitflip 4/1"; stage_short = "flip4"; - stage_max = (len << 3) - 3; + stage_max = (len << 3) - 3; orig_hit_cnt = new_hit_cnt; @@ -671,7 +710,7 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP4] += stage_max; /* Effector map setup. These macros calculate: @@ -682,27 +721,29 @@ u8 fuzz_one_original(char** argv) { */ -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) /* Initialize effector map for the next step (see comments below). Always flag first and last byte as doing something. */ - eff_map = ck_alloc(EFF_ALEN(len)); + eff_map = ck_alloc(EFF_ALEN(len)); eff_map[0] = 1; if (EFF_APOS(len - 1) != 0) { + eff_map[EFF_APOS(len - 1)] = 1; ++eff_cnt; + } /* Walking byte. 
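*/

FLIP_BIT treats the buffer as a big-endian bit string: bit _b lives in byte
_b >> 3, and within that byte the mask 128 >> (_b & 7) counts down from the
most significant bit. Applying the macro twice restores the original buffer,
which is exactly how the walking-bit stages undo each mutation. A tiny
demonstration:

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int  u32;

#define FLIP_BIT(_ar, _b)                   \
  do {                                      \
                                            \
    u8* _arf = (u8*)(_ar);                  \
    u32 _bf = (_b);                         \
    _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
                                            \
  } while (0)

int main(void) {

  u8 buf[2] = { 0x00, 0x00 };

  FLIP_BIT(buf, 0);  /* MSB of byte 0. */
  FLIP_BIT(buf, 9);  /* Second-highest bit of byte 1. */
  printf("%02x %02x\n", buf[0], buf[1]);  /* 80 40 */

  FLIP_BIT(buf, 0);  /* Flipping again restores the buffer. */
  FLIP_BIT(buf, 9);
  printf("%02x %02x\n", buf[0], buf[1]);  /* 00 00 */

  return 0;

}

/*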
*/ - stage_name = "bitflip 8/8"; + stage_name = "bitflip 8/8"; stage_short = "flip8"; - stage_max = len; + stage_max = len; orig_hit_cnt = new_hit_cnt; @@ -732,8 +773,10 @@ u8 fuzz_one_original(char** argv) { cksum = ~queue_cur->exec_cksum; if (cksum != queue_cur->exec_cksum) { + eff_map[EFF_APOS(stage_cur)] = 1; ++eff_cnt; + } } @@ -763,17 +806,17 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP8] += stage_max; /* Two walking bytes. */ if (len < 2) goto skip_bitflip; - stage_name = "bitflip 16/8"; + stage_name = "bitflip 16/8"; stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + stage_cur = 0; + stage_max = len - 1; orig_hit_cnt = new_hit_cnt; @@ -782,8 +825,10 @@ u8 fuzz_one_original(char** argv) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + --stage_max; continue; + } stage_cur_byte = i; @@ -795,22 +840,21 @@ u8 fuzz_one_original(char** argv) { *(u16*)(out_buf + i) ^= 0xFFFF; - } new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP16] += stage_max; if (len < 4) goto skip_bitflip; /* Four walking bytes. */ - stage_name = "bitflip 32/8"; + stage_name = "bitflip 32/8"; stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + stage_cur = 0; + stage_max = len - 3; orig_hit_cnt = new_hit_cnt; @@ -819,8 +863,10 @@ u8 fuzz_one_original(char** argv) { /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + --stage_max; continue; + } stage_cur_byte = i; @@ -836,7 +882,7 @@ u8 fuzz_one_original(char** argv) { new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_FLIP32] += stage_max; skip_bitflip: @@ -849,10 +895,10 @@ skip_bitflip: /* 8-bit arithmetics. */ - stage_name = "arith 8/8"; + stage_name = "arith 8/8"; stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; stage_val_type = STAGE_VAL_LE; @@ -865,8 +911,10 @@ skip_bitflip: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)]) { + stage_max -= 2 * ARITH_MAX; continue; + } stage_cur_byte = i; @@ -886,9 +934,11 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else - r = orig ^ (orig - j); + --stage_max; + + r = orig ^ (orig - j); if (!could_be_bitflip(r)) { @@ -898,7 +948,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; out_buf[i] = orig; @@ -908,17 +960,17 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH8] += stage_max; /* 16-bit arithmetics, both endians. 
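*/

Each byte position above costs at most 2 * ARITH_MAX executions (35 additions
and 35 subtractions), and the XOR-with-original trick prunes any candidate a
bitflip stage already covered. For a concrete feel: with orig = 0x7f, adding 1
yields 0x80 and r = 0x7f ^ 0x80 = 0xff, an 8-bit flip pattern, so that
execution is skipped; adding 2 yields 0x81 and r = 0x7f ^ 0x81 = 0xfe, which no
bitflip produces, so it runs. A sketch of the candidate loop for one byte
(common_fuzz_stuff and the effector map are deliberately left out):

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int  u32;

#define ARITH_MAX 35  /* From config.h. */

static u8 could_be_bitflip(u32 x) {

  u32 sh = 0;
  if (!x) return 1;
  while (!(x & 1)) { ++sh; x >>= 1; }
  if (x == 1 || x == 3 || x == 15) return 1;
  if (sh & 7) return 0;
  return x == 0xff || x == 0xffff || x == 0xffffffff;

}

int main(void) {

  u8  orig = 0x7f;
  int tried = 0;

  for (int j = 1; j <= ARITH_MAX; ++j) {

    u8 r1 = orig ^ (u8)(orig + j);
    u8 r2 = orig ^ (u8)(orig - j);

    if (!could_be_bitflip(r1)) ++tried;
    if (!could_be_bitflip(r2)) ++tried;

  }

  printf("%d of %d arith candidates survive\n", tried, 2 * ARITH_MAX);
  return 0;

}

/*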
*/ if (len < 2) goto skip_arith; - stage_name = "arith 16/8"; + stage_name = "arith 16/8"; stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; orig_hit_cnt = new_hit_cnt; @@ -929,25 +981,26 @@ skip_bitflip: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= 4 * ARITH_MAX; continue; + } stage_cur_byte = i; for (j = 1; j <= ARITH_MAX; ++j) { - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), r3 = orig ^ SWAP16(SWAP16(orig) + j), r4 = orig ^ SWAP16(SWAP16(orig) - j); /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the + if the operation would affect more than one byte (hence the & 0xff overflow checks) and if it couldn't be a product of a bitflip. */ - stage_val_type = STAGE_VAL_LE; + stage_val_type = STAGE_VAL_LE; if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { @@ -956,8 +1009,10 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - - } else --stage_max; + + } else + + --stage_max; if ((orig & 0xff) < j && !could_be_bitflip(r2)) { @@ -967,13 +1022,14 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; /* Big endian comes next. Same deal. */ stage_val_type = STAGE_VAL_BE; - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { stage_cur_val = j; @@ -982,7 +1038,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((orig >> 8) < j && !could_be_bitflip(r4)) { @@ -992,7 +1050,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; *(u16*)(out_buf + i) = orig; @@ -1002,17 +1062,17 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH16] += stage_max; /* 32-bit arithmetics, both endians. */ if (len < 4) goto skip_arith; - stage_name = "arith 32/8"; + stage_name = "arith 32/8"; stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; orig_hit_cnt = new_hit_cnt; @@ -1024,16 +1084,17 @@ skip_bitflip: if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= 4 * ARITH_MAX; continue; + } stage_cur_byte = i; for (j = 1; j <= ARITH_MAX; ++j) { - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), r3 = orig ^ SWAP32(SWAP32(orig) + j), r4 = orig ^ SWAP32(SWAP32(orig) - j); @@ -1050,7 +1111,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { @@ -1060,7 +1123,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; /* Big endian next. 
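*/

The big-endian variants reuse the little-endian code path by byte-swapping,
adding, and swapping back: SWAP16(SWAP16(orig) + j) means "treat these two
bytes as a big-endian integer and add j", expressed on the little-endian hosts
afl-fuzz typically runs on. A short demonstration:

#include <stdio.h>

typedef unsigned short u16;

#define SWAP16(x) ((u16)(((u16)(x) << 8) | ((u16)(x) >> 8)))

int main(void) {

  u16 orig = 0x00ff;  /* Stored as bytes ff 00 on a little-endian host. */

  /* Little-endian add: 0x00ff + 1 = 0x0100, the carry crosses bytes. */
  printf("%04x\n", (u16)(orig + 1));

  /* Big-endian add: bytes ff 00 read big-endian are 0xff00; +1 = 0xff01,
     stored back as host value 0x01ff. */
  printf("%04x\n", SWAP16((u16)(SWAP16(orig) + 1)));

  return 0;

}

/*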
*/ @@ -1074,7 +1139,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { @@ -1084,7 +1151,9 @@ skip_bitflip: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; *(u32*)(out_buf + i) = orig; @@ -1094,7 +1163,7 @@ skip_bitflip: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_ARITH32] += stage_max; skip_arith: @@ -1103,10 +1172,10 @@ skip_arith: * INTERESTING VALUES * **********************/ - stage_name = "interest 8/8"; + stage_name = "interest 8/8"; stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + stage_cur = 0; + stage_max = len * sizeof(interesting_8); stage_val_type = STAGE_VAL_LE; @@ -1121,8 +1190,10 @@ skip_arith: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)]) { + stage_max -= sizeof(interesting_8); continue; + } stage_cur_byte = i; @@ -1133,8 +1204,10 @@ skip_arith: if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || could_be_arith(orig, (u8)interesting_8[j], 1)) { + --stage_max; continue; + } stage_cur_val = interesting_8[j]; @@ -1151,17 +1224,17 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST8] += stage_max; /* Setting 16-bit integers, both endians. */ if (no_arith || len < 2) goto skip_interest; - stage_name = "interest 16/8"; + stage_name = "interest 16/8"; stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); orig_hit_cnt = new_hit_cnt; @@ -1172,8 +1245,10 @@ skip_arith: /* Let's consult the effector map... */ if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + stage_max -= sizeof(interesting_16); continue; + } stage_cur_byte = i; @@ -1196,7 +1271,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && @@ -1209,7 +1286,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; } @@ -1219,17 +1298,17 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST16] += stage_max; if (len < 4) goto skip_interest; /* Setting 32-bit integers, both endians. 
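*/

The "interesting" stages overwrite each position with a fixed table of
boundary values (off-by-one limits, powers of two, signedness pivots) in both
byte orders, again skipping anything the bitflip or arith stages could have
produced. A sketch of the 16-bit overwrite loop on a little-endian host, using
the INTERESTING_16 table from config.h (target execution elided):

#include <stdio.h>
#include <string.h>

typedef short          s16;
typedef unsigned short u16;
typedef unsigned char  u8;

#define SWAP16(x) ((u16)(((u16)(x) << 8) | ((u16)(x) >> 8)))

/* INTERESTING_16 from config.h. */
static s16 interesting_16[] = { -32768, -129,  128,  255,  256,
                                512,    1000, 1024, 4096, 32767 };

int main(void) {

  u8 buf[4] = { 'A', 'B', 'C', 'D' };

  for (size_t j = 0; j < sizeof(interesting_16) / 2; ++j) {

    u16 le = (u16)interesting_16[j];
    u16 be = SWAP16(le);

    memcpy(buf + 1, &le, 2);  /* Little-endian attempt at offset 1. */
    /* ... run the target here, then try the big-endian form if distinct. */
    if (be != le) memcpy(buf + 1, &be, 2);

  }

  return 0;

}

/*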
*/ - stage_name = "interest 32/8"; + stage_name = "interest 32/8"; stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); orig_hit_cnt = new_hit_cnt; @@ -1241,8 +1320,10 @@ skip_arith: if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { + stage_max -= sizeof(interesting_32) >> 1; continue; + } stage_cur_byte = i; @@ -1265,7 +1346,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && @@ -1278,7 +1361,9 @@ skip_arith: if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; ++stage_cur; - } else --stage_max; + } else + + --stage_max; } @@ -1288,7 +1373,7 @@ skip_arith: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_INTEREST32] += stage_max; skip_interest: @@ -1301,10 +1386,10 @@ skip_interest: /* Overwrite with user-supplied extras. */ - stage_name = "user extras (over)"; + stage_name = "user extras (over)"; stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_cur = 0; + stage_max = extras_cnt * len; stage_val_type = STAGE_VAL_NONE; @@ -1354,15 +1439,15 @@ skip_interest: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_UO] += stage_max; /* Insertion of user-supplied extras. 
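*/

Insertion, unlike overwrite, grows the test case: at every position the token
is spliced in and the tail shifted right, subject to the MAX_FILE cap checked
above. The buffer assembly is just two memcpys around the token. A compact
sketch (error handling and the size cap omitted):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned char u8;

/* Insert token at offset i; caller frees the result. */
static u8* insert_token(const u8* buf, size_t len, size_t i,
                        const u8* tok, size_t tok_len) {

  u8* out = malloc(len + tok_len);

  memcpy(out, buf, i);                          /* Head. */
  memcpy(out + i, tok, tok_len);                /* Token. */
  memcpy(out + i + tok_len, buf + i, len - i);  /* Tail. */

  return out;

}

int main(void) {

  const u8* buf = (const u8*)"HELLO";
  u8*       out = insert_token(buf, 5, 2, (const u8*)"IHDR", 4);

  fwrite(out, 1, 9, stdout);  /* HEIHDRLLO */
  putchar('\n');
  free(out);
  return 0;

}

/*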
*/ - stage_name = "user extras (insert)"; + stage_name = "user extras (insert)"; stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_cur = 0; + stage_max = extras_cnt * len; orig_hit_cnt = new_hit_cnt; @@ -1375,8 +1460,10 @@ skip_interest: for (j = 0; j < extras_cnt; ++j) { if (len + extras[j].len > MAX_FILE) { - --stage_max; + + --stage_max; continue; + } /* Insert token */ @@ -1386,8 +1473,10 @@ skip_interest: memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { + ck_free(ex_tmp); goto abandon_entry; + } ++stage_cur; @@ -1403,17 +1492,17 @@ skip_interest: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_UI] += stage_max; skip_user_extras: if (!a_extras_cnt) goto skip_extras; - stage_name = "auto extras (over)"; + stage_name = "auto extras (over)"; stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; stage_val_type = STAGE_VAL_NONE; @@ -1431,7 +1520,8 @@ skip_user_extras: if (a_extras[j].len > len - i || !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { --stage_max; continue; @@ -1454,7 +1544,7 @@ skip_user_extras: new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_EXTRAS_AO] += stage_max; skip_extras: @@ -1473,36 +1563,51 @@ python_stage: if (!py_module) goto havoc_stage; - stage_name = "python"; + stage_name = "python"; stage_short = "python"; - stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; + stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; orig_hit_cnt = queued_paths + unique_crashes; - char* retbuf = NULL; + char* retbuf = NULL; size_t retlen = 0; for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - struct queue_entry* target; - u32 tid; - u8* new_buf; -retry_external_pick: + struct queue_entry* target; + u32 tid; + u8* new_buf; + + retry_external_pick: /* Pick a random other queue entry for passing to external API */ - do { tid = UR(queued_paths); } while (tid == current_entry && queued_paths > 1); + do { + + tid = UR(queued_paths); + + } while (tid == current_entry && queued_paths > 1); target = queue; - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + while (tid >= 100) { + + target = target->next_100; + tid -= 100; + + } + + while (tid--) + target = target->next; /* Make sure that the target has a reasonable length. */ - while (target && (target->len < 2 || target == queue_cur) && queued_paths > 1) { + while (target && (target->len < 2 || target == queue_cur) && + queued_paths > 1) { + target = target->next; ++splicing_with; + } if (!target) goto retry_external_pick; @@ -1519,12 +1624,14 @@ retry_external_pick: ck_free(new_buf); if (retbuf) { - if (!retlen) - goto abandon_entry; + + if (!retlen) goto abandon_entry; if (common_fuzz_stuff(argv, retbuf, retlen)) { + free(retbuf); goto abandon_entry; + } /* Reset retbuf/retlen */ @@ -1536,26 +1643,35 @@ retry_external_pick: permitting. 
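*/

That adaptive extension is worth spelling out: whenever a pass discovers new
queue entries, both the remaining budget (stage_max) and the entry's
perf_score are doubled, but only while perf_score stays within
havoc_max_mult * 100, so a lucky input cannot grow its budget without bound.
In sketch form, with the hypothetical helper name maybe_extend_stage and the
config.h default standing in for the runtime havoc_max_mult:

#define HAVOC_MAX_MULT 16  /* From config.h; adjustable at run time. */

void maybe_extend_stage(unsigned* stage_max, unsigned* perf_score,
                        unsigned* havoc_queued, unsigned queued_paths) {

  if (queued_paths != *havoc_queued) {

    if (*perf_score <= HAVOC_MAX_MULT * 100) {

      *stage_max *= 2;
      *perf_score *= 2;

    }

    *havoc_queued = queued_paths;

  }

}

/*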
*/ if (queued_paths != havoc_queued) { + if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; + + stage_max *= 2; perf_score *= 2; + } havoc_queued = queued_paths; + } + } + } new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt; + stage_finds[STAGE_PYTHON] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_PYTHON] += stage_max; if (python_only) { + /* Skip other stages */ ret_val = 0; goto abandon_entry; + } + #endif /**************** @@ -1571,10 +1687,10 @@ havoc_stage: if (!splice_cycle) { - stage_name = "havoc"; + stage_name = "havoc"; stage_short = "havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; } else { @@ -1583,9 +1699,9 @@ havoc_stage: perf_score = orig_perf; sprintf(tmp, "splice %u", splice_cycle); - stage_name = tmp; + stage_name = tmp; stage_short = "splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; } @@ -1605,7 +1721,7 @@ havoc_stage: u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); stage_cur_val = use_stacking; - + for (i = 0; i < use_stacking; ++i) { switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) { @@ -1617,7 +1733,7 @@ havoc_stage: FLIP_BIT(out_buf, UR(temp_len << 3)); break; - case 1: + case 1: /* Set byte to interesting value. */ @@ -1633,12 +1749,12 @@ havoc_stage: if (UR(2)) { *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; + interesting_16[UR(sizeof(interesting_16) >> 1)]; } else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); } @@ -1651,14 +1767,14 @@ havoc_stage: if (temp_len < 4) break; if (UR(2)) { - + *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; + interesting_32[UR(sizeof(interesting_32) >> 2)]; } else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); } @@ -1696,7 +1812,7 @@ havoc_stage: u16 num = 1 + UR(ARITH_MAX); *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); } @@ -1720,7 +1836,7 @@ havoc_stage: u16 num = 1 + UR(ARITH_MAX); *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); } @@ -1744,7 +1860,7 @@ havoc_stage: u32 num = 1 + UR(ARITH_MAX); *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); } @@ -1768,7 +1884,7 @@ havoc_stage: u32 num = 1 + UR(ARITH_MAX); *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); } @@ -1785,28 +1901,28 @@ havoc_stage: case 11 ... 12: { - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ - u32 del_from, del_len; + u32 del_from, del_len; - if (temp_len < 2) break; + if (temp_len < 2) break; - /* Don't delete too much. */ + /* Don't delete too much. 
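*/

Deletion is a single memmove that closes the gap, with del_len drawn by
choose_block_len() so that most deletions are small and the file shrinks
gently rather than collapsing. A stand-alone sketch with the random choices
stubbed to constants:

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;

int main(void) {

  u8     buf[] = "ABCDEFGH";
  size_t len = 8;

  size_t del_len  = 3;  /* choose_block_len(len - 1) in the real code. */
  size_t del_from = 2;  /* UR(len - del_len + 1) in the real code. */

  memmove(buf + del_from, buf + del_from + del_len,
          len - del_from - del_len);
  len -= del_len;

  fwrite(buf, 1, len, stdout);  /* ABFGH */
  putchar('\n');
  return 0;

}

/*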
*/ - del_len = choose_block_len(temp_len - 1); + del_len = choose_block_len(temp_len - 1); - del_from = UR(temp_len - del_len + 1); + del_from = UR(temp_len - del_len + 1); - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); - temp_len -= del_len; + temp_len -= del_len; - break; + break; - } + } case 13: @@ -1820,7 +1936,7 @@ havoc_stage: if (actually_clone) { - clone_len = choose_block_len(temp_len); + clone_len = choose_block_len(temp_len); clone_from = UR(temp_len - clone_len + 1); } else { @@ -1830,7 +1946,7 @@ havoc_stage: } - clone_to = UR(temp_len); + clone_to = UR(temp_len); new_buf = ck_alloc_nozero(temp_len + clone_len); @@ -1860,128 +1976,129 @@ havoc_stage: case 14: { - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ - u32 copy_from, copy_to, copy_len; + u32 copy_from, copy_to, copy_len; - if (temp_len < 2) break; + if (temp_len < 2) break; - copy_len = choose_block_len(temp_len - 1); + copy_len = choose_block_len(temp_len - 1); - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); - if (UR(4)) { + if (UR(4)) { - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - } else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + } else - break; + memset(out_buf + copy_to, UR(2) ? UR(256) : out_buf[UR(temp_len)], + copy_len); - } + break; - /* Values 15 and 16 can be selected only if there are any extras - present in the dictionaries. */ + } + + /* Values 15 and 16 can be selected only if there are any extras + present in the dictionaries. */ case 15: { - /* Overwrite bytes with an extra. */ + /* Overwrite bytes with an extra. */ - if (!extras_cnt || (a_extras_cnt && UR(2))) { + if (!extras_cnt || (a_extras_cnt && UR(2))) { - /* No user-specified extras or odds in our favor. Let's use an - auto-detected one. */ + /* No user-specified extras or odds in our favor. Let's use an + auto-detected one. */ - u32 use_extra = UR(a_extras_cnt); - u32 extra_len = a_extras[use_extra].len; - u32 insert_at; + u32 use_extra = UR(a_extras_cnt); + u32 extra_len = a_extras[use_extra].len; + u32 insert_at; - if (extra_len > temp_len) break; + if (extra_len > temp_len) break; - insert_at = UR(temp_len - extra_len + 1); - memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); + insert_at = UR(temp_len - extra_len + 1); + memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); - } else { + } else { - /* No auto extras or odds in our favor. Use the dictionary. */ + /* No auto extras or odds in our favor. Use the dictionary. 
*/ - u32 use_extra = UR(extras_cnt); - u32 extra_len = extras[use_extra].len; - u32 insert_at; + u32 use_extra = UR(extras_cnt); + u32 extra_len = extras[use_extra].len; + u32 insert_at; - if (extra_len > temp_len) break; + if (extra_len > temp_len) break; - insert_at = UR(temp_len - extra_len + 1); - memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); - - } - - break; + insert_at = UR(temp_len - extra_len + 1); + memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); } + break; + + } + case 16: { - u32 use_extra, extra_len, insert_at = UR(temp_len + 1); - u8* new_buf; + u32 use_extra, extra_len, insert_at = UR(temp_len + 1); + u8* new_buf; - /* Insert an extra. Do the same dice-rolling stuff as for the - previous case. */ + /* Insert an extra. Do the same dice-rolling stuff as for the + previous case. */ - if (!extras_cnt || (a_extras_cnt && UR(2))) { + if (!extras_cnt || (a_extras_cnt && UR(2))) { - use_extra = UR(a_extras_cnt); - extra_len = a_extras[use_extra].len; + use_extra = UR(a_extras_cnt); + extra_len = a_extras[use_extra].len; - if (temp_len + extra_len >= MAX_FILE) break; + if (temp_len + extra_len >= MAX_FILE) break; - new_buf = ck_alloc_nozero(temp_len + extra_len); + new_buf = ck_alloc_nozero(temp_len + extra_len); - /* Head */ - memcpy(new_buf, out_buf, insert_at); + /* Head */ + memcpy(new_buf, out_buf, insert_at); - /* Inserted part */ - memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); + /* Inserted part */ + memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); - } else { + } else { - use_extra = UR(extras_cnt); - extra_len = extras[use_extra].len; + use_extra = UR(extras_cnt); + extra_len = extras[use_extra].len; - if (temp_len + extra_len >= MAX_FILE) break; + if (temp_len + extra_len >= MAX_FILE) break; - new_buf = ck_alloc_nozero(temp_len + extra_len); + new_buf = ck_alloc_nozero(temp_len + extra_len); - /* Head */ - memcpy(new_buf, out_buf, insert_at); + /* Head */ + memcpy(new_buf, out_buf, insert_at); - /* Inserted part */ - memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); - - } - - /* Tail */ - memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, - temp_len - insert_at); - - ck_free(out_buf); - out_buf = new_buf; - temp_len += extra_len; - - break; + /* Inserted part */ + memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); } + /* Tail */ + memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, + temp_len - insert_at); + + ck_free(out_buf); + out_buf = new_buf; + temp_len += extra_len; + + break; + + } + } } - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry; + if (common_fuzz_stuff(argv, out_buf, temp_len)) goto abandon_entry; /* out_buf might have been mangled a bit, so let's restore it to its original size and shape. 
*/ @@ -1996,8 +2113,10 @@ havoc_stage: if (queued_paths != havoc_queued) { if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; + + stage_max *= 2; perf_score *= 2; + } havoc_queued = queued_paths; @@ -2009,11 +2128,15 @@ havoc_stage: new_hit_cnt = queued_paths + unique_crashes; if (!splice_cycle) { - stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; + + stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_HAVOC] += stage_max; + } else { - stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; + + stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; stage_cycles[STAGE_SPLICE] += stage_max; + } #ifndef IGNORE_FINDS @@ -2029,38 +2152,53 @@ havoc_stage: retry_splicing: - if (use_splicing && splice_cycle++ < SPLICE_CYCLES && - queued_paths > 1 && queue_cur->len > 1) { + if (use_splicing && splice_cycle++ < SPLICE_CYCLES && queued_paths > 1 && + queue_cur->len > 1) { struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; /* First of all, if we've modified in_buf for havoc, let's clean that up... */ if (in_buf != orig_in) { + ck_free(in_buf); in_buf = orig_in; len = queue_cur->len; + } /* Pick a random queue entry and seek to it. Don't splice with yourself. */ - do { tid = UR(queued_paths); } while (tid == current_entry); + do { + + tid = UR(queued_paths); + + } while (tid == current_entry); splicing_with = tid; target = queue; - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + while (tid >= 100) { + + target = target->next_100; + tid -= 100; + + } + + while (tid--) + target = target->next; /* Make sure that the target has a reasonable length. */ while (target && (target->len < 2 || target == queue_cur)) { + target = target->next; ++splicing_with; + } if (!target) goto retry_splicing; @@ -2084,8 +2222,10 @@ retry_splicing: locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { + ck_free(new_buf); goto retry_splicing; + } /* Split somewhere between the first and last differing byte. */ @@ -2102,11 +2242,11 @@ retry_splicing: out_buf = ck_alloc_nozero(len); memcpy(out_buf, in_buf, len); -#ifdef USE_PYTHON +# ifdef USE_PYTHON goto python_stage; -#else +# else goto havoc_stage; -#endif +# endif } @@ -2121,10 +2261,13 @@ abandon_entry: /* Update pending_not_fuzzed count if we made it through the calibration cycle and have not seen this entry before. 
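*/

Splicing picks a donor case, finds the first and last differing byte with
locate_diffs(), and cuts both files at a random point strictly between the
two, keeping the head of the current case and the tail of the donor. A minimal
sketch with locate_diffs() inlined and UR() replaced by the midpoint for
determinism:

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;

int main(void) {

  u8     a[] = "GIF89a-AAAA";
  u8     b[] = "GIF89a-BBBB";
  size_t len = 11;

  /* locate_diffs(): first and last positions where a and b differ. */
  long f_diff = -1, l_diff = -1;
  for (size_t i = 0; i < len; ++i)
    if (a[i] != b[i]) { if (f_diff < 0) f_diff = (long)i; l_diff = (long)i; }

  /* Same bail-out condition as the real splicer. */
  if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) return 0;

  /* f_diff + UR(l_diff - f_diff) in the real code; midpoint here. */
  size_t split_at = (size_t)(f_diff + (l_diff - f_diff) / 2);

  u8 out[12] = { 0 };
  memcpy(out, a, split_at);
  memcpy(out + split_at, b + split_at, len - split_at);

  printf("%s\n", out);  /* GIF89a-ABBB: shared header kept, tails spliced. */
  return 0;

}

/*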
*/ - if (!stop_soon && !queue_cur->cal_failed && (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { + if (!stop_soon && !queue_cur->cal_failed && + (queue_cur->was_fuzzed == 0 || queue_cur->fuzz_level == 0)) { + --pending_not_fuzzed; queue_cur->was_fuzzed = 1; if (queue_cur->favored) --pending_favored; + } ++queue_cur->fuzz_level; @@ -2144,3576 +2287,3738 @@ abandon_entry: /* MOpt mode */ u8 pilot_fuzzing(char** argv) { - s32 len, fd, temp_len, i, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + s32 len, fd, temp_len, i, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; - u8 ret_val = 1, doing_det = 0; + u8 ret_val = 1, doing_det = 0; - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; #ifdef IGNORE_FINDS - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ - if (queue_cur->depth > 1) return 1; + if (queue_cur->depth > 1) return 1; #else - if (pending_favored) { + if (pending_favored) { - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ - if ((queue_cur->was_fuzzed || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + if ((queue_cur->was_fuzzed || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; - } - else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. */ + /* Otherwise, still possibly skip non-favored cases, albeit less often. + The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. */ - if (queue_cycle > 1 && !queue_cur->was_fuzzed) { + if (queue_cycle > 1 && !queue_cur->was_fuzzed) { - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - } - else { + } else { - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - } + } - } + } #endif /* ^IGNORE_FINDS */ - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } + if (not_on_tty) { - /* Map the test case into memory. */ + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); - fd = open(queue_cur->fname, O_RDONLY); + } - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + /* Map the test case into memory. 
*/ - len = queue_cur->len; + fd = open(queue_cur->fname, O_RDONLY); - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); + len = queue_cur->len; - close(fd); + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); - out_buf = ck_alloc_nozero(len); + close(fd); - subseq_tmouts = 0; + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ - cur_depth = queue_cur->depth; + out_buf = ck_alloc_nozero(len); - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ + subseq_tmouts = 0; - if (queue_cur->cal_failed) { + cur_depth = queue_cur->depth; - u8 res = FAULT_TMOUT; + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ - if (queue_cur->cal_failed < CAL_CHANCES) { + if (queue_cur->cal_failed) { - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + u8 res = FAULT_TMOUT; - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (queue_cur->cal_failed < CAL_CHANCES) { - } + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - } + } - /************ - * TRIMMING * - ************/ + if (stop_soon || res != crash_mode) { - if (!dumb_mode && !queue_cur->trim_done) { + ++cur_skipped_paths; + goto abandon_entry; - u8 res = trim_case(argv, queue_cur, in_buf); + } - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + } - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } + /************ + * TRIMMING * + ************/ - /* Don't retry trimming, even if it failed. */ + if (!dumb_mode && !queue_cur->trim_done) { - queue_cur->trim_done = 1; + u8 res = trim_case(argv, queue_cur, in_buf); - len = queue_cur->len; + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - } + if (stop_soon) { - memcpy(out_buf, in_buf, len); + ++cur_skipped_paths; + goto abandon_entry; - /********************* - * PERFORMANCE SCORE * - *********************/ + } - orig_perf = perf_score = calculate_score(queue_cur); + /* Don't retry trimming, even if it failed. */ - /* Skip right away if -d is given, if we have done deterministic fuzzing on - this entry ourselves (was_fuzzed), or if it has gone through deterministic - testing in earlier, resumed runs (passed_det). */ + queue_cur->trim_done = 1; - if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) - goto havoc_stage; + len = queue_cur->len; - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. 
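*/

When several master instances share the deterministic workload, the work is
sharded by path checksum: an entry belongs to the instance with 1-based id
master_id (out of master_max) exactly when
exec_cksum % master_max == master_id - 1, so every path's deterministic pass
runs on precisely one master and none is duplicated. A one-line predicate
captures it:

typedef unsigned int u32;

/* 1 if this master owns the deterministic pass for the entry. */
static int owns_deterministic(u32 exec_cksum, u32 master_id, u32 master_max) {

  return !master_max || (exec_cksum % master_max) == master_id - 1;

}

/*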
*/ + } - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) - goto havoc_stage; + memcpy(out_buf, in_buf, len); + /********************* + * PERFORMANCE SCORE * + *********************/ - cur_ms_lv = get_cur_time(); - if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || - (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) - { - key_puppet = 1; - goto pacemaker_fuzzing; - } + orig_perf = perf_score = calculate_score(queue_cur); - doing_det = 1; + /* Skip right away if -d is given, if we have done deterministic fuzzing on + this entry ourselves (was_fuzzed), or if it has gone through deterministic + testing in earlier, resumed runs (passed_det). */ - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ + if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) + goto havoc_stage; -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ + /* Skip deterministic fuzzing if exec path checksum puts this out of scope + for this master instance. */ + + if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) + goto havoc_stage; + + cur_ms_lv = get_cur_time(); + if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || + (last_crash_time != 0 && + cur_ms_lv - last_crash_time < limit_time_puppet) || + last_path_time == 0))) { + + key_puppet = 1; + goto pacemaker_fuzzing; + + } + + doing_det = 1; + + /********************************************* + * SIMPLE BITFLIP (+dictionary construction) * + *********************************************/ + +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ } while (0) - /* Single walking bit. */ + /* Single walking bit. */ - stage_short = "flip1"; - stage_max = len << 3; - stage_name = "bitflip 1/1"; + stage_short = "flip1"; + stage_max = len << 3; + stage_name = "bitflip 1/1"; + stage_val_type = STAGE_VAL_NONE; + orig_hit_cnt = queued_paths + unique_crashes; + prev_cksum = queue_cur->exec_cksum; - stage_val_type = STAGE_VAL_NONE; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - orig_hit_cnt = queued_paths + unique_crashes; + stage_cur_byte = stage_cur >> 3; - prev_cksum = queue_cur->exec_cksum; + FLIP_BIT(out_buf, stage_cur); - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur_byte = stage_cur >> 3; + FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur); + /* While flipping the least significant bit in every byte, pull of an extra + trick to detect possible syntax tokens. In essence, the idea is that if + you have a binary blob like this: - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + xxxxxxxxIHDRxxxxxxxx - FLIP_BIT(out_buf, stage_cur); + ...and changing the leading and trailing bytes causes variable or no + changes in program flow, but touching any character in the "IHDR" string + always produces the same, distinctive path, it's highly likely that + "IHDR" is an atomically-checked magic value of special significance to + the fuzzed format. - /* While flipping the least significant bit in every byte, pull of an extra - trick to detect possible syntax tokens. 
In essence, the idea is that if - you have a binary blob like this: + We do this here, rather than as a separate stage, because it's a nice + way to keep the operation approximately "free" (i.e., no extra execs). - xxxxxxxxIHDRxxxxxxxx + Empirically, performing the check when flipping the least significant bit + is advantageous, compared to doing it at the time of more disruptive + changes, where the program flow may be affected in more violent ways. - ...and changing the leading and trailing bytes causes variable or no - changes in program flow, but touching any character in the "IHDR" string - always produces the same, distinctive path, it's highly likely that - "IHDR" is an atomically-checked magic value of special significance to - the fuzzed format. + The caveat is that we won't generate dictionaries in the -d mode or -S + mode - but that's probably a fair trade-off. - We do this here, rather than as a separate stage, because it's a nice - way to keep the operation approximately "free" (i.e., no extra execs). + This won't work particularly well with paths that exhibit variable + behavior, but fails gracefully, so we'll carry out the checks anyway. - Empirically, performing the check when flipping the least significant bit - is advantageous, compared to doing it at the time of more disruptive - changes, where the program flow may be affected in more violent ways. + */ - The caveat is that we won't generate dictionaries in the -d mode or -S - mode - but that's probably a fair trade-off. + if (!dumb_mode && (stage_cur & 7) == 7) { - This won't work particularly well with paths that exhibit variable - behavior, but fails gracefully, so we'll carry out the checks anyway. + u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - */ + if (stage_cur == stage_max - 1 && cksum == prev_cksum) { - if (!dumb_mode && (stage_cur & 7) == 7) { + /* If at end of file and we are still collecting a string, grab the + final character and force output. */ - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ + } else if (cksum != prev_cksum) { - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + /* Otherwise, if the checksum has changed, see if we have something + worthwhile queued up, and collect that if the answer is yes. */ - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - } - else if (cksum != prev_cksum) { + a_len = 0; + prev_cksum = cksum; - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ + } - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + /* Continue collecting string, but only if the bit flip actually made + any difference - we don't want no-op tokens. */ - a_len = 0; - prev_cksum = cksum; + if (cksum != queue_cur->exec_cksum) { - } + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. 
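*/

The collector is a tiny state machine keyed on the trace checksum: as the
walking-bit stage crosses each byte, a byte whose flip lands the program on
the same distinctive path as its neighbors gets appended to a_collect[]; when
the checksum changes, the run is flushed to maybe_add_auto() if its length is
within [MIN_AUTO_EXTRA, MAX_AUTO_EXTRA]. A compressed sketch of that flow,
with a stubbed checksum standing in for hash32(trace_bits, ...) and the
end-of-file and no-op-token refinements of the real loop left out:

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int  u32;

#define MIN_AUTO_EXTRA 3  /* From config.h. */
#define MAX_AUTO_EXTRA 32

static void maybe_add_auto(u8* token, u32 len) {

  printf("token candidate: %.*s\n", (int)len, (char*)token);

}

int main(void) {

  /* Pretend bytes 8..11 ("IHDR") all funnel execution onto one path. */
  u8  buf[] = "xxxxxxxxIHDRxxxxxxxx";
  u32 len = 20, a_len = 0, prev = 0;
  u8  a_collect[MAX_AUTO_EXTRA];

  for (u32 i = 0; i < len; ++i) {

    u32 cksum = (i >= 8 && i < 12) ? 0xdead : 0x1000 + i;  /* Stub. */

    if (cksum != prev) {

      /* Checksum changed: flush the collected run if it looks like a token. */
      if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
        maybe_add_auto(a_collect, a_len);

      a_len = 0;
      prev  = cksum;

    }

    if (a_len < MAX_AUTO_EXTRA) a_collect[a_len++] = buf[i];

  }

  return 0;  /* Prints "token candidate: IHDR". */

}

/*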
*/ + } - if (cksum != queue_cur->exec_cksum) { + } - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + } - } + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP1] += stage_max; - } + /* Two walking bits. */ - new_hit_cnt = queued_paths + unique_crashes; + stage_name = "bitflip 2/1"; + stage_short = "flip2"; + stage_max = (len << 3) - 1; - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; + orig_hit_cnt = new_hit_cnt; - /* Two walking bits. */ + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; + stage_cur_byte = stage_cur >> 3; - orig_hit_cnt = new_hit_cnt; + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur_byte = stage_cur >> 3; + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + new_hit_cnt = queued_paths + unique_crashes; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP2] += stage_max; - } + /* Four walking bits. */ - new_hit_cnt = queued_paths + unique_crashes; + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; + orig_hit_cnt = new_hit_cnt; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + stage_cur_byte = stage_cur >> 3; - /* Four walking bits. */ + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); + } + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; - orig_hit_cnt = new_hit_cnt; + /* Effector map setup. These macros calculate: - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. - stage_cur_byte = stage_cur >> 3; + */ - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. 
*/ - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; - } + if (EFF_APOS(len - 1) != 0) { - new_hit_cnt = queued_paths + unique_crashes; + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; + } + /* Walking byte. */ + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; + orig_hit_cnt = new_hit_cnt; - /* Effector map setup. These macros calculate: + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. + stage_cur_byte = stage_cur; - */ + out_buf[stage_cur] ^= 0xFF; -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. */ - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; + if (!eff_map[EFF_APOS(stage_cur)]) { - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } + u32 cksum; - /* Walking byte. */ + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. */ - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; + if (cksum != queue_cur->exec_cksum) { + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; - orig_hit_cnt = new_hit_cnt; + } - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + } - stage_cur_byte = stage_cur; + out_buf[stage_cur] ^= 0xFF; - out_buf[stage_cur] ^= 0xFF; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { - if (!eff_map[EFF_APOS(stage_cur)]) { + memset(eff_map, 1, EFF_ALEN(len)); - u32 cksum; + blocks_eff_select += EFF_ALEN(len); - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. 
*/ + } else { - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; + blocks_eff_select += eff_cnt; - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } + } - } + blocks_eff_total += EFF_ALEN(len); - out_buf[stage_cur] ^= 0xFF; + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. */ + /* Two walking bytes. */ - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + if (len < 2) goto skip_bitflip; - memset(eff_map, 1, EFF_ALEN(len)); + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; - blocks_eff_select += EFF_ALEN(len); + orig_hit_cnt = new_hit_cnt; - } - else { + for (i = 0; i < len - 1; ++i) { - blocks_eff_select += eff_cnt; + /* Let's consult the effector map... */ - } + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - blocks_eff_total += EFF_ALEN(len); + --stage_max; + continue; - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; + stage_cur_byte = i; + *(u16*)(out_buf + i) ^= 0xFFFF; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + *(u16*)(out_buf + i) ^= 0xFFFF; + } - /* Two walking bytes. */ + new_hit_cnt = queued_paths + unique_crashes; - if (len < 2) goto skip_bitflip; + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + if (len < 4) goto skip_bitflip; + /* Four walking bytes. */ + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; - orig_hit_cnt = new_hit_cnt; + orig_hit_cnt = new_hit_cnt; - for (i = 0; i < len - 1; ++i) { + for (i = 0; i < len - 3; ++i) { - /* Let's consult the effector map... */ + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } + --stage_max; + continue; - stage_cur_byte = i; + } - *(u16*)(out_buf + i) ^= 0xFFFF; + stage_cur_byte = i; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - *(u16*)(out_buf + i) ^= 0xFFFF; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - } + } - new_hit_cnt = queued_paths + unique_crashes; + new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; +skip_bitflip: + if (no_arith) goto skip_arith; + /********************** + * ARITHMETIC INC/DEC * + **********************/ - if (len < 4) goto skip_bitflip; + /* 8-bit arithmetics. */ - /* Four walking bytes. 
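
The 16-bit walking flips above and the 32-bit ones below lean on XOR being its own inverse: the same ^= 0xFFFF that mangles a word also restores it, so the stage needs no scratch copy of the buffer. A self-contained check of that round trip, using memcpy in place of the unaligned *(u16*) casts:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {

      uint8_t  buf[4] = { 0x12, 0x34, 0x56, 0x78 };
      uint16_t v, before;

      memcpy(&before, buf, 2);

      memcpy(&v, buf, 2); v ^= 0xFFFF; memcpy(buf, &v, 2);  /* flip    */
      memcpy(&v, buf, 2); v ^= 0xFFFF; memcpy(buf, &v, 2);  /* restore */

      memcpy(&v, buf, 2);
      assert(v == before);   /* the round trip is exact */
      return 0;

    }
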
*/ + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + stage_val_type = STAGE_VAL_LE; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len; ++i) { - orig_hit_cnt = new_hit_cnt; + u8 orig = out_buf[i]; - for (i = 0; i < len - 3; ++i) { + /* Let's consult the effector map... */ - /* Let's consult the effector map... */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } + if (!eff_map[EFF_APOS(i)]) { - stage_cur_byte = i; + stage_max -= 2 * ARITH_MAX; + continue; - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_byte = i; - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + for (j = 1; j <= ARITH_MAX; ++j) { - } + u8 r = orig ^ (orig + j); - new_hit_cnt = queued_paths + unique_crashes; + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. */ - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; + if (!could_be_bitflip(r)) { + stage_cur_val = j; + out_buf[i] = orig + j; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else + --stage_max; + r = orig ^ (orig - j); - skip_bitflip: + if (!could_be_bitflip(r)) { - if (no_arith) goto skip_arith; + stage_cur_val = -j; + out_buf[i] = orig - j; - /********************** - * ARITHMETIC INC/DEC * - **********************/ + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* 8-bit arithmetics. */ + } else - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + --stage_max; + out_buf[i] = orig; + } + } - stage_val_type = STAGE_VAL_LE; + new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt = new_hit_cnt; + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; - for (i = 0; i < len; ++i) { + /* 16-bit arithmetics, both endians. */ - u8 orig = out_buf[i]; + if (len < 2) goto skip_arith; - /* Let's consult the effector map... */ + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } + orig_hit_cnt = new_hit_cnt; - stage_cur_byte = i; + for (i = 0; i < len - 1; ++i) { - for (j = 1; j <= ARITH_MAX; ++j) { + u16 orig = *(u16*)(out_buf + i); - u8 r = orig ^ (orig + j); + /* Let's consult the effector map... */ - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - if (!could_be_bitflip(r)) { + stage_max -= 4 * ARITH_MAX; + continue; - stage_cur_val = j; - out_buf[i] = orig + j; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_byte = i; - } else --stage_max; + for (j = 1; j <= ARITH_MAX; ++j) { - r = orig ^ (orig - j); + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); - if (!could_be_bitflip(r)) { + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. 
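
could_be_bitflip() is defined earlier in the file; it asks whether the XOR delta between the old and new value could already have been produced by the walking-bitflip stages, in which case the arithmetic result would be a duplicate execution. A condensed version, written from memory of the upstream helper and therefore only a sketch (u8/u32 mirror the types.h typedefs):

    #include <stdint.h>
    typedef uint8_t  u8;
    typedef uint32_t u32;

    static u8 could_be_bitflip(u32 xor_val) {

      u32 sh = 0;

      if (!xor_val) return 1;                /* nothing changed at all */

      /* Normalize: shift out trailing zero bits. */
      while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }

      /* Runs of 1, 2 or 4 flipped bits can start at any bit position... */
      if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

      /* ...but 8/16/32-bit flips only ever start on byte boundaries. */
      if (sh & 7) return 0;

      return xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff;

    }
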
*/ - stage_cur_val = -j; - out_buf[i] = orig - j; + stage_val_type = STAGE_VAL_LE; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { - } else --stage_max; + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; - out_buf[i] = orig; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } + } else - } + --stage_max; - new_hit_cnt = queued_paths + unique_crashes; + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else + --stage_max; + /* Big endian comes next. Same deal. */ - /* 16-bit arithmetics, both endians. */ + stage_val_type = STAGE_VAL_BE; - if (len < 2) goto skip_arith; + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else + --stage_max; - orig_hit_cnt = new_hit_cnt; + if ((orig >> 8) < j && !could_be_bitflip(r4)) { - for (i = 0; i < len - 1; ++i) { + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - u16 orig = *(u16*)(out_buf + i); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* Let's consult the effector map... */ + } else - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + --stage_max; - stage_cur_byte = i; + *(u16*)(out_buf + i) = orig; - for (j = 1; j <= ARITH_MAX; ++j) { + } - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); + } - /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. */ + new_hit_cnt = queued_paths + unique_crashes; - stage_val_type = STAGE_VAL_LE; + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + /* 32-bit arithmetics, both endians. */ - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; + if (len < 4) goto skip_arith; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; - } else --stage_max; + orig_hit_cnt = new_hit_cnt; - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + for (i = 0; i < len - 3; ++i) { - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; + u32 orig = *(u32*)(out_buf + i); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Let's consult the effector map... */ - } else --stage_max; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - /* Big endian comes next. Same deal. 
*/ + stage_max -= 4 * ARITH_MAX; + continue; - stage_val_type = STAGE_VAL_BE; + } + stage_cur_byte = i; - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + for (j = 1; j <= ARITH_MAX; ++j) { - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. */ - } else --stage_max; + stage_val_type = STAGE_VAL_LE; - if ((orig >> 8) < j && !could_be_bitflip(r4)) { + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - *(u16*)(out_buf + i) = orig; + --stage_max; - } + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - } + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; - new_hit_cnt = queued_paths + unique_crashes; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_cur++; - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; + } else + --stage_max; + /* Big endian next. */ + stage_val_type = STAGE_VAL_BE; - /* 32-bit arithmetics, both endians. */ + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { - if (len < 4) goto skip_arith; + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; + } else + --stage_max; - orig_hit_cnt = new_hit_cnt; + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - for (i = 0; i < len - 3; ++i) { + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - u32 orig = *(u32*)(out_buf + i); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* Let's consult the effector map... */ + } else - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + --stage_max; - stage_cur_byte = i; + *(u32*)(out_buf + i) = orig; - for (j = 1; j <= ARITH_MAX; ++j) { + } - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); + } - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. 
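
The & 0xff guards above (and their & 0xffff cousins below) deserve a worked example: the 16-bit stage spends an execution only when the addition actually carries into the second byte, since results confined to one byte were already produced by arith 8/8. A self-contained check:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {

      uint16_t orig = 0x01FF;

      /* Carry: (orig & 0xff) + 1 exceeds 0xff, the +1 ripples into the
         high byte, and 0x01FF -> 0x0200 changes two bytes at once. */
      assert((orig & 0xff) + 1 > 0xff);
      assert((uint16_t)(orig + 1) == 0x0200);

      orig = 0x0142;

      /* No carry: only the low byte moves, so the 8-bit stage already
         tried this exact result and the 16-bit stage skips it. */
      assert((orig & 0xff) + 1 <= 0xff);
      return 0;

    }
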
*/ + new_hit_cnt = queued_paths + unique_crashes; - stage_val_type = STAGE_VAL_LE; + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { +skip_arith: - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; + /********************** + * INTERESTING VALUES * + **********************/ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); - } else --stage_max; + stage_val_type = STAGE_VAL_LE; - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + orig_hit_cnt = new_hit_cnt; - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; + /* Setting 8-bit integers. */ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_cur++; + for (i = 0; i < len; ++i) { - } else --stage_max; + u8 orig = out_buf[i]; - /* Big endian next. */ + /* Let's consult the effector map... */ - stage_val_type = STAGE_VAL_BE; + if (!eff_map[EFF_APOS(i)]) { - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + stage_max -= sizeof(interesting_8); + continue; - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_byte = i; - } else --stage_max; + for (j = 0; j < sizeof(interesting_8); ++j) { - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + /* Skip if the value could be a product of bitflips or arithmetics. */ - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + --stage_max; + continue; - } else --stage_max; + } - *(u32*)(out_buf + i) = orig; + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - } + out_buf[i] = orig; + ++stage_cur; - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; + } + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; + /* Setting 16-bit integers, both endians. */ - skip_arith: + if (no_arith || len < 2) goto skip_interest; - /********************** - * INTERESTING VALUES * - **********************/ + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 1; ++i) { + u16 orig = *(u16*)(out_buf + i); - stage_val_type = STAGE_VAL_LE; + /* Let's consult the effector map... */ - orig_hit_cnt = new_hit_cnt; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - /* Setting 8-bit integers. */ + stage_max -= sizeof(interesting_16); + continue; - for (i = 0; i < len; ++i) { + } - u8 orig = out_buf[i]; + stage_cur_byte = i; - /* Let's consult the effector map... 
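
interesting_8 is populated from INTERESTING_8 in config.h, which is outside this hunk; the stock set, quoted here from the default configuration (patched builds may differ), targets classic boundary conditions:

    #include <stdint.h>
    typedef int8_t s8;   /* matches the types.h typedef */

    static s8 interesting_8[] = {

      -128,        /* s8 minimum: signed-overflow pivot      */
      -1,          /* 0xff: all bits set, unsigned maximum   */
      0, 1,        /* off-by-one anchors                     */
      16, 32, 64,  /* powers of two near common buffer sizes */
      100,         /* round decimal loop bound               */
      127          /* s8 maximum                             */

    };
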
*/ + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } + stage_cur_val = interesting_16[j]; - stage_cur_byte = i; + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. */ - for (j = 0; j < sizeof(interesting_8); ++j) { + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - /* Skip if the value could be a product of bitflips or arithmetics. */ + stage_val_type = STAGE_VAL_LE; - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } + *(u16*)(out_buf + i) = interesting_16[j]; - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + } else - out_buf[i] = orig; - ++stage_cur; + --stage_max; - } + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { - } + stage_val_type = STAGE_VAL_BE; - new_hit_cnt = queued_paths + unique_crashes; + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; + } else + --stage_max; + } + *(u16*)(out_buf + i) = orig; - /* Setting 16-bit integers, both endians. */ + } - if (no_arith || len < 2) goto skip_interest; + new_hit_cnt = queued_paths + unique_crashes; - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; + if (len < 4) goto skip_interest; + /* Setting 32-bit integers, both endians. */ - orig_hit_cnt = new_hit_cnt; + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); - for (i = 0; i < len - 1; ++i) { + orig_hit_cnt = new_hit_cnt; - u16 orig = *(u16*)(out_buf + i); + for (i = 0; i < len - 3; ++i) { - /* Let's consult the effector map... */ + u32 orig = *(u32*)(out_buf + i); - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } + /* Let's consult the effector map... */ - stage_cur_byte = i; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + stage_max -= sizeof(interesting_32) >> 1; + continue; - stage_cur_val = interesting_16[j]; + } - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. 
*/ + stage_cur_byte = i; - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - stage_val_type = STAGE_VAL_LE; + stage_cur_val = interesting_32[j]; - *(u16*)(out_buf + i) = interesting_16[j]; + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { - } else --stage_max; + stage_val_type = STAGE_VAL_LE; - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + *(u32*)(out_buf + i) = interesting_32[j]; - stage_val_type = STAGE_VAL_BE; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } else - } else --stage_max; + --stage_max; - } + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - *(u16*)(out_buf + i) = orig; + stage_val_type = STAGE_VAL_BE; - } + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - new_hit_cnt = queued_paths + unique_crashes; + } else - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; + --stage_max; + } + *(u32*)(out_buf + i) = orig; + } + new_hit_cnt = queued_paths + unique_crashes; - if (len < 4) goto skip_interest; + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; - /* Setting 32-bit integers, both endians. */ +skip_interest: - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + /******************** + * DICTIONARY STUFF * + ********************/ + if (!extras_cnt) goto skip_user_extras; - orig_hit_cnt = new_hit_cnt; + /* Overwrite with user-supplied extras. */ - for (i = 0; i < len - 3; ++i) { + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; - u32 orig = *(u32*)(out_buf + i); + stage_val_type = STAGE_VAL_NONE; - /* Let's consult the effector map... */ + orig_hit_cnt = new_hit_cnt; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } + for (i = 0; i < len; ++i) { - stage_cur_byte = i; + u32 last_len = 0; - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + stage_cur_byte = i; - stage_cur_val = interesting_32[j]; + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. 
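
The skip conditions compose into a strict hierarchy: a stage executes a value only if no cheaper stage could have synthesized it, so arith skips bitflip-reachable results, and the interesting-value stages additionally skip arith-reachable ones and narrower interesting insertions. Folded into one hypothetical predicate, using this file's existing helpers with their signatures assumed from upstream:

    /* Worth spending an exec on a 16-bit little-endian interesting write? */
    static u8 worth_exec_i16(u16 orig, s16 val) {

      return !could_be_bitflip(orig ^ (u16)val) &&
             !could_be_arith(orig, (u16)val, 2) &&
             !could_be_interest(orig, (u16)val, 2, 0);

    }
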
*/ + for (j = 0; j < extras_cnt; ++j) { - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. */ - stage_val_type = STAGE_VAL_LE; + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - *(u32*)(out_buf + i) = interesting_32[j]; + --stage_max; + continue; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - stage_val_type = STAGE_VAL_BE; + ++stage_cur; - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - } + } - *(u32*)(out_buf + i) = orig; + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; - new_hit_cnt = queued_paths + unique_crashes; + /* Insertion of user-supplied extras. */ - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; + orig_hit_cnt = new_hit_cnt; + ex_tmp = ck_alloc(len + MAX_DICT_FILE); + for (i = 0; i <= len; ++i) { + stage_cur_byte = i; - skip_interest: + for (j = 0; j < extras_cnt; ++j) { - /******************** - * DICTIONARY STUFF * - ********************/ + if (len + extras[j].len > MAX_FILE) { - if (!extras_cnt) goto skip_user_extras; + --stage_max; + continue; - /* Overwrite with user-supplied extras. */ + } - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { + ck_free(ex_tmp); + goto abandon_entry; - stage_val_type = STAGE_VAL_NONE; + } - orig_hit_cnt = new_hit_cnt; + ++stage_cur; - for (i = 0; i < len; ++i) { + } - u32 last_len = 0; + /* Copy head */ + ex_tmp[i] = out_buf[i]; - stage_cur_byte = i; + } - /* Extras are sorted by size, from smallest to largest. This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. */ + ck_free(ex_tmp); - for (j = 0; j < extras_cnt; ++j) { + new_hit_cnt = queued_paths + unique_crashes; - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. 
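
With the assumed config.h default of MAX_DET_EXTRAS = 200, the probabilistic clause keeps this stage's expected cost flat: each token survives the roll with probability 200/extras_cnt, so on average about 200 tokens are tried per file offset no matter how large the dictionary grows. The condition in isolation:

    #include <stdint.h>
    typedef uint32_t u32;

    #define MAX_DET_EXTRAS 200   /* assumed config.h default */

    extern u32 UR(u32 limit);    /* afl-fuzz RNG: uniform in [0, limit) */

    /* Returns 1 when the token should be skipped on cost grounds alone. */
    static int skip_det_extra(u32 extras_cnt) {

      return extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS;

    }
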
Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { +skip_user_extras: - --stage_max; - continue; + if (!a_extras_cnt) goto skip_extras; - } + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); + stage_val_type = STAGE_VAL_NONE; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + orig_hit_cnt = new_hit_cnt; - ++stage_cur; + for (i = 0; i < len; ++i) { - } + u32 last_len = 0; - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); + stage_cur_byte = i; - } + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { - new_hit_cnt = queued_paths + unique_crashes; + /* See the comment in the earlier code; extras are sorted by size. */ - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { - /* Insertion of user-supplied extras. */ + --stage_max; + continue; - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + } + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - orig_hit_cnt = new_hit_cnt; + } - ex_tmp = ck_alloc(len + MAX_DICT_FILE); + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - for (i = 0; i <= len; ++i) { + } - stage_cur_byte = i; + new_hit_cnt = queued_paths + unique_crashes; - for (j = 0; j < extras_cnt; ++j) { + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_AO] += stage_max; - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } +skip_extras: - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); + /* If we made this to here without jumping to havoc_stage or abandon_entry, + we're properly done with deterministic steps and can mark it as such + in the .state/ directory. */ - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } + /**************** + * RANDOM HAVOC * + ****************/ - ++stage_cur; +havoc_stage: +pacemaker_fuzzing: - } + stage_cur_byte = -1; - /* Copy head */ - ex_tmp[i] = out_buf[i]; + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ - } + if (!splice_cycle) { - ck_free(ex_tmp); + stage_name = "MOpt-havoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? 
HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; - new_hit_cnt = queued_paths + unique_crashes; + } else { - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UI] += stage_max; + static u8 tmp[32]; - skip_user_extras: + perf_score = orig_perf; - if (!a_extras_cnt) goto skip_extras; + sprintf(tmp, "MOpt-splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - stage_name = "auto extras (over)"; - stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + } + s32 temp_len_puppet; + cur_ms_lv = get_cur_time(); - stage_val_type = STAGE_VAL_NONE; + { - orig_hit_cnt = new_hit_cnt; + if (key_puppet == 1) { - for (i = 0; i < len; ++i) { + if (unlikely(orig_hit_cnt_puppet == 0)) { - u32 last_len = 0; + orig_hit_cnt_puppet = queued_paths + unique_crashes; + last_limit_time_start = get_cur_time(); + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); - stage_cur_byte = i; + } - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + } - /* See the comment in the earlier code; extras are sorted by size. */ + { - if (a_extras[j].len > len - i || - !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { - - --stage_max; - continue; - - } - - last_len = a_extras[j].len; - memcpy(out_buf + i, a_extras[j].data, last_len); - - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - - ++stage_cur; - - } - - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); - - } - - new_hit_cnt = queued_paths + unique_crashes; - - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; - - skip_extras: - - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ - - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - - /**************** - * RANDOM HAVOC * - ****************/ - - havoc_stage: - pacemaker_fuzzing: - - - stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ - - if (!splice_cycle) { - - stage_name = "MOpt-havoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; - - } - else { - - static u8 tmp[32]; - - perf_score = orig_perf; - - sprintf(tmp, "MOpt-splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - - } - - s32 temp_len_puppet; - cur_ms_lv = get_cur_time(); - - { - - - if (key_puppet == 1) - { - if (unlikely(orig_hit_cnt_puppet == 0)) - { - orig_hit_cnt_puppet = queued_paths + unique_crashes; - last_limit_time_start = get_cur_time(); - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); - } - } - - - { #ifndef IGNORE_FINDS - havoc_stage_puppet: + havoc_stage_puppet: #endif - stage_cur_byte = -1; + stage_cur_byte = -1; - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. 
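
SPLICE_CYCLES_puppet is re-rolled at the start of each pilot run, uniformly over the inclusive range [SPLICE_CYCLES_puppet_low, SPLICE_CYCLES_puppet_up]; the + 1 inside UR() is what makes the upper bound reachable. The draw in isolation, with purely illustrative bounds:

    #include <stdint.h>
    typedef uint32_t u32;

    extern u32 UR(u32 limit);   /* uniform in [0, limit) */

    /* UR(up - low + 1) yields 0 .. (up - low), so the sum covers
       low .. up with every value equally likely. With illustrative
       bounds low = 5 and up = 20, that is one of 16 possible splice
       budgets per pilot period. */
    static u32 roll_splice_cycles(u32 low, u32 up) {

      return UR(up - low + 1) + low;

    }
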
*/ + /* The havoc stage mutation code is also invoked when splicing files; if + the splice_cycle variable is set, generate different descriptions and + such. */ - if (!splice_cycle) { + if (!splice_cycle) { - stage_name = "MOpt avoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + stage_name = "MOpt avoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + perf_score / havoc_div / 100; - } - else { - static u8 tmp[32]; - perf_score = orig_perf; - sprintf(tmp, "MOpt splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - } + } else { + static u8 tmp[32]; + perf_score = orig_perf; + sprintf(tmp, "MOpt splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + } - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; + if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - temp_len = len; + temp_len = len; - orig_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt = queued_paths + unique_crashes; - havoc_queued = queued_paths; + havoc_queued = queued_paths; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + stage_cur_val = use_stacking; - u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); + for (i = 0; i < operator_num; ++i) { - stage_cur_val = use_stacking; + stage_cycles_puppet_v3[swarm_now][i] = + stage_cycles_puppet_v2[swarm_now][i]; - - for (i = 0; i < operator_num; ++i) - { - stage_cycles_puppet_v3[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; - } - - - for (i = 0; i < use_stacking; ++i) { - - switch (select_algorithm()) { - - case 0: - /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, UR(temp_len << 3)); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1; - break; - - - case 1: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1; - break; - - case 2: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - FLIP_BIT(out_buf, temp_len_puppet + 2); - FLIP_BIT(out_buf, temp_len_puppet + 3); - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1; - break; - - case 3: - if (temp_len < 4) break; - out_buf[UR(temp_len)] ^= 0xFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1; - break; - - case 4: - if (temp_len < 8) break; - *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1; - break; - - case 5: - if (temp_len < 8) break; - *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; - stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1; - break; - - case 6: - out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); - out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1; - break; - - case 7: - /* Randomly subtract from word, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } - /* Randomly add to word, random endian. 
*/ - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1; - break; - - - case 8: - /* Randomly subtract from dword, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - } - /* Randomly add to dword, random endian. */ - //if (temp_len < 4) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } - else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - } - stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1; - break; - - - case 9: - /* Set byte to interesting value. */ - if (temp_len < 4) break; - out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1; - break; - - case 10: - /* Set word to interesting value, randomly choosing endian. */ - if (temp_len < 8) break; - if (UR(2)) { - *(u16*)(out_buf + UR(temp_len - 1)) = - interesting_16[UR(sizeof(interesting_16) >> 1)]; - } - else { - *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( - interesting_16[UR(sizeof(interesting_16) >> 1)]); - } - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1; - break; - - - case 11: - /* Set dword to interesting value, randomly choosing endian. */ - - if (temp_len < 8) break; - - if (UR(2)) { - *(u32*)(out_buf + UR(temp_len - 3)) = - interesting_32[UR(sizeof(interesting_32) >> 2)]; - } - else { - *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( - interesting_32[UR(sizeof(interesting_32) >> 2)]); - } - stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1; - break; - - - case 12: - - /* Just set a random byte to a random value. Because, - why not. We use XOR with 1-255 to eliminate the - possibility of a no-op. */ - - out_buf[UR(temp_len)] ^= 1 + UR(255); - stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1; - break; - - - - case 13: { - - /* Delete bytes. We're making this a bit more likely - than insertion (the next option) in hopes of keeping - files reasonably small. */ - - u32 del_from, del_len; - - if (temp_len < 2) break; - - /* Don't delete too much. */ - - del_len = choose_block_len(temp_len - 1); - - del_from = UR(temp_len - del_len + 1); - - memmove(out_buf + del_from, out_buf + del_from + del_len, - temp_len - del_from - del_len); - - temp_len -= del_len; - stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1; - break; - - } - - case 14: - - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { - - /* Clone bytes (75%) or insert a block of constant bytes (25%). 
*/ - - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; - - if (actually_clone) { - - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); - - } - else { - - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; - - } - - clone_to = UR(temp_len); - - new_buf = ck_alloc_nozero(temp_len + clone_len); - - /* Head */ - - memcpy(new_buf, out_buf, clone_to); - - /* Inserted part */ - - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); - - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); - - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1; - } - - break; - - case 15: { - - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ - - u32 copy_from, copy_to, copy_len; - - if (temp_len < 2) break; - - copy_len = choose_block_len(temp_len - 1); - - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); - - if (UR(4)) { - - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); - - } - else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); - stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1; - break; - - } - - - } - - } - - - tmp_pilot_time += 1; - - - - - u64 temp_total_found = queued_paths + unique_crashes; - - - - - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry_puppet; - - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ - - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); - - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ - - if (queued_paths != havoc_queued) { - - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } - - havoc_queued = queued_paths; - - } - - if (unlikely(queued_paths + unique_crashes > temp_total_found)) - { - u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; - total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; ++i) - { - if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet_v3[swarm_now][i]) - stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet; - } - } - - } - new_hit_cnt = queued_paths + unique_crashes; - - if (!splice_cycle) { - stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_HAVOC] += stage_max; - } else { - stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_SPLICE] += stage_max; } + for (i = 0; i < use_stacking; ++i) { + + switch (select_algorithm()) { + + case 0: + /* Flip a single bit somewhere. Spooky! 
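
The stacking depth drawn at the top of this loop is worth unpacking. Assuming the stock HAVOC_STACK_POW2 of 7 from config.h:

    /* use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2))
     *
     *   UR(7)         -> an exponent of 0..6, each equally likely
     *   1 << (1..7)   -> 2, 4, 8, 16, 32, 64 or 128 stacked mutations
     *
     * The exponent is uniform, so three of the seven equally likely
     * outcomes stay at 8 or fewer tweaks per round, while the occasional
     * round mangles the input with up to 128 of them. */
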
*/ + FLIP_BIT(out_buf, UR(temp_len << 3)); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP1] += 1; + break; + + case 1: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP2] += 1; + break; + + case 2: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + FLIP_BIT(out_buf, temp_len_puppet + 2); + FLIP_BIT(out_buf, temp_len_puppet + 3); + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP4] += 1; + break; + + case 3: + if (temp_len < 4) break; + out_buf[UR(temp_len)] ^= 0xFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP8] += 1; + break; + + case 4: + if (temp_len < 8) break; + *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP16] += 1; + break; + + case 5: + if (temp_len < 8) break; + *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; + stage_cycles_puppet_v2[swarm_now][STAGE_FLIP32] += 1; + break; + + case 6: + out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); + out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH8] += 1; + break; + + case 7: + /* Randomly subtract from word, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { + + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); + + } + + /* Randomly add to word, random endian. */ + if (UR(2)) { + + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); + + } + + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH16] += 1; + break; + + case 8: + /* Randomly subtract from dword, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { + + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); + + } + + /* Randomly add to dword, random endian. */ + // if (temp_len < 4) break; + if (UR(2)) { + + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); + + } else { + + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); + + } + + stage_cycles_puppet_v2[swarm_now][STAGE_ARITH32] += 1; + break; + + case 9: + /* Set byte to interesting value. */ + if (temp_len < 4) break; + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST8] += 1; + break; + + case 10: + /* Set word to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; + if (UR(2)) { + + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; + + } else { + + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); + + } + + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST16] += 1; + break; + + case 11: + /* Set dword to interesting value, randomly choosing endian. 
*/ + + if (temp_len < 8) break; + + if (UR(2)) { + + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; + + } else { + + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); + + } + + stage_cycles_puppet_v2[swarm_now][STAGE_INTEREST32] += 1; + break; + + case 12: + + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ + + out_buf[UR(temp_len)] ^= 1 + UR(255); + stage_cycles_puppet_v2[swarm_now][STAGE_RANDOMBYTE] += 1; + break; + + case 13: { + + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ + + u32 del_from, del_len; + + if (temp_len < 2) break; + + /* Don't delete too much. */ + + del_len = choose_block_len(temp_len - 1); + + del_from = UR(temp_len - del_len + 1); + + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); + + temp_len -= del_len; + stage_cycles_puppet_v2[swarm_now][STAGE_DELETEBYTE] += 1; + break; + + } + + case 14: + + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + + /* Clone bytes (75%) or insert a block of constant bytes (25%). + */ + + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; + + if (actually_clone) { + + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); + + } else { + + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; + + } + + clone_to = UR(temp_len); + + new_buf = ck_alloc_nozero(temp_len + clone_len); + + /* Head */ + + memcpy(new_buf, out_buf, clone_to); + + /* Inserted part */ + + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + stage_cycles_puppet_v2[swarm_now][STAGE_Clone75] += 1; + + } + + break; + + case 15: { + + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ + + u32 copy_from, copy_to, copy_len; + + if (temp_len < 2) break; + + copy_len = choose_block_len(temp_len - 1); + + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); + + if (UR(4)) { + + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + + } else + + memset(out_buf + copy_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + stage_cycles_puppet_v2[swarm_now][STAGE_OverWrite75] += 1; + break; + + } + + } + + } + + tmp_pilot_time += 1; + + u64 temp_total_found = queued_paths + unique_crashes; + + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry_puppet; + + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ + + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); + + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. 
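
The "run for a bit longer" rule below doubles both the remaining budget and perf_score whenever the round produced new queue entries, but only while perf_score stays at or below havoc_max_mult * 100. Assuming the stock HAVOC_MAX_MULT of 16, the growth is capped quickly:

    /* perf_score starts at 100 for an average input:
     *
     *   100 -> 200 -> 400 -> 800 -> 1600 -> 3200  (last doubling at 1600)
     *
     * That is at most five doublings, so a single productive entry can
     * stretch its havoc budget by a factor of 32 before the cap ends
     * further growth. */
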
*/ + + if (queued_paths != havoc_queued) { + + if (perf_score <= havoc_max_mult * 100) { + + stage_max *= 2; + perf_score *= 2; + + } + + havoc_queued = queued_paths; + + } + + if (unlikely(queued_paths + unique_crashes > temp_total_found)) { + + u64 temp_temp_puppet = + queued_paths + unique_crashes - temp_total_found; + total_puppet_find = total_puppet_find + temp_temp_puppet; + for (i = 0; i < 16; ++i) { + + if (stage_cycles_puppet_v2[swarm_now][i] > + stage_cycles_puppet_v3[swarm_now][i]) + stage_finds_puppet_v2[swarm_now][i] += temp_temp_puppet; + + } + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; + + if (!splice_cycle) { + + stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_HAVOC] += stage_max; + + } else { + + stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_SPLICE] += stage_max; + + } + #ifndef IGNORE_FINDS - /************ - * SPLICING * - ************/ + /************ + * SPLICING * + ************/ + retry_splicing_puppet: - retry_splicing_puppet: + if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && + queued_paths > 1 && queue_cur->len > 1) { - if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && - queued_paths > 1 && queue_cur->len > 1) { + struct queue_entry* target; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; - struct queue_entry* target; - u32 tid, split_at; - u8* new_buf; - s32 f_diff, l_diff; + /* First of all, if we've modified in_buf for havoc, let's clean that + up... */ - /* First of all, if we've modified in_buf for havoc, let's clean that - up... */ + if (in_buf != orig_in) { - if (in_buf != orig_in) { - ck_free(in_buf); - in_buf = orig_in; - len = queue_cur->len; - } + ck_free(in_buf); + in_buf = orig_in; + len = queue_cur->len; - /* Pick a random queue entry and seek to it. Don't splice with yourself. */ + } - do { tid = UR(queued_paths); } while (tid == current_entry); + /* Pick a random queue entry and seek to it. Don't splice with yourself. + */ - splicing_with = tid; - target = queue; + do { - while (tid >= 100) { target = target->next_100; tid -= 100; } - while (tid--) target = target->next; + tid = UR(queued_paths); - /* Make sure that the target has a reasonable length. */ + } while (tid == current_entry); - while (target && (target->len < 2 || target == queue_cur)) { - target = target->next; - ++splicing_with; - } + splicing_with = tid; + target = queue; - if (!target) goto retry_splicing_puppet; + while (tid >= 100) { - /* Read the testcase into a new buffer. */ + target = target->next_100; + tid -= 100; - fd = open(target->fname, O_RDONLY); + } - if (fd < 0) PFATAL("Unable to open '%s'", target->fname); + while (tid--) + target = target->next; - new_buf = ck_alloc_nozero(target->len); + /* Make sure that the target has a reasonable length. */ - ck_read(fd, new_buf, target->len, target->fname); + while (target && (target->len < 2 || target == queue_cur)) { - close(fd); + target = target->next; + ++splicing_with; - /* Find a suitable splicin g location, somewhere between the first and - the last differing byte. Bail out if the difference is just a single - byte or so. */ + } - locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + if (!target) goto retry_splicing_puppet; - if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { - ck_free(new_buf); - goto retry_splicing_puppet; - } + /* Read the testcase into a new buffer. */ - /* Split somewhere between the first and last differing byte. 
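
locate_diffs() is defined elsewhere in this file; its contract is what makes the split-point arithmetic safe. A sketch consistent with how it is called here, one pass reporting the first and last differing byte, or -1/-1 for identical buffers (u8/u32/s32 mirror the types.h typedefs):

    #include <stdint.h>
    typedef uint8_t  u8;
    typedef uint32_t u32;
    typedef int32_t  s32;

    static void locate_diffs(u8* ptr1, u8* ptr2, u32 len,
                             s32* first, s32* last) {

      s32 f_loc = -1, l_loc = -1;
      u32 pos;

      for (pos = 0; pos < len; ++pos)
        if (ptr1[pos] != ptr2[pos]) {

          if (f_loc == -1) f_loc = pos;
          l_loc = pos;

        }

      *first = f_loc;
      *last  = l_loc;

    }

Since split_at = f_diff + UR(l_diff - f_diff) always lands strictly before the last differing byte, the spliced child keeps at least one byte from the second parent that disagrees with the first, so splicing can never hand back an unchanged copy of the current input.
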
*/ + fd = open(target->fname, O_RDONLY); - split_at = f_diff + UR(l_diff - f_diff); + if (fd < 0) PFATAL("Unable to open '%s'", target->fname); - /* Do the thing. */ + new_buf = ck_alloc_nozero(target->len); - len = target->len; - memcpy(new_buf, in_buf, split_at); - in_buf = new_buf; - ck_free(out_buf); - out_buf = ck_alloc_nozero(len); - memcpy(out_buf, in_buf, len); - goto havoc_stage_puppet; + ck_read(fd, new_buf, target->len, target->fname); - } + close(fd); + + /* Find a suitable splicin g location, somewhere between the first and + the last differing byte. Bail out if the difference is just a single + byte or so. */ + + locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); + + if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { + + ck_free(new_buf); + goto retry_splicing_puppet; + + } + + /* Split somewhere between the first and last differing byte. */ + + split_at = f_diff + UR(l_diff - f_diff); + + /* Do the thing. */ + + len = target->len; + memcpy(new_buf, in_buf, split_at); + in_buf = new_buf; + ck_free(out_buf); + out_buf = ck_alloc_nozero(len); + memcpy(out_buf, in_buf, len); + goto havoc_stage_puppet; + + } #endif /* !IGNORE_FINDS */ - ret_val = 0; + ret_val = 0; - abandon_entry: - abandon_entry_puppet: + abandon_entry: + abandon_entry_puppet: - if (splice_cycle >= SPLICE_CYCLES_puppet) - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); + if (splice_cycle >= SPLICE_CYCLES_puppet) + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); + splicing_with = -1; - splicing_with = -1; + /* Update pending_not_fuzzed count if we made it through the calibration + cycle and have not seen this entry before. */ - /* Update pending_not_fuzzed count if we made it through the calibration - cycle and have not seen this entry before. 
*/ + // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { - // if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { - // queue_cur->was_fuzzed = 1; - // --pending_not_fuzzed; - // if (queue_cur->favored) --pending_favored; - // } + // queue_cur->was_fuzzed = 1; + // --pending_not_fuzzed; + // if (queue_cur->favored) --pending_favored; + // } - munmap(orig_in, queue_cur->len); + munmap(orig_in, queue_cur->len); - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); + if (key_puppet == 1) { - if (key_puppet == 1) { - if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) { - key_puppet = 0; - cur_ms_lv = get_cur_time(); - new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt_puppet = 0; - last_limit_time_start = 0; - } - } + if (unlikely(queued_paths + unique_crashes > + ((queued_paths + unique_crashes) * limit_time_bound + + orig_hit_cnt_puppet))) { + key_puppet = 0; + cur_ms_lv = get_cur_time(); + new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt_puppet = 0; + last_limit_time_start = 0; - if (unlikely(tmp_pilot_time > period_pilot)) { - total_pacemaker_time += tmp_pilot_time; - new_hit_cnt = queued_paths + unique_crashes; - swarm_fitness[swarm_now] = (double)(total_puppet_find - temp_puppet_find) / ((double)(tmp_pilot_time)/ period_pilot_tmp); - tmp_pilot_time = 0; - temp_puppet_find = total_puppet_find; + } - u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) { - double temp_eff = 0.0; + } - if (stage_cycles_puppet_v2[swarm_now][i] > stage_cycles_puppet[swarm_now][i]) - temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - stage_finds_puppet[swarm_now][i]) / - (double)(stage_cycles_puppet_v2[swarm_now][i] - stage_cycles_puppet[swarm_now][i]); + if (unlikely(tmp_pilot_time > period_pilot)) { - if (eff_best[swarm_now][i] < temp_eff) { - eff_best[swarm_now][i] = temp_eff; - L_best[swarm_now][i] = x_now[swarm_now][i]; - } + total_pacemaker_time += tmp_pilot_time; + new_hit_cnt = queued_paths + unique_crashes; + swarm_fitness[swarm_now] = + (double)(total_puppet_find - temp_puppet_find) / + ((double)(tmp_pilot_time) / period_pilot_tmp); + tmp_pilot_time = 0; + temp_puppet_find = total_puppet_find; - stage_finds_puppet[swarm_now][i] = stage_finds_puppet_v2[swarm_now][i]; - stage_cycles_puppet[swarm_now][i] = stage_cycles_puppet_v2[swarm_now][i]; - temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i]; - } + u64 temp_stage_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { - swarm_now = swarm_now + 1; - if (swarm_now == swarm_num) { - key_module = 1; - for (i = 0; i < operator_num; ++i) { - core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; - core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; - core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; - } + double temp_eff = 0.0; - double swarm_eff = 0.0; - swarm_now = 0; - for (i = 0; i < swarm_num; ++i) { - if (swarm_fitness[i] > swarm_eff) { - swarm_eff = swarm_fitness[i]; - swarm_now = i; - } - } - if (swarm_now <0 || swarm_now > swarm_num - 1) - PFATAL("swarm_now error number %d", swarm_now); + if (stage_cycles_puppet_v2[swarm_now][i] > + stage_cycles_puppet[swarm_now][i]) + temp_eff = (double)(stage_finds_puppet_v2[swarm_now][i] - + stage_finds_puppet[swarm_now][i]) / + (double)(stage_cycles_puppet_v2[swarm_now][i] - + 
stage_cycles_puppet[swarm_now][i]); - } - } - return ret_val; - } - } + if (eff_best[swarm_now][i] < temp_eff) { + eff_best[swarm_now][i] = temp_eff; + L_best[swarm_now][i] = x_now[swarm_now][i]; + + } + + stage_finds_puppet[swarm_now][i] = + stage_finds_puppet_v2[swarm_now][i]; + stage_cycles_puppet[swarm_now][i] = + stage_cycles_puppet_v2[swarm_now][i]; + temp_stage_finds_puppet += stage_finds_puppet[swarm_now][i]; + + } + + swarm_now = swarm_now + 1; + if (swarm_now == swarm_num) { + + key_module = 1; + for (i = 0; i < operator_num; ++i) { + + core_operator_cycles_puppet_v2[i] = core_operator_cycles_puppet[i]; + core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet[i]; + core_operator_finds_puppet_v2[i] = core_operator_finds_puppet[i]; + + } + + double swarm_eff = 0.0; + swarm_now = 0; + for (i = 0; i < swarm_num; ++i) { + + if (swarm_fitness[i] > swarm_eff) { + + swarm_eff = swarm_fitness[i]; + swarm_now = i; + + } + + } + + if (swarm_now < 0 || swarm_now > swarm_num - 1) + PFATAL("swarm_now error number %d", swarm_now); + + } + + } + + return ret_val; + + } + + } #undef FLIP_BIT } - u8 core_fuzzing(char** argv) { - int i; - if (swarm_num == 1) { - key_module = 2; - return 0; - } + int i; + if (swarm_num == 1) { - s32 len, fd, temp_len, j; - u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; - u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; - u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + key_module = 2; + return 0; - u8 ret_val = 1, doing_det = 0; + } - u8 a_collect[MAX_AUTO_EXTRA]; - u32 a_len = 0; + s32 len, fd, temp_len, j; + u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; + u64 havoc_queued, orig_hit_cnt, new_hit_cnt, cur_ms_lv; + u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; + + u8 ret_val = 1, doing_det = 0; + + u8 a_collect[MAX_AUTO_EXTRA]; + u32 a_len = 0; #ifdef IGNORE_FINDS - /* In IGNORE_FINDS mode, skip any entries that weren't in the - initial data set. */ + /* In IGNORE_FINDS mode, skip any entries that weren't in the + initial data set. */ - if (queue_cur->depth > 1) return 1; + if (queue_cur->depth > 1) return 1; #else - if (pending_favored) { + if (pending_favored) { - /* If we have any favored, non-fuzzed new arrivals in the queue, - possibly skip to them at the expense of already-fuzzed or non-favored - cases. */ + /* If we have any favored, non-fuzzed new arrivals in the queue, + possibly skip to them at the expense of already-fuzzed or non-favored + cases. */ - if ((queue_cur->was_fuzzed || !queue_cur->favored) && - UR(100) < SKIP_TO_NEW_PROB) return 1; + if ((queue_cur->was_fuzzed || !queue_cur->favored) && + UR(100) < SKIP_TO_NEW_PROB) + return 1; - } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { + } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { - /* Otherwise, still possibly skip non-favored cases, albeit less often. - The odds of skipping stuff are higher for already-fuzzed inputs and - lower for never-fuzzed entries. */ + /* Otherwise, still possibly skip non-favored cases, albeit less often. + The odds of skipping stuff are higher for already-fuzzed inputs and + lower for never-fuzzed entries. 
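Those skip odds come straight from config.h: SKIP_TO_NEW_PROB is 99, SKIP_NFAV_NEW_PROB 75 and SKIP_NFAV_OLD_PROB 95 in the stock tree, so a non-favored, already-fuzzed entry is passed over 95% of the time, and nearly everything yields while favored, never-fuzzed arrivals are pending. Condensed into one predicate (a sketch, assuming ur(100) is uniform on [0, 100); the real branch additionally requires non-dumb mode and queued_paths > 10 for the non-favored case):

  static int should_skip_entry(int pending_favored, int was_fuzzed,
                               int favored, int later_cycle,
                               unsigned (*ur)(unsigned)) {

    if (pending_favored)                /* make way for new favorites */
      return (was_fuzzed || !favored) && ur(100) < 99; /* SKIP_TO_NEW_PROB */

    if (!favored)
      return ur(100) < ((later_cycle && !was_fuzzed)
                            ? 75        /* SKIP_NFAV_NEW_PROB */
                            : 95);      /* SKIP_NFAV_OLD_PROB */

    return 0;

  }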
*/ - if (queue_cycle > 1 && !queue_cur->was_fuzzed) { + if (queue_cycle > 1 && !queue_cur->was_fuzzed) { - if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; + if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; - } else { + } else { - if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; + if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; - } + } - } + } #endif /* ^IGNORE_FINDS */ - if (not_on_tty) { - ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", - current_entry, queued_paths, unique_crashes); - fflush(stdout); - } + if (not_on_tty) { - /* Map the test case into memory. */ + ACTF("Fuzzing test case #%u (%u total, %llu uniq crashes found)...", + current_entry, queued_paths, unique_crashes); + fflush(stdout); - fd = open(queue_cur->fname, O_RDONLY); + } - if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); + /* Map the test case into memory. */ - len = queue_cur->len; + fd = open(queue_cur->fname, O_RDONLY); - orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); - if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); + len = queue_cur->len; - close(fd); + orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); - /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every - single byte anyway, so it wouldn't give us any performance or memory usage - benefits. */ + if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); - out_buf = ck_alloc_nozero(len); + close(fd); - subseq_tmouts = 0; + /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every + single byte anyway, so it wouldn't give us any performance or memory usage + benefits. */ - cur_depth = queue_cur->depth; + out_buf = ck_alloc_nozero(len); - /******************************************* - * CALIBRATION (only if failed earlier on) * - *******************************************/ + subseq_tmouts = 0; - if (queue_cur->cal_failed) { + cur_depth = queue_cur->depth; - u8 res = FAULT_TMOUT; + /******************************************* + * CALIBRATION (only if failed earlier on) * + *******************************************/ - if (queue_cur->cal_failed < CAL_CHANCES) { + if (queue_cur->cal_failed) { - res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); + u8 res = FAULT_TMOUT; - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + if (queue_cur->cal_failed < CAL_CHANCES) { - } + res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); - if (stop_soon || res != crash_mode) { - ++cur_skipped_paths; - goto abandon_entry; - } + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - } + } - /************ - * TRIMMING * - ************/ + if (stop_soon || res != crash_mode) { - if (!dumb_mode && !queue_cur->trim_done) { + ++cur_skipped_paths; + goto abandon_entry; - u8 res = trim_case(argv, queue_cur, in_buf); + } - if (res == FAULT_ERROR) - FATAL("Unable to execute target application"); + } - if (stop_soon) { - ++cur_skipped_paths; - goto abandon_entry; - } + /************ + * TRIMMING * + ************/ - /* Don't retry trimming, even if it failed. 
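trim_case() itself is not shown in this hunk; as a rough sketch, it shrinks the input coarse-to-fine, starting with large cut blocks and halving the block size, keeping any cut that leaves the coverage checksum unchanged. The constants below are the stock config.h tuning (TRIM_START_STEPS 16, TRIM_END_STEPS 1024, TRIM_MIN_BYTES 4); next_p2() and try_without() are assumed helpers, the latter standing in for "re-run the target without this block and compare trace checksums":

  size_t p2 = next_p2(len);                 /* next power of two >= len */
  size_t min_block = p2 / 1024 > 4 ? p2 / 1024 : 4;

  for (size_t rm = p2 / 16; rm >= min_block; rm /= 2) {

    size_t pos = 0;
    while (pos + rm <= len) {
      if (try_without(buf, &len, pos, rm)) continue; /* cut kept, len shrank */
      pos += rm;                                     /* cut rejected, step   */
    }

  }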
*/ + if (!dumb_mode && !queue_cur->trim_done) { - queue_cur->trim_done = 1; + u8 res = trim_case(argv, queue_cur, in_buf); - len = queue_cur->len; + if (res == FAULT_ERROR) FATAL("Unable to execute target application"); - } + if (stop_soon) { - memcpy(out_buf, in_buf, len); + ++cur_skipped_paths; + goto abandon_entry; - /********************* - * PERFORMANCE SCORE * - *********************/ + } - orig_perf = perf_score = calculate_score(queue_cur); + /* Don't retry trimming, even if it failed. */ - /* Skip right away if -d is given, if we have done deterministic fuzzing on - this entry ourselves (was_fuzzed), or if it has gone through deterministic - testing in earlier, resumed runs (passed_det). */ + queue_cur->trim_done = 1; - if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) - goto havoc_stage; + len = queue_cur->len; - /* Skip deterministic fuzzing if exec path checksum puts this out of scope - for this master instance. */ + } - if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) - goto havoc_stage; + memcpy(out_buf, in_buf, len); + /********************* + * PERFORMANCE SCORE * + *********************/ - cur_ms_lv = get_cur_time(); - if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || - (last_crash_time != 0 && cur_ms_lv - last_crash_time < limit_time_puppet) || last_path_time == 0))) - { - key_puppet = 1; - goto pacemaker_fuzzing; - } + orig_perf = perf_score = calculate_score(queue_cur); - doing_det = 1; + /* Skip right away if -d is given, if we have done deterministic fuzzing on + this entry ourselves (was_fuzzed), or if it has gone through deterministic + testing in earlier, resumed runs (passed_det). */ - /********************************************* - * SIMPLE BITFLIP (+dictionary construction) * - *********************************************/ + if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) + goto havoc_stage; -#define FLIP_BIT(_ar, _b) do { \ - u8* _arf = (u8*)(_ar); \ - u32 _bf = (_b); \ - _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ + /* Skip deterministic fuzzing if exec path checksum puts this out of scope + for this master instance. */ + + if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) + goto havoc_stage; + + cur_ms_lv = get_cur_time(); + if (!(key_puppet == 0 && ((cur_ms_lv - last_path_time < limit_time_puppet) || + (last_crash_time != 0 && + cur_ms_lv - last_crash_time < limit_time_puppet) || + last_path_time == 0))) { + + key_puppet = 1; + goto pacemaker_fuzzing; + + } + + doing_det = 1; + + /********************************************* + * SIMPLE BITFLIP (+dictionary construction) * + *********************************************/ + +#define FLIP_BIT(_ar, _b) \ + do { \ + \ + u8* _arf = (u8*)(_ar); \ + u32 _bf = (_b); \ + _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \ + \ } while (0) - /* Single walking bit. */ + /* Single walking bit. 
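Note the bit order: FLIP_BIT treats bit 0 as the most significant bit of byte 0, since the mask 128 >> (_bf & 7) walks from 0x80 down to 0x01 within each byte while _bf >> 3 selects the byte. Concretely:

  u8 buf[2] = { 0, 0 };

  FLIP_BIT(buf, 0);   /* buf[0] == 0x80: bit 0 is the MSB of byte 0 */
  FLIP_BIT(buf, 7);   /* buf[0] == 0x81: bit 7 is the LSB of byte 0 */
  FLIP_BIT(buf, 8);   /* buf[1] == 0x80: bit 8 starts byte 1        */
  FLIP_BIT(buf, 8);   /* buf[1] == 0x00: flipping twice restores    */

That last property, XOR being its own inverse, is what lets every stage flip, execute, and flip back without keeping a scratch copy of the buffer.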
*/

-  stage_short = "flip1";
-  stage_max = len << 3;
-  stage_name = "bitflip 1/1";
+    stage_short = "flip1";
+    stage_max = len << 3;
+    stage_name = "bitflip 1/1";

-  stage_val_type = STAGE_VAL_NONE;
+    stage_val_type = STAGE_VAL_NONE;

-  orig_hit_cnt = queued_paths + unique_crashes;
+    orig_hit_cnt = queued_paths + unique_crashes;

-  prev_cksum = queue_cur->exec_cksum;
+    prev_cksum = queue_cur->exec_cksum;

-  for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
+    for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {

-    stage_cur_byte = stage_cur >> 3;
+      stage_cur_byte = stage_cur >> 3;

-    FLIP_BIT(out_buf, stage_cur);
+      FLIP_BIT(out_buf, stage_cur);

-    if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;
+      if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry;

-    FLIP_BIT(out_buf, stage_cur);
+      FLIP_BIT(out_buf, stage_cur);

-    /* While flipping the least significant bit in every byte, pull of an extra
-       trick to detect possible syntax tokens. In essence, the idea is that if
-       you have a binary blob like this:
+      /* While flipping the least significant bit in every byte, pull off an extra
+         trick to detect possible syntax tokens. In essence, the idea is that if
+         you have a binary blob like this:

-       xxxxxxxxIHDRxxxxxxxx
+         xxxxxxxxIHDRxxxxxxxx

-       ...and changing the leading and trailing bytes causes variable or no
-       changes in program flow, but touching any character in the "IHDR" string
-       always produces the same, distinctive path, it's highly likely that
-       "IHDR" is an atomically-checked magic value of special significance to
-       the fuzzed format.
+         ...and changing the leading and trailing bytes causes variable or no
+         changes in program flow, but touching any character in the "IHDR" string
+         always produces the same, distinctive path, it's highly likely that
+         "IHDR" is an atomically-checked magic value of special significance to
+         the fuzzed format.

-       We do this here, rather than as a separate stage, because it's a nice
-       way to keep the operation approximately "free" (i.e., no extra execs).
+         We do this here, rather than as a separate stage, because it's a nice
+         way to keep the operation approximately "free" (i.e., no extra execs).

-       Empirically, performing the check when flipping the least significant bit
-       is advantageous, compared to doing it at the time of more disruptive
-       changes, where the program flow may be affected in more violent ways.
+         Empirically, performing the check when flipping the least significant bit
+         is advantageous, compared to doing it at the time of more disruptive
+         changes, where the program flow may be affected in more violent ways.

-       The caveat is that we won't generate dictionaries in the -d mode or -S
-       mode - but that's probably a fair trade-off.
+         The caveat is that we won't generate dictionaries in the -d mode or -S
+         mode - but that's probably a fair trade-off.

-       This won't work particularly well with paths that exhibit variable
-       behavior, but fails gracefully, so we'll carry out the checks anyway.
+         This won't work particularly well with paths that exhibit variable
+         behavior, but fails gracefully, so we'll carry out the checks anyway.
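The collection loop that follows compresses to a small invariant: while LSB-flips keep producing one identical, non-baseline checksum, the touched bytes accumulate into a candidate token; any checksum edge flushes the candidate if its length falls inside the stock MIN_AUTO_EXTRA..MAX_AUTO_EXTRA window (3..32 in config.h). A sketch, with cksum_after_flip() standing in for the flip/execute/hash32 round trip:

  #include <stddef.h>
  #include <stdint.h>

  static void collect_tokens_sketch(const uint8_t* buf, size_t len,
                                    uint32_t base_cksum,
                                    uint32_t (*cksum_after_flip)(size_t),
                                    void (*emit)(const uint8_t*, size_t)) {

    uint8_t  tok[32];                 /* MAX_AUTO_EXTRA in the stock tree */
    size_t   tok_len = 0;
    uint32_t prev = base_cksum;

    for (size_t i = 0; i < len; ++i) {

      uint32_t cur = cksum_after_flip(i);

      if (cur != prev) {              /* edge: flush the candidate */
        if (tok_len >= 3 && tok_len <= 32) emit(tok, tok_len);
        tok_len = 0;
        prev = cur;
      }

      /* Grow the token only if the flip actually changed behavior. */
      if (cur != base_cksum && tok_len < 32) tok[tok_len++] = buf[i];

    }

    if (tok_len >= 3 && tok_len <= 32) emit(tok, tok_len);  /* tail flush */

  }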
- */ + */ - if (!dumb_mode && (stage_cur & 7) == 7) { + if (!dumb_mode && (stage_cur & 7) == 7) { - u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - if (stage_cur == stage_max - 1 && cksum == prev_cksum) { + if (stage_cur == stage_max - 1 && cksum == prev_cksum) { - /* If at end of file and we are still collecting a string, grab the - final character and force output. */ + /* If at end of file and we are still collecting a string, grab the + final character and force output. */ - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - } - else if (cksum != prev_cksum) { + } else if (cksum != prev_cksum) { - /* Otherwise, if the checksum has changed, see if we have something - worthwhile queued up, and collect that if the answer is yes. */ + /* Otherwise, if the checksum has changed, see if we have something + worthwhile queued up, and collect that if the answer is yes. */ - if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) - maybe_add_auto(a_collect, a_len); + if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) + maybe_add_auto(a_collect, a_len); - a_len = 0; - prev_cksum = cksum; + a_len = 0; + prev_cksum = cksum; - } + } - /* Continue collecting string, but only if the bit flip actually made - any difference - we don't want no-op tokens. */ + /* Continue collecting string, but only if the bit flip actually made + any difference - we don't want no-op tokens. */ - if (cksum != queue_cur->exec_cksum) { + if (cksum != queue_cur->exec_cksum) { - if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; - ++a_len; + if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; + ++a_len; - } + } - } + } - } + } - new_hit_cnt = queued_paths + unique_crashes; + new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP1] += stage_max; + stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP1] += stage_max; + /* Two walking bits. */ + stage_name = "bitflip 2/1"; + stage_short = "flip2"; + stage_max = (len << 3) - 1; - /* Two walking bits. */ + orig_hit_cnt = new_hit_cnt; - stage_name = "bitflip 2/1"; - stage_short = "flip2"; - stage_max = (len << 3) - 1; + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - orig_hit_cnt = new_hit_cnt; + stage_cur_byte = stage_cur >> 3; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); - stage_cur_byte = stage_cur >> 3; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + } - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP2] += stage_max; - new_hit_cnt = queued_paths + unique_crashes; + /* Four walking bits. 
*/ - stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP2] += stage_max; + stage_name = "bitflip 4/1"; + stage_short = "flip4"; + stage_max = (len << 3) - 3; + orig_hit_cnt = new_hit_cnt; - /* Four walking bits. */ + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - stage_name = "bitflip 4/1"; - stage_short = "flip4"; - stage_max = (len << 3) - 3; + stage_cur_byte = stage_cur >> 3; + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - orig_hit_cnt = new_hit_cnt; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + FLIP_BIT(out_buf, stage_cur); + FLIP_BIT(out_buf, stage_cur + 1); + FLIP_BIT(out_buf, stage_cur + 2); + FLIP_BIT(out_buf, stage_cur + 3); - stage_cur_byte = stage_cur >> 3; + } - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + new_hit_cnt = queued_paths + unique_crashes; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP4] += stage_max; - FLIP_BIT(out_buf, stage_cur); - FLIP_BIT(out_buf, stage_cur + 1); - FLIP_BIT(out_buf, stage_cur + 2); - FLIP_BIT(out_buf, stage_cur + 3); + /* Effector map setup. These macros calculate: - } + EFF_APOS - position of a particular file offset in the map. + EFF_ALEN - length of a map with a particular number of bytes. + EFF_SPAN_ALEN - map span for a sequence of bytes. - new_hit_cnt = queued_paths + unique_crashes; + */ - stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP4] += stage_max; +#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) +#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) +#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) +#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l)-1) - EFF_APOS(_p) + 1) + /* Initialize effector map for the next step (see comments below). Always + flag first and last byte as doing something. */ - /* Effector map setup. These macros calculate: + eff_map = ck_alloc(EFF_ALEN(len)); + eff_map[0] = 1; - EFF_APOS - position of a particular file offset in the map. - EFF_ALEN - length of a map with a particular number of bytes. - EFF_SPAN_ALEN - map span for a sequence of bytes. + if (EFF_APOS(len - 1) != 0) { - */ + eff_map[EFF_APOS(len - 1)] = 1; + ++eff_cnt; -#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) -#define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) -#define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) -#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) + } - /* Initialize effector map for the next step (see comments below). Always - flag first and last byte as doing something. */ + /* Walking byte. */ - eff_map = ck_alloc(EFF_ALEN(len)); - eff_map[0] = 1; + stage_name = "bitflip 8/8"; + stage_short = "flip8"; + stage_max = len; - if (EFF_APOS(len - 1) != 0) { - eff_map[EFF_APOS(len - 1)] = 1; - ++eff_cnt; - } + orig_hit_cnt = new_hit_cnt; - /* Walking byte. 
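With the stock EFF_MAP_SCALE2 of 3, one effector-map cell covers an eight-byte block of the input, and the macros above reduce to shift arithmetic. A few concrete evaluations:

  /* EFF_MAP_SCALE2 == 3 in stock config.h: 1 map cell per 8 input bytes. */
  EFF_APOS(0);          /* == 0: offsets 0..7 share cell 0               */
  EFF_APOS(23);         /* == 2: offset 23 falls in cell 2               */
  EFF_ALEN(17);         /* == 3: 17 bytes span cells 0, 1 plus a tail    */
  EFF_SPAN_ALEN(6, 4);  /* == 2: bytes 6..9 straddle the cell 0/1 border */

So a 17-byte input gets a 3-byte map, and later a dictionary token is tested at offset i only if memchr() finds at least one live cell across EFF_SPAN_ALEN(i, token_len).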
*/ + for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - stage_name = "bitflip 8/8"; - stage_short = "flip8"; - stage_max = len; + stage_cur_byte = stage_cur; + out_buf[stage_cur] ^= 0xFF; - orig_hit_cnt = new_hit_cnt; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { + /* We also use this stage to pull off a simple trick: we identify + bytes that seem to have no effect on the current execution path + even when fully flipped - and we skip them during more expensive + deterministic stages, such as arithmetics or known ints. */ - stage_cur_byte = stage_cur; + if (!eff_map[EFF_APOS(stage_cur)]) { - out_buf[stage_cur] ^= 0xFF; + u32 cksum; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + /* If in dumb mode or if the file is very short, just flag everything + without wasting time on checksums. */ - /* We also use this stage to pull off a simple trick: we identify - bytes that seem to have no effect on the current execution path - even when fully flipped - and we skip them during more expensive - deterministic stages, such as arithmetics or known ints. */ + if (!dumb_mode && len >= EFF_MIN_LEN) + cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); + else + cksum = ~queue_cur->exec_cksum; - if (!eff_map[EFF_APOS(stage_cur)]) { + if (cksum != queue_cur->exec_cksum) { - u32 cksum; + eff_map[EFF_APOS(stage_cur)] = 1; + ++eff_cnt; - /* If in dumb mode or if the file is very short, just flag everything - without wasting time on checksums. */ + } - if (!dumb_mode && len >= EFF_MIN_LEN) - cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); - else - cksum = ~queue_cur->exec_cksum; + } - if (cksum != queue_cur->exec_cksum) { - eff_map[EFF_APOS(stage_cur)] = 1; - ++eff_cnt; - } + out_buf[stage_cur] ^= 0xFF; - } + } - out_buf[stage_cur] ^= 0xFF; + /* If the effector map is more than EFF_MAX_PERC dense, just flag the + whole thing as worth fuzzing, since we wouldn't be saving much time + anyway. */ - } + if (eff_cnt != EFF_ALEN(len) && + eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { - /* If the effector map is more than EFF_MAX_PERC dense, just flag the - whole thing as worth fuzzing, since we wouldn't be saving much time - anyway. */ + memset(eff_map, 1, EFF_ALEN(len)); - if (eff_cnt != EFF_ALEN(len) && - eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { + blocks_eff_select += EFF_ALEN(len); - memset(eff_map, 1, EFF_ALEN(len)); + } else { - blocks_eff_select += EFF_ALEN(len); + blocks_eff_select += eff_cnt; - } - else { + } - blocks_eff_select += eff_cnt; + blocks_eff_total += EFF_ALEN(len); - } + new_hit_cnt = queued_paths + unique_crashes; - blocks_eff_total += EFF_ALEN(len); + stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP8] += stage_max; - new_hit_cnt = queued_paths + unique_crashes; + /* Two walking bytes. */ - stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP8] += stage_max; + if (len < 2) goto skip_bitflip; + stage_name = "bitflip 16/8"; + stage_short = "flip16"; + stage_cur = 0; + stage_max = len - 1; + orig_hit_cnt = new_hit_cnt; - /* Two walking bytes. */ + for (i = 0; i < len - 1; ++i) { - if (len < 2) goto skip_bitflip; + /* Let's consult the effector map... 
*/ - stage_name = "bitflip 16/8"; - stage_short = "flip16"; - stage_cur = 0; - stage_max = len - 1; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { + --stage_max; + continue; - orig_hit_cnt = new_hit_cnt; + } - for (i = 0; i < len - 1; ++i) { + stage_cur_byte = i; - /* Let's consult the effector map... */ + *(u16*)(out_buf + i) ^= 0xFFFF; - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - --stage_max; - continue; - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_byte = i; + *(u16*)(out_buf + i) ^= 0xFFFF; - *(u16*)(out_buf + i) ^= 0xFFFF; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + new_hit_cnt = queued_paths + unique_crashes; - *(u16*)(out_buf + i) ^= 0xFFFF; + stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP16] += stage_max; + if (len < 4) goto skip_bitflip; - } + /* Four walking bytes. */ - new_hit_cnt = queued_paths + unique_crashes; + stage_name = "bitflip 32/8"; + stage_short = "flip32"; + stage_cur = 0; + stage_max = len - 3; - stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP16] += stage_max; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 3; ++i) { + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - if (len < 4) goto skip_bitflip; + --stage_max; + continue; - /* Four walking bytes. */ + } - stage_name = "bitflip 32/8"; - stage_short = "flip32"; - stage_cur = 0; - stage_max = len - 3; + stage_cur_byte = i; + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - orig_hit_cnt = new_hit_cnt; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - for (i = 0; i < len - 3; ++i) { + *(u32*)(out_buf + i) ^= 0xFFFFFFFF; - /* Let's consult the effector map... */ - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - --stage_max; - continue; - } + } - stage_cur_byte = i; + new_hit_cnt = queued_paths + unique_crashes; - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_FLIP32] += stage_max; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; +skip_bitflip: - *(u32*)(out_buf + i) ^= 0xFFFFFFFF; + if (no_arith) goto skip_arith; - } + /********************** + * ARITHMETIC INC/DEC * + **********************/ - new_hit_cnt = queued_paths + unique_crashes; + /* 8-bit arithmetics. */ - stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_FLIP32] += stage_max; + stage_name = "arith 8/8"; + stage_short = "arith8"; + stage_cur = 0; + stage_max = 2 * len * ARITH_MAX; + stage_val_type = STAGE_VAL_LE; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len; ++i) { - skip_bitflip: + u8 orig = out_buf[i]; - if (no_arith) goto skip_arith; + /* Let's consult the effector map... */ - /********************** - * ARITHMETIC INC/DEC * - **********************/ + if (!eff_map[EFF_APOS(i)]) { - /* 8-bit arithmetics. */ + stage_max -= 2 * ARITH_MAX; + continue; - stage_name = "arith 8/8"; - stage_short = "arith8"; - stage_cur = 0; - stage_max = 2 * len * ARITH_MAX; + } + stage_cur_byte = i; - stage_val_type = STAGE_VAL_LE; + for (j = 1; j <= ARITH_MAX; ++j) { - orig_hit_cnt = new_hit_cnt; + u8 r = orig ^ (orig + j); - for (i = 0; i < len; ++i) { + /* Do arithmetic operations only if the result couldn't be a product + of a bitflip. 
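could_be_bitflip() answers whether the XOR pattern r could have been produced by one of the earlier walking-flip stages, so the arith pass never re-executes those values. Stock afl-fuzz normalizes the pattern to its lowest set bit; the logic is essentially the following (a sketch mirroring the stock implementation):

  #include <stdint.h>

  static uint8_t could_be_bitflip_sketch(uint32_t xor_val) {

    uint32_t sh = 0;
    if (!xor_val) return 1;

    while (!(xor_val & 1)) { ++sh; xor_val >>= 1; }  /* drop trailing zeros */

    /* 1-, 2-, and 4-bit walking flips can start on any bit position. */
    if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1;

    /* 8-, 16-, and 32-bit flips only ever start on byte boundaries. */
    if (sh & 7) return 0;
    if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) return 1;

    return 0;

  }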
*/ - u8 orig = out_buf[i]; + if (!could_be_bitflip(r)) { - /* Let's consult the effector map... */ + stage_cur_val = j; + out_buf[i] = orig + j; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= 2 * ARITH_MAX; - continue; - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_byte = i; + } else - for (j = 1; j <= ARITH_MAX; ++j) { + --stage_max; - u8 r = orig ^ (orig + j); + r = orig ^ (orig - j); - /* Do arithmetic operations only if the result couldn't be a product - of a bitflip. */ + if (!could_be_bitflip(r)) { - if (!could_be_bitflip(r)) { + stage_cur_val = -j; + out_buf[i] = orig - j; - stage_cur_val = j; - out_buf[i] = orig + j; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } else - } else --stage_max; + --stage_max; - r = orig ^ (orig - j); + out_buf[i] = orig; - if (!could_be_bitflip(r)) { + } - stage_cur_val = -j; - out_buf[i] = orig - j; + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + new_hit_cnt = queued_paths + unique_crashes; - } else --stage_max; + stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH8] += stage_max; - out_buf[i] = orig; + /* 16-bit arithmetics, both endians. */ - } + if (len < 2) goto skip_arith; - } + stage_name = "arith 16/8"; + stage_short = "arith16"; + stage_cur = 0; + stage_max = 4 * (len - 1) * ARITH_MAX; - new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt = new_hit_cnt; - stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH8] += stage_max; + for (i = 0; i < len - 1; ++i) { + u16 orig = *(u16*)(out_buf + i); + /* Let's consult the effector map... */ + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - /* 16-bit arithmetics, both endians. */ + stage_max -= 4 * ARITH_MAX; + continue; - if (len < 2) goto skip_arith; + } - stage_name = "arith 16/8"; - stage_short = "arith16"; - stage_cur = 0; - stage_max = 4 * (len - 1) * ARITH_MAX; + stage_cur_byte = i; + for (j = 1; j <= ARITH_MAX; ++j) { - orig_hit_cnt = new_hit_cnt; + u16 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP16(SWAP16(orig) + j), + r4 = orig ^ SWAP16(SWAP16(orig) - j); - for (i = 0; i < len - 1; ++i) { + /* Try little endian addition and subtraction first. Do it only + if the operation would affect more than one byte (hence the + & 0xff overflow checks) and if it couldn't be a product of + a bitflip. */ - u16 orig = *(u16*)(out_buf + i); + stage_val_type = STAGE_VAL_LE; - /* Let's consult the effector map... */ + if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + stage_cur_val = j; + *(u16*)(out_buf + i) = orig + j; - stage_cur_byte = i; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - for (j = 1; j <= ARITH_MAX; ++j) { + } else - u16 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP16(SWAP16(orig) + j), - r4 = orig ^ SWAP16(SWAP16(orig) - j); + --stage_max; - /* Try little endian addition and subtraction first. Do it only - if the operation would affect more than one byte (hence the - & 0xff overflow checks) and if it couldn't be a product of - a bitflip. 
*/ + if ((orig & 0xff) < j && !could_be_bitflip(r2)) { - stage_val_type = STAGE_VAL_LE; + stage_cur_val = -j; + *(u16*)(out_buf + i) = orig - j; - if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_val = j; - *(u16*)(out_buf + i) = orig + j; + } else - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + --stage_max; - } else --stage_max; + /* Big endian comes next. Same deal. */ - if ((orig & 0xff) < j && !could_be_bitflip(r2)) { + stage_val_type = STAGE_VAL_BE; - stage_cur_val = -j; - *(u16*)(out_buf + i) = orig - j; + if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_val = j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); - } else --stage_max; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - /* Big endian comes next. Same deal. */ + } else - stage_val_type = STAGE_VAL_BE; + --stage_max; + if ((orig >> 8) < j && !could_be_bitflip(r4)) { - if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { + stage_cur_val = -j; + *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); - stage_cur_val = j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } else - } else --stage_max; + --stage_max; - if ((orig >> 8) < j && !could_be_bitflip(r4)) { + *(u16*)(out_buf + i) = orig; - stage_cur_val = -j; - *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); + } - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } - } else --stage_max; + new_hit_cnt = queued_paths + unique_crashes; - *(u16*)(out_buf + i) = orig; + stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH16] += stage_max; - } + /* 32-bit arithmetics, both endians. */ - } + if (len < 4) goto skip_arith; - new_hit_cnt = queued_paths + unique_crashes; + stage_name = "arith 32/8"; + stage_short = "arith32"; + stage_cur = 0; + stage_max = 4 * (len - 3) * ARITH_MAX; - stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH16] += stage_max; + orig_hit_cnt = new_hit_cnt; + for (i = 0; i < len - 3; ++i) { + u32 orig = *(u32*)(out_buf + i); - /* 32-bit arithmetics, both endians. */ + /* Let's consult the effector map... */ - if (len < 4) goto skip_arith; + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_name = "arith 32/8"; - stage_short = "arith32"; - stage_cur = 0; - stage_max = 4 * (len - 3) * ARITH_MAX; + stage_max -= 4 * ARITH_MAX; + continue; - orig_hit_cnt = new_hit_cnt; + } - for (i = 0; i < len - 3; ++i) { + stage_cur_byte = i; - u32 orig = *(u32*)(out_buf + i); + for (j = 1; j <= ARITH_MAX; ++j) { - /* Let's consult the effector map... */ + u32 r1 = orig ^ (orig + j), r2 = orig ^ (orig - j), + r3 = orig ^ SWAP32(SWAP32(orig) + j), + r4 = orig ^ SWAP32(SWAP32(orig) - j); - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= 4 * ARITH_MAX; - continue; - } + /* Little endian first. Same deal as with 16-bit: we only want to + try if the operation would have effect on more than two bytes. 
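The overflow guards are simple carry tests. For words, (orig & 0xff) + j > 0xff holds exactly when the addition propagates out of the low byte; the 32-bit analogue (orig & 0xffff) + j > 0xffff requires a carry out of the low 16 bits, i.e. an effect on more than two bytes. Worked through on a word:

  /* orig = 0x12FE, j = 3:  0xFE + 3 = 0x101 carries, orig + j = 0x1301
     touches both bytes, so the 16-bit stage tries it.
     orig = 0x1210, j = 3:  0x10 + 3 = 0x13, no carry; the result only
     differs in the low byte, which the 8-bit pass already produced.   */
  uint16_t orig = 0x12FE;
  int multi_byte = ((orig & 0xff) + 3) > 0xff;   /* 1: worth executing */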
*/ - stage_cur_byte = i; + stage_val_type = STAGE_VAL_LE; - for (j = 1; j <= ARITH_MAX; ++j) { + if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { - u32 r1 = orig ^ (orig + j), - r2 = orig ^ (orig - j), - r3 = orig ^ SWAP32(SWAP32(orig) + j), - r4 = orig ^ SWAP32(SWAP32(orig) - j); + stage_cur_val = j; + *(u32*)(out_buf + i) = orig + j; - /* Little endian first. Same deal as with 16-bit: we only want to - try if the operation would have effect on more than two bytes. */ + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_val_type = STAGE_VAL_LE; + } else - if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { + --stage_max; - stage_cur_val = j; - *(u32*)(out_buf + i) = orig + j; + if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + stage_cur_val = -j; + *(u32*)(out_buf + i) = orig - j; - } else --stage_max; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { + } else - stage_cur_val = -j; - *(u32*)(out_buf + i) = orig - j; + --stage_max; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + /* Big endian next. */ - } else --stage_max; + stage_val_type = STAGE_VAL_BE; - /* Big endian next. */ + if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { - stage_val_type = STAGE_VAL_BE; + stage_cur_val = j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); - if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - stage_cur_val = j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); + } else - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + --stage_max; - } else --stage_max; + if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { - if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { + stage_cur_val = -j; + *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); - stage_cur_val = -j; - *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } else - } else --stage_max; + --stage_max; - *(u32*)(out_buf + i) = orig; + *(u32*)(out_buf + i) = orig; - } + } - } + } - new_hit_cnt = queued_paths + unique_crashes; + new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_ARITH32] += stage_max; + stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_ARITH32] += stage_max; +skip_arith: + /********************** + * INTERESTING VALUES * + **********************/ - skip_arith: + stage_name = "interest 8/8"; + stage_short = "int8"; + stage_cur = 0; + stage_max = len * sizeof(interesting_8); - /********************** - * INTERESTING VALUES * - **********************/ + stage_val_type = STAGE_VAL_LE; - stage_name = "interest 8/8"; - stage_short = "int8"; - stage_cur = 0; - stage_max = len * sizeof(interesting_8); + orig_hit_cnt = new_hit_cnt; + /* Setting 8-bit integers. */ + for (i = 0; i < len; ++i) { - stage_val_type = STAGE_VAL_LE; + u8 orig = out_buf[i]; - orig_hit_cnt = new_hit_cnt; + /* Let's consult the effector map... */ - /* Setting 8-bit integers. 
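The interesting_* tables are preset boundary constants from config.h; each wider table extends the narrower one, so the 16-bit pass also walks every 8-bit entry. For reference, the stock 8-bit set is below (the 16-bit additions are values such as -32768, -129, 128, 255, 256, 512, 1024, 4096 and 32767):

  /* Stock INTERESTING_8 set from config.h. */
  static const int8_t interesting_8[] = {

    -128,   /* overflows signed 8-bit when decremented */
    -1,     /* all bits set                            */
     0,
     1,
     16,    /* one-off with a common buffer size       */
     32,
     64,
     100,   /* one-off with a common length limit      */
     127    /* overflows signed 8-bit when incremented */

  };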
*/ + if (!eff_map[EFF_APOS(i)]) { - for (i = 0; i < len; ++i) { + stage_max -= sizeof(interesting_8); + continue; - u8 orig = out_buf[i]; + } - /* Let's consult the effector map... */ + stage_cur_byte = i; - if (!eff_map[EFF_APOS(i)]) { - stage_max -= sizeof(interesting_8); - continue; - } + for (j = 0; j < sizeof(interesting_8); ++j) { - stage_cur_byte = i; + /* Skip if the value could be a product of bitflips or arithmetics. */ - for (j = 0; j < sizeof(interesting_8); ++j) { + if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || + could_be_arith(orig, (u8)interesting_8[j], 1)) { - /* Skip if the value could be a product of bitflips or arithmetics. */ + --stage_max; + continue; - if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || - could_be_arith(orig, (u8)interesting_8[j], 1)) { - --stage_max; - continue; - } + } - stage_cur_val = interesting_8[j]; - out_buf[i] = interesting_8[j]; + stage_cur_val = interesting_8[j]; + out_buf[i] = interesting_8[j]; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - out_buf[i] = orig; - ++stage_cur; + out_buf[i] = orig; + ++stage_cur; - } + } - } + } - new_hit_cnt = queued_paths + unique_crashes; + new_hit_cnt = queued_paths + unique_crashes; - stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST8] += stage_max; + stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST8] += stage_max; + /* Setting 16-bit integers, both endians. */ + if (no_arith || len < 2) goto skip_interest; - /* Setting 16-bit integers, both endians. */ + stage_name = "interest 16/8"; + stage_short = "int16"; + stage_cur = 0; + stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); - if (no_arith || len < 2) goto skip_interest; + orig_hit_cnt = new_hit_cnt; - stage_name = "interest 16/8"; - stage_short = "int16"; - stage_cur = 0; - stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); + for (i = 0; i < len - 1; ++i) { + u16 orig = *(u16*)(out_buf + i); - orig_hit_cnt = new_hit_cnt; + /* Let's consult the effector map... */ - for (i = 0; i < len - 1; ++i) { + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - u16 orig = *(u16*)(out_buf + i); + stage_max -= sizeof(interesting_16); + continue; - /* Let's consult the effector map... */ + } - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { - stage_max -= sizeof(interesting_16); - continue; - } + stage_cur_byte = i; - stage_cur_byte = i; + for (j = 0; j < sizeof(interesting_16) / 2; ++j) { - for (j = 0; j < sizeof(interesting_16) / 2; ++j) { + stage_cur_val = interesting_16[j]; - stage_cur_val = interesting_16[j]; + /* Skip if this could be a product of a bitflip, arithmetics, + or single-byte interesting value insertion. */ - /* Skip if this could be a product of a bitflip, arithmetics, - or single-byte interesting value insertion. 
*/ + if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && + !could_be_arith(orig, (u16)interesting_16[j], 2) && + !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { - if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && - !could_be_arith(orig, (u16)interesting_16[j], 2) && - !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { + stage_val_type = STAGE_VAL_LE; - stage_val_type = STAGE_VAL_LE; + *(u16*)(out_buf + i) = interesting_16[j]; - *(u16*)(out_buf + i) = interesting_16[j]; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } else - } else --stage_max; + --stage_max; - if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && - !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && - !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && - !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { + if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && + !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && + !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && + !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { - stage_val_type = STAGE_VAL_BE; + stage_val_type = STAGE_VAL_BE; - *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - } + --stage_max; - *(u16*)(out_buf + i) = orig; + } - } + *(u16*)(out_buf + i) = orig; - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST16] += stage_max; + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST16] += stage_max; + if (len < 4) goto skip_interest; + /* Setting 32-bit integers, both endians. */ - if (len < 4) goto skip_interest; + stage_name = "interest 32/8"; + stage_short = "int32"; + stage_cur = 0; + stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); - /* Setting 32-bit integers, both endians. */ + orig_hit_cnt = new_hit_cnt; - stage_name = "interest 32/8"; - stage_short = "int32"; - stage_cur = 0; - stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); + for (i = 0; i < len - 3; ++i) { + u32 orig = *(u32*)(out_buf + i); - orig_hit_cnt = new_hit_cnt; + /* Let's consult the effector map... */ - for (i = 0; i < len - 3; ++i) { + if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && + !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - u32 orig = *(u32*)(out_buf + i); + stage_max -= sizeof(interesting_32) >> 1; + continue; - /* Let's consult the effector map... */ + } - if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && - !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { - stage_max -= sizeof(interesting_32) >> 1; - continue; - } + stage_cur_byte = i; - stage_cur_byte = i; + for (j = 0; j < sizeof(interesting_32) / 4; ++j) { - for (j = 0; j < sizeof(interesting_32) / 4; ++j) { + stage_cur_val = interesting_32[j]; - stage_cur_val = interesting_32[j]; + /* Skip if this could be a product of a bitflip, arithmetics, + or word interesting value insertion. */ - /* Skip if this could be a product of a bitflip, arithmetics, - or word interesting value insertion. 
*/ + if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && + !could_be_arith(orig, interesting_32[j], 4) && + !could_be_interest(orig, interesting_32[j], 4, 0)) { - if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && - !could_be_arith(orig, interesting_32[j], 4) && - !could_be_interest(orig, interesting_32[j], 4, 0)) { + stage_val_type = STAGE_VAL_LE; - stage_val_type = STAGE_VAL_LE; + *(u32*)(out_buf + i) = interesting_32[j]; - *(u32*)(out_buf + i) = interesting_32[j]; + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + } else - } else --stage_max; + --stage_max; - if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && - !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && - !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && - !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { + if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && + !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && + !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && + !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { - stage_val_type = STAGE_VAL_BE; + stage_val_type = STAGE_VAL_BE; - *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - ++stage_cur; + *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + ++stage_cur; - } else --stage_max; + } else - } + --stage_max; - *(u32*)(out_buf + i) = orig; + } - } + *(u32*)(out_buf + i) = orig; - new_hit_cnt = queued_paths + unique_crashes; + } - stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_INTEREST32] += stage_max; + new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_INTEREST32] += stage_max; +skip_interest: - skip_interest: + /******************** + * DICTIONARY STUFF * + ********************/ - /******************** - * DICTIONARY STUFF * - ********************/ + if (!extras_cnt) goto skip_user_extras; - if (!extras_cnt) goto skip_user_extras; + /* Overwrite with user-supplied extras. */ - /* Overwrite with user-supplied extras. */ + stage_name = "user extras (over)"; + stage_short = "ext_UO"; + stage_cur = 0; + stage_max = extras_cnt * len; - stage_name = "user extras (over)"; - stage_short = "ext_UO"; - stage_cur = 0; - stage_max = extras_cnt * len; + stage_val_type = STAGE_VAL_NONE; + orig_hit_cnt = new_hit_cnt; - stage_val_type = STAGE_VAL_NONE; + for (i = 0; i < len; ++i) { - orig_hit_cnt = new_hit_cnt; + u32 last_len = 0; - for (i = 0; i < len; ++i) { + stage_cur_byte = i; - u32 last_len = 0; + /* Extras are sorted by size, from smallest to largest. This means + that we don't have to worry about restoring the buffer in + between writes at a particular offset determined by the outer + loop. */ - stage_cur_byte = i; + for (j = 0; j < extras_cnt; ++j) { - /* Extras are sorted by size, from smallest to largest. This means - that we don't have to worry about restoring the buffer in - between writes at a particular offset determined by the outer - loop. */ + /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also + skip them if there's no room to insert the payload, if the token + is redundant, or if its entire span has no bytes set in the effector + map. 
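The UR(extras_cnt) >= MAX_DET_EXTRAS clause is a constant-budget trick: each token survives the draw with probability MAX_DET_EXTRAS / extras_cnt (200 / extras_cnt with the stock config.h value), so the expected number of tokens actually executed per byte position stays pinned at MAX_DET_EXTRAS no matter how large the dictionary grows. In isolation:

  /* Expected tokens tried per position == MAX_DET_EXTRAS (stock: 200). */
  for (j = 0; j < extras_cnt; ++j) {

    if (extras_cnt > 200 && UR(extras_cnt) >= 200)
      continue;                   /* kept with probability 200/extras_cnt */

    /* ... overwrite out_buf + i with extras[j] and run the target ... */

  }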
*/ - for (j = 0; j < extras_cnt; ++j) { + if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || + extras[j].len > len - i || + !memcmp(extras[j].data, out_buf + i, extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { - /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also - skip them if there's no room to insert the payload, if the token - is redundant, or if its entire span has no bytes set in the effector - map. */ + --stage_max; + continue; - if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || - extras[j].len > len - i || - !memcmp(extras[j].data, out_buf + i, extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { + } - --stage_max; - continue; + last_len = extras[j].len; + memcpy(out_buf + i, extras[j].data, last_len); - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - last_len = extras[j].len; - memcpy(out_buf + i, extras[j].data, last_len); + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + } - ++stage_cur; + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - } + } - /* Restore all the clobbered memory. */ - memcpy(out_buf + i, in_buf + i, last_len); + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UO] += stage_max; - new_hit_cnt = queued_paths + unique_crashes; + /* Insertion of user-supplied extras. */ - stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UO] += stage_max; + stage_name = "user extras (insert)"; + stage_short = "ext_UI"; + stage_cur = 0; + stage_max = extras_cnt * len; - /* Insertion of user-supplied extras. 
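The insertion pass that follows avoids shuffling out_buf in place by assembling each candidate in the ex_tmp scratch buffer: the head out_buf[0..i) is accumulated one byte per outer iteration, the token is stamped at offset i, and the tail is copied behind it, so each execution costs one token-sized plus one tail-sized memcpy. In outline:

  /* ex_tmp layout when inserting token extras[j] at offset i:
       [0 .. i)                    head, extended one byte per outer loop
       [i .. i + len_j)            the token itself
       [i + len_j .. len + len_j)  tail copied from out_buf + i          */
  for (i = 0; i <= len; ++i) {

    for (j = 0; j < extras_cnt; ++j) {

      memcpy(ex_tmp + i, extras[j].data, extras[j].len);          /* token */
      memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i);   /* tail  */
      /* execute ex_tmp, len + extras[j].len bytes long */

    }

    ex_tmp[i] = out_buf[i];   /* grow the head for the next offset */

  }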
*/ + orig_hit_cnt = new_hit_cnt; - stage_name = "user extras (insert)"; - stage_short = "ext_UI"; - stage_cur = 0; - stage_max = extras_cnt * len; + ex_tmp = ck_alloc(len + MAX_DICT_FILE); + for (i = 0; i <= len; ++i) { + stage_cur_byte = i; + for (j = 0; j < extras_cnt; ++j) { - orig_hit_cnt = new_hit_cnt; + if (len + extras[j].len > MAX_FILE) { - ex_tmp = ck_alloc(len + MAX_DICT_FILE); + --stage_max; + continue; - for (i = 0; i <= len; ++i) { + } - stage_cur_byte = i; + /* Insert token */ + memcpy(ex_tmp + i, extras[j].data, extras[j].len); - for (j = 0; j < extras_cnt; ++j) { + /* Copy tail */ + memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); - if (len + extras[j].len > MAX_FILE) { - --stage_max; - continue; - } + if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - /* Insert token */ - memcpy(ex_tmp + i, extras[j].data, extras[j].len); + ck_free(ex_tmp); + goto abandon_entry; - /* Copy tail */ - memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); + } - if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { - ck_free(ex_tmp); - goto abandon_entry; - } + ++stage_cur; - ++stage_cur; + } - } + /* Copy head */ + ex_tmp[i] = out_buf[i]; - /* Copy head */ - ex_tmp[i] = out_buf[i]; + } - } + ck_free(ex_tmp); - ck_free(ex_tmp); + new_hit_cnt = queued_paths + unique_crashes; - new_hit_cnt = queued_paths + unique_crashes; + stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_UI] += stage_max; - stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_UI] += stage_max; +skip_user_extras: - skip_user_extras: + if (!a_extras_cnt) goto skip_extras; - if (!a_extras_cnt) goto skip_extras; + stage_name = "auto extras (over)"; + stage_short = "ext_AO"; + stage_cur = 0; + stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; - stage_name = "auto extras (over)"; - stage_short = "ext_AO"; - stage_cur = 0; - stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; + stage_val_type = STAGE_VAL_NONE; + orig_hit_cnt = new_hit_cnt; - stage_val_type = STAGE_VAL_NONE; + for (i = 0; i < len; ++i) { - orig_hit_cnt = new_hit_cnt; + u32 last_len = 0; - for (i = 0; i < len; ++i) { + stage_cur_byte = i; - u32 last_len = 0; + for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { - stage_cur_byte = i; + /* See the comment in the earlier code; extras are sorted by size. */ - for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); ++j) { + if (a_extras[j].len > len - i || + !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || + !memchr(eff_map + EFF_APOS(i), 1, + EFF_SPAN_ALEN(i, a_extras[j].len))) { - /* See the comment in the earlier code; extras are sorted by size. */ + --stage_max; + continue; - if (a_extras[j].len > len - i || - !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || - !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { + } - --stage_max; - continue; + last_len = a_extras[j].len; + memcpy(out_buf + i, a_extras[j].data, last_len); - } + if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; - last_len = a_extras[j].len; - memcpy(out_buf + i, a_extras[j].data, last_len); + ++stage_cur; - if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; + } - ++stage_cur; + /* Restore all the clobbered memory. */ + memcpy(out_buf + i, in_buf + i, last_len); - } + } - /* Restore all the clobbered memory. 
*/ - memcpy(out_buf + i, in_buf + i, last_len); + new_hit_cnt = queued_paths + unique_crashes; - } + stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; + stage_cycles[STAGE_EXTRAS_AO] += stage_max; - new_hit_cnt = queued_paths + unique_crashes; +skip_extras: - stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; - stage_cycles[STAGE_EXTRAS_AO] += stage_max; + /* If we made this to here without jumping to havoc_stage or abandon_entry, + we're properly done with deterministic steps and can mark it as such + in the .state/ directory. */ - skip_extras: + if (!queue_cur->passed_det) mark_as_det_done(queue_cur); - /* If we made this to here without jumping to havoc_stage or abandon_entry, - we're properly done with deterministic steps and can mark it as such - in the .state/ directory. */ + /**************** + * RANDOM HAVOC * + ****************/ - if (!queue_cur->passed_det) mark_as_det_done(queue_cur); +havoc_stage: +pacemaker_fuzzing: - /**************** - * RANDOM HAVOC * - ****************/ + stage_cur_byte = -1; - havoc_stage: - pacemaker_fuzzing: + /* The havoc stage mutation code is also invoked when splicing files; if the + splice_cycle variable is set, generate different descriptions and such. */ + if (!splice_cycle) { - stage_cur_byte = -1; + stage_name = "MOpt-havoc"; + stage_short = "MOpt_havoc"; + stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * perf_score / + havoc_div / 100; - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ + } else { - if (!splice_cycle) { + static u8 tmp[32]; - stage_name = "MOpt-havoc"; - stage_short = "MOpt_havoc"; - stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; + perf_score = orig_perf; - } else { + sprintf(tmp, "MOpt-core-splice %u", splice_cycle); + stage_name = tmp; + stage_short = "MOpt_core_splice"; + stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - static u8 tmp[32]; + } - perf_score = orig_perf; + s32 temp_len_puppet; + cur_ms_lv = get_cur_time(); - sprintf(tmp, "MOpt-core-splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_core_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; + // for (; swarm_now < swarm_num; ++swarm_now) + { - } + if (key_puppet == 1) { - s32 temp_len_puppet; - cur_ms_lv = get_cur_time(); + if (unlikely(orig_hit_cnt_puppet == 0)) { + + orig_hit_cnt_puppet = queued_paths + unique_crashes; + last_limit_time_start = get_cur_time(); + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); + + } + + } + + { - //for (; swarm_now < swarm_num; ++swarm_now) - { - if (key_puppet == 1) { - if (unlikely(orig_hit_cnt_puppet == 0)) { - orig_hit_cnt_puppet = queued_paths + unique_crashes; - last_limit_time_start = get_cur_time(); - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); - } - } - { #ifndef IGNORE_FINDS - havoc_stage_puppet: + havoc_stage_puppet: #endif - stage_cur_byte = -1; - - /* The havoc stage mutation code is also invoked when splicing files; if the - splice_cycle variable is set, generate different descriptions and such. */ - - if (!splice_cycle) { - stage_name = "MOpt core avoc"; - stage_short = "MOpt_core_havoc"; - stage_max = (doing_det ? 
HAVOC_CYCLES_INIT : HAVOC_CYCLES) * - perf_score / havoc_div / 100; - } else { - static u8 tmp[32]; - perf_score = orig_perf; - sprintf(tmp, "MOpt core splice %u", splice_cycle); - stage_name = tmp; - stage_short = "MOpt_core_splice"; - stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; - } - - if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; - temp_len = len; - orig_hit_cnt = queued_paths + unique_crashes; - havoc_queued = queued_paths; - - for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) { - - u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); - stage_cur_val = use_stacking; - - for (i = 0; i < operator_num; ++i) { - core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i]; - } - - for (i = 0; i < use_stacking; ++i) { - - switch (select_algorithm()) { - - case 0: - /* Flip a single bit somewhere. Spooky! */ - FLIP_BIT(out_buf, UR(temp_len << 3)); - core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1; - break; - - - case 1: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1; - break; - - case 2: - if (temp_len < 2) break; - temp_len_puppet = UR(temp_len << 3); - FLIP_BIT(out_buf, temp_len_puppet); - FLIP_BIT(out_buf, temp_len_puppet + 1); - FLIP_BIT(out_buf, temp_len_puppet + 2); - FLIP_BIT(out_buf, temp_len_puppet + 3); - core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1; - break; - - case 3: - if (temp_len < 4) break; - out_buf[UR(temp_len)] ^= 0xFF; - core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1; - break; - - case 4: - if (temp_len < 8) break; - *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; - core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1; - break; - - case 5: - if (temp_len < 8) break; - *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; - core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1; - break; - - case 6: - out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); - out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); - core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1; - break; - - case 7: - /* Randomly subtract from word, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } - /* Randomly add to word, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 1); - *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 1); - u16 num = 1 + UR(ARITH_MAX); - *(u16*)(out_buf + pos) = - SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - } - core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1; - break; - - - case 8: - /* Randomly subtract from dword, random endian. */ - if (temp_len < 8) break; - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - } - /* Randomly add to dword, random endian. */ - if (UR(2)) { - u32 pos = UR(temp_len - 3); - *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - } else { - u32 pos = UR(temp_len - 3); - u32 num = 1 + UR(ARITH_MAX); - *(u32*)(out_buf + pos) = - SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - } - core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1; - break; - - - case 9: - /* Set byte to interesting value. 
*/
-        if (temp_len < 4) break;
-        out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))];
-        core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1;
-        break;
-
-      case 10:
-        /* Set word to interesting value, randomly choosing endian. */
-        if (temp_len < 8) break;
-        if (UR(2)) {
-          *(u16*)(out_buf + UR(temp_len - 1)) =
-            interesting_16[UR(sizeof(interesting_16) >> 1)];
-        } else {
-          *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16(
-            interesting_16[UR(sizeof(interesting_16) >> 1)]);
-        }
-        core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1;
-        break;
-
-
-      case 11:
-        /* Set dword to interesting value, randomly choosing endian. */
-
-        if (temp_len < 8) break;
-
-        if (UR(2)) {
-          *(u32*)(out_buf + UR(temp_len - 3)) =
-            interesting_32[UR(sizeof(interesting_32) >> 2)];
-        } else {
-          *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32(
-            interesting_32[UR(sizeof(interesting_32) >> 2)]);
-        }
-        core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1;
-        break;
+      stage_cur_byte = -1;

+      /* The havoc stage mutation code is also invoked when splicing files; if
+         the splice_cycle variable is set, generate different descriptions and
+         such. */
+
+      if (!splice_cycle) {
+
+        stage_name = "MOpt core havoc";
+        stage_short = "MOpt_core_havoc";
+        stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) *
+                    perf_score / havoc_div / 100;

-      case 12:
+      } else {

-        /* Just set a random byte to a random value. Because,
-           why not. We use XOR with 1-255 to eliminate the
-           possibility of a no-op. */
+        static u8 tmp[32];
+        perf_score = orig_perf;
+        sprintf(tmp, "MOpt core splice %u", splice_cycle);
+        stage_name = tmp;
+        stage_short = "MOpt_core_splice";
+        stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100;

-        out_buf[UR(temp_len)] ^= 1 + UR(255);
-        core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1;
-        break;
+      }

+      if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN;
+      temp_len = len;
+      orig_hit_cnt = queued_paths + unique_crashes;
+      havoc_queued = queued_paths;

-      case 13: {
+      for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {

-        /* Delete bytes. We're making this a bit more likely
-           than insertion (the next option) in hopes of keeping
-           files reasonably small. */
+        u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2));
+        stage_cur_val = use_stacking;

-        u32 del_from, del_len;
+        for (i = 0; i < operator_num; ++i) {

-        if (temp_len < 2) break;
+          core_operator_cycles_puppet_v3[i] = core_operator_cycles_puppet_v2[i];

-        /* Don't delete too much. */
+        }

-        del_len = choose_block_len(temp_len - 1);
+        for (i = 0; i < use_stacking; ++i) {

-        del_from = UR(temp_len - del_len + 1);
+          switch (select_algorithm()) {

-        memmove(out_buf + del_from, out_buf + del_from + del_len,
-                temp_len - del_from - del_len);
+            case 0:
+              /* Flip a single bit somewhere. Spooky! 
*/ + FLIP_BIT(out_buf, UR(temp_len << 3)); + core_operator_cycles_puppet_v2[STAGE_FLIP1] += 1; + break; - temp_len -= del_len; - core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1; - break; + case 1: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + core_operator_cycles_puppet_v2[STAGE_FLIP2] += 1; + break; - } + case 2: + if (temp_len < 2) break; + temp_len_puppet = UR(temp_len << 3); + FLIP_BIT(out_buf, temp_len_puppet); + FLIP_BIT(out_buf, temp_len_puppet + 1); + FLIP_BIT(out_buf, temp_len_puppet + 2); + FLIP_BIT(out_buf, temp_len_puppet + 3); + core_operator_cycles_puppet_v2[STAGE_FLIP4] += 1; + break; - case 14: + case 3: + if (temp_len < 4) break; + out_buf[UR(temp_len)] ^= 0xFF; + core_operator_cycles_puppet_v2[STAGE_FLIP8] += 1; + break; - if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + case 4: + if (temp_len < 8) break; + *(u16*)(out_buf + UR(temp_len - 1)) ^= 0xFFFF; + core_operator_cycles_puppet_v2[STAGE_FLIP16] += 1; + break; - /* Clone bytes (75%) or insert a block of constant bytes (25%). */ + case 5: + if (temp_len < 8) break; + *(u32*)(out_buf + UR(temp_len - 3)) ^= 0xFFFFFFFF; + core_operator_cycles_puppet_v2[STAGE_FLIP32] += 1; + break; - u8 actually_clone = UR(4); - u32 clone_from, clone_to, clone_len; - u8* new_buf; + case 6: + out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); + out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); + core_operator_cycles_puppet_v2[STAGE_ARITH8] += 1; + break; - if (actually_clone) { + case 7: + /* Randomly subtract from word, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { - clone_len = choose_block_len(temp_len); - clone_from = UR(temp_len - clone_len + 1); + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - } else { + } else { - clone_len = choose_block_len(HAVOC_BLK_XL); - clone_from = 0; + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); - } + } - clone_to = UR(temp_len); + /* Randomly add to word, random endian. */ + if (UR(2)) { - new_buf = ck_alloc_nozero(temp_len + clone_len); + u32 pos = UR(temp_len - 1); + *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); - /* Head */ + } else { - memcpy(new_buf, out_buf, clone_to); + u32 pos = UR(temp_len - 1); + u16 num = 1 + UR(ARITH_MAX); + *(u16*)(out_buf + pos) = + SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); - /* Inserted part */ + } - if (actually_clone) - memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); - else - memset(new_buf + clone_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + core_operator_cycles_puppet_v2[STAGE_ARITH16] += 1; + break; - /* Tail */ - memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, - temp_len - clone_to); + case 8: + /* Randomly subtract from dword, random endian. */ + if (temp_len < 8) break; + if (UR(2)) { - ck_free(out_buf); - out_buf = new_buf; - temp_len += clone_len; - core_operator_cycles_puppet_v2[STAGE_Clone75] += 1; - } + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); - break; + } else { - case 15: { + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); - /* Overwrite bytes with a randomly selected chunk (75%) or fixed - bytes (25%). */ + } - u32 copy_from, copy_to, copy_len; + /* Randomly add to dword, random endian. 
*/ + if (UR(2)) { - if (temp_len < 2) break; + u32 pos = UR(temp_len - 3); + *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); - copy_len = choose_block_len(temp_len - 1); + } else { - copy_from = UR(temp_len - copy_len + 1); - copy_to = UR(temp_len - copy_len + 1); + u32 pos = UR(temp_len - 3); + u32 num = 1 + UR(ARITH_MAX); + *(u32*)(out_buf + pos) = + SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); - if (UR(4)) { + } - if (copy_from != copy_to) - memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + core_operator_cycles_puppet_v2[STAGE_ARITH32] += 1; + break; - } - else memset(out_buf + copy_to, - UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); - core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1; - break; + case 9: + /* Set byte to interesting value. */ + if (temp_len < 4) break; + out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; + core_operator_cycles_puppet_v2[STAGE_INTEREST8] += 1; + break; - } + case 10: + /* Set word to interesting value, randomly choosing endian. */ + if (temp_len < 8) break; + if (UR(2)) { + *(u16*)(out_buf + UR(temp_len - 1)) = + interesting_16[UR(sizeof(interesting_16) >> 1)]; - } + } else { - } + *(u16*)(out_buf + UR(temp_len - 1)) = + SWAP16(interesting_16[UR(sizeof(interesting_16) >> 1)]); - tmp_core_time += 1; + } - u64 temp_total_found = queued_paths + unique_crashes; + core_operator_cycles_puppet_v2[STAGE_INTEREST16] += 1; + break; - if (common_fuzz_stuff(argv, out_buf, temp_len)) - goto abandon_entry_puppet; + case 11: + /* Set dword to interesting value, randomly choosing endian. */ - /* out_buf might have been mangled a bit, so let's restore it to its - original size and shape. */ + if (temp_len < 8) break; - if (temp_len < len) out_buf = ck_realloc(out_buf, len); - temp_len = len; - memcpy(out_buf, in_buf, len); + if (UR(2)) { - /* If we're finding new stuff, let's run for a bit longer, limits - permitting. */ + *(u32*)(out_buf + UR(temp_len - 3)) = + interesting_32[UR(sizeof(interesting_32) >> 2)]; - if (queued_paths != havoc_queued) { + } else { - if (perf_score <= havoc_max_mult * 100) { - stage_max *= 2; - perf_score *= 2; - } + *(u32*)(out_buf + UR(temp_len - 3)) = + SWAP32(interesting_32[UR(sizeof(interesting_32) >> 2)]); - havoc_queued = queued_paths; + } - } + core_operator_cycles_puppet_v2[STAGE_INTEREST32] += 1; + break; - if (unlikely(queued_paths + unique_crashes > temp_total_found)) - { - u64 temp_temp_puppet = queued_paths + unique_crashes - temp_total_found; - total_puppet_find = total_puppet_find + temp_temp_puppet; - for (i = 0; i < 16; ++i) - { - if (core_operator_cycles_puppet_v2[i] > core_operator_cycles_puppet_v3[i]) - core_operator_finds_puppet_v2[i] += temp_temp_puppet; - } - } + case 12: - } + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ - new_hit_cnt = queued_paths + unique_crashes; + out_buf[UR(temp_len)] ^= 1 + UR(255); + core_operator_cycles_puppet_v2[STAGE_RANDOMBYTE] += 1; + break; + case 13: { + + /* Delete bytes. We're making this a bit more likely + than insertion (the next option) in hopes of keeping + files reasonably small. */ + + u32 del_from, del_len; + + if (temp_len < 2) break; + + /* Don't delete too much. 
*/ + + del_len = choose_block_len(temp_len - 1); + + del_from = UR(temp_len - del_len + 1); + + memmove(out_buf + del_from, out_buf + del_from + del_len, + temp_len - del_from - del_len); + + temp_len -= del_len; + core_operator_cycles_puppet_v2[STAGE_DELETEBYTE] += 1; + break; + + } + + case 14: + + if (temp_len + HAVOC_BLK_XL < MAX_FILE) { + + /* Clone bytes (75%) or insert a block of constant bytes (25%). + */ + + u8 actually_clone = UR(4); + u32 clone_from, clone_to, clone_len; + u8* new_buf; + + if (actually_clone) { + + clone_len = choose_block_len(temp_len); + clone_from = UR(temp_len - clone_len + 1); + + } else { + + clone_len = choose_block_len(HAVOC_BLK_XL); + clone_from = 0; + + } + + clone_to = UR(temp_len); + + new_buf = ck_alloc_nozero(temp_len + clone_len); + + /* Head */ + + memcpy(new_buf, out_buf, clone_to); + + /* Inserted part */ + + if (actually_clone) + memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); + else + memset(new_buf + clone_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], clone_len); + + /* Tail */ + memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, + temp_len - clone_to); + + ck_free(out_buf); + out_buf = new_buf; + temp_len += clone_len; + core_operator_cycles_puppet_v2[STAGE_Clone75] += 1; + + } + + break; + + case 15: { + + /* Overwrite bytes with a randomly selected chunk (75%) or fixed + bytes (25%). */ + + u32 copy_from, copy_to, copy_len; + + if (temp_len < 2) break; + + copy_len = choose_block_len(temp_len - 1); + + copy_from = UR(temp_len - copy_len + 1); + copy_to = UR(temp_len - copy_len + 1); + + if (UR(4)) { + + if (copy_from != copy_to) + memmove(out_buf + copy_to, out_buf + copy_from, copy_len); + + } else + + memset(out_buf + copy_to, + UR(2) ? UR(256) : out_buf[UR(temp_len)], copy_len); + core_operator_cycles_puppet_v2[STAGE_OverWrite75] += 1; + break; + + } + + } + + } + + tmp_core_time += 1; + + u64 temp_total_found = queued_paths + unique_crashes; + + if (common_fuzz_stuff(argv, out_buf, temp_len)) + goto abandon_entry_puppet; + + /* out_buf might have been mangled a bit, so let's restore it to its + original size and shape. */ + + if (temp_len < len) out_buf = ck_realloc(out_buf, len); + temp_len = len; + memcpy(out_buf, in_buf, len); + + /* If we're finding new stuff, let's run for a bit longer, limits + permitting. */ + + if (queued_paths != havoc_queued) { + + if (perf_score <= havoc_max_mult * 100) { + + stage_max *= 2; + perf_score *= 2; + + } + + havoc_queued = queued_paths; + + } + + if (unlikely(queued_paths + unique_crashes > temp_total_found)) { + + u64 temp_temp_puppet = + queued_paths + unique_crashes - temp_total_found; + total_puppet_find = total_puppet_find + temp_temp_puppet; + for (i = 0; i < 16; ++i) { + + if (core_operator_cycles_puppet_v2[i] > + core_operator_cycles_puppet_v3[i]) + core_operator_finds_puppet_v2[i] += temp_temp_puppet; + + } + + } + + } + + new_hit_cnt = queued_paths + unique_crashes; #ifndef IGNORE_FINDS - /************ - * SPLICING * - ************/ + /************ + * SPLICING * + ************/ + retry_splicing_puppet: - retry_splicing_puppet: + if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet && + queued_paths > 1 && queue_cur->len > 1) { + struct queue_entry* target; + u32 tid, split_at; + u8* new_buf; + s32 f_diff, l_diff; + /* First of all, if we've modified in_buf for havoc, let's clean that + up... 
*/
-    if (use_splicing && splice_cycle++ < SPLICE_CYCLES_puppet &&
-        queued_paths > 1 && queue_cur->len > 1) {
+      if (in_buf != orig_in) {

-      struct queue_entry* target;
-      u32 tid, split_at;
-      u8* new_buf;
-      s32 f_diff, l_diff;
+        ck_free(in_buf);
+        in_buf = orig_in;
+        len = queue_cur->len;

-      /* First of all, if we've modified in_buf for havoc, let's clean that
-         up... */
+      }

-      if (in_buf != orig_in) {
-        ck_free(in_buf);
-        in_buf = orig_in;
-        len = queue_cur->len;
-      }
+      /* Pick a random queue entry and seek to it. Don't splice with yourself.
+       */

-      /* Pick a random queue entry and seek to it. Don't splice with yourself. */
+      do {

-      do { tid = UR(queued_paths); } while (tid == current_entry);
+        tid = UR(queued_paths);

-      splicing_with = tid;
-      target = queue;
+      } while (tid == current_entry);

-      while (tid >= 100) { target = target->next_100; tid -= 100; }
-      while (tid--) target = target->next;
+      splicing_with = tid;
+      target = queue;

-      /* Make sure that the target has a reasonable length. */
+      while (tid >= 100) {

-      while (target && (target->len < 2 || target == queue_cur)) {
-        target = target->next;
-        ++splicing_with;
-      }
+        target = target->next_100;
+        tid -= 100;

-      if (!target) goto retry_splicing_puppet;
+      }

-      /* Read the testcase into a new buffer. */
+      while (tid--)
+        target = target->next;

-      fd = open(target->fname, O_RDONLY);
+      /* Make sure that the target has a reasonable length. */

-      if (fd < 0) PFATAL("Unable to open '%s'", target->fname);
+      while (target && (target->len < 2 || target == queue_cur)) {

-      new_buf = ck_alloc_nozero(target->len);
+        target = target->next;
+        ++splicing_with;

-      ck_read(fd, new_buf, target->len, target->fname);
+      }

-      close(fd);
+      if (!target) goto retry_splicing_puppet;

-      /* Find a suitable splicin g location, somewhere between the first and
-         the last differing byte. Bail out if the difference is just a single
-         byte or so. */
+      /* Read the testcase into a new buffer. */

-      locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);
+      fd = open(target->fname, O_RDONLY);

-      if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
-        ck_free(new_buf);
-        goto retry_splicing_puppet;
-      }
+      if (fd < 0) PFATAL("Unable to open '%s'", target->fname);

-      /* Split somewhere between the first and last differing byte. */
+      new_buf = ck_alloc_nozero(target->len);

-      split_at = f_diff + UR(l_diff - f_diff);
+      ck_read(fd, new_buf, target->len, target->fname);

-      /* Do the thing. */
+      close(fd);

-      len = target->len;
-      memcpy(new_buf, in_buf, split_at);
-      in_buf = new_buf;
-      ck_free(out_buf);
-      out_buf = ck_alloc_nozero(len);
-      memcpy(out_buf, in_buf, len);
+      /* Find a suitable splicing location, somewhere between the first and
+         the last differing byte. Bail out if the difference is just a single
+         byte or so. */

-      goto havoc_stage_puppet;
+      locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff);

-    }
+      if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
+
+        ck_free(new_buf);
+        goto retry_splicing_puppet;
+
+      }
+
+      /* Split somewhere between the first and last differing byte. */
+
+      split_at = f_diff + UR(l_diff - f_diff);
+
+      /* Do the thing. 
*/ + + len = target->len; + memcpy(new_buf, in_buf, split_at); + in_buf = new_buf; + ck_free(out_buf); + out_buf = ck_alloc_nozero(len); + memcpy(out_buf, in_buf, len); + + goto havoc_stage_puppet; + + } #endif /* !IGNORE_FINDS */ - ret_val = 0; - abandon_entry: - abandon_entry_puppet: + ret_val = 0; + abandon_entry: + abandon_entry_puppet: - if (splice_cycle >= SPLICE_CYCLES_puppet) - SPLICE_CYCLES_puppet = (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + SPLICE_CYCLES_puppet_low); + if (splice_cycle >= SPLICE_CYCLES_puppet) + SPLICE_CYCLES_puppet = + (UR(SPLICE_CYCLES_puppet_up - SPLICE_CYCLES_puppet_low + 1) + + SPLICE_CYCLES_puppet_low); + splicing_with = -1; - splicing_with = -1; + munmap(orig_in, queue_cur->len); + if (in_buf != orig_in) ck_free(in_buf); + ck_free(out_buf); + ck_free(eff_map); - munmap(orig_in, queue_cur->len); + if (key_puppet == 1) { - if (in_buf != orig_in) ck_free(in_buf); - ck_free(out_buf); - ck_free(eff_map); + if (unlikely(queued_paths + unique_crashes > + ((queued_paths + unique_crashes) * limit_time_bound + + orig_hit_cnt_puppet))) { + key_puppet = 0; + cur_ms_lv = get_cur_time(); + new_hit_cnt = queued_paths + unique_crashes; + orig_hit_cnt_puppet = 0; + last_limit_time_start = 0; - if (key_puppet == 1) - { - if (unlikely(queued_paths + unique_crashes > ((queued_paths + unique_crashes)*limit_time_bound + orig_hit_cnt_puppet))) - { - key_puppet = 0; - cur_ms_lv = get_cur_time(); - new_hit_cnt = queued_paths + unique_crashes; - orig_hit_cnt_puppet = 0; - last_limit_time_start = 0; - } - } + } + } - if (unlikely(tmp_core_time > period_core)) - { - total_pacemaker_time += tmp_core_time; - tmp_core_time = 0; - temp_puppet_find = total_puppet_find; - new_hit_cnt = queued_paths + unique_crashes; + if (unlikely(tmp_core_time > period_core)) { - u64 temp_stage_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) - { + total_pacemaker_time += tmp_core_time; + tmp_core_time = 0; + temp_puppet_find = total_puppet_find; + new_hit_cnt = queued_paths + unique_crashes; - core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; - core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i]; - temp_stage_finds_puppet += core_operator_finds_puppet[i]; - } + u64 temp_stage_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { - key_module = 2; + core_operator_finds_puppet[i] = core_operator_finds_puppet_v2[i]; + core_operator_cycles_puppet[i] = core_operator_cycles_puppet_v2[i]; + temp_stage_finds_puppet += core_operator_finds_puppet[i]; - old_hit_count = new_hit_cnt; - } - return ret_val; - } - } + } + key_module = 2; + + old_hit_count = new_hit_cnt; + + } + + return ret_val; + + } + + } #undef FLIP_BIT } - void pso_updating(void) { - g_now += 1; - if (g_now > g_max) g_now = 0; - w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end; - int tmp_swarm, i, j; - u64 temp_operator_finds_puppet = 0; - for (i = 0; i < operator_num; ++i) - { - operator_finds_puppet[i] = core_operator_finds_puppet[i]; + g_now += 1; + if (g_now > g_max) g_now = 0; + w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end; + int tmp_swarm, i, j; + u64 temp_operator_finds_puppet = 0; + for (i = 0; i < operator_num; ++i) { - for (j = 0; j < swarm_num; ++j) - { - operator_finds_puppet[i] = operator_finds_puppet[i] + stage_finds_puppet[j][i]; - } - temp_operator_finds_puppet = temp_operator_finds_puppet + operator_finds_puppet[i]; - } + operator_finds_puppet[i] = core_operator_finds_puppet[i]; - for (i = 0; i < operator_num; ++i) - { - if (operator_finds_puppet[i]) 
- G_best[i] = (double)((double)(operator_finds_puppet[i]) / (double)(temp_operator_finds_puppet)); - } + for (j = 0; j < swarm_num; ++j) { - for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) - { - double x_temp = 0.0; - for (i = 0; i < operator_num; ++i) - { - probability_now[tmp_swarm][i] = 0.0; - v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); - x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; - if (x_now[tmp_swarm][i] > v_max) - x_now[tmp_swarm][i] = v_max; - else if (x_now[tmp_swarm][i] < v_min) - x_now[tmp_swarm][i] = v_min; - x_temp += x_now[tmp_swarm][i]; - } + operator_finds_puppet[i] = + operator_finds_puppet[i] + stage_finds_puppet[j][i]; + + } + + temp_operator_finds_puppet = + temp_operator_finds_puppet + operator_finds_puppet[i]; + + } + + for (i = 0; i < operator_num; ++i) { + + if (operator_finds_puppet[i]) + G_best[i] = (double)((double)(operator_finds_puppet[i]) / + (double)(temp_operator_finds_puppet)); + + } + + for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) { + + double x_temp = 0.0; + for (i = 0; i < operator_num; ++i) { + + probability_now[tmp_swarm][i] = 0.0; + v_now[tmp_swarm][i] = + w_now * v_now[tmp_swarm][i] + + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); + x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; + if (x_now[tmp_swarm][i] > v_max) + x_now[tmp_swarm][i] = v_max; + else if (x_now[tmp_swarm][i] < v_min) + x_now[tmp_swarm][i] = v_min; + x_temp += x_now[tmp_swarm][i]; + + } + + for (i = 0; i < operator_num; ++i) { + + x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; + if (likely(i != 0)) + probability_now[tmp_swarm][i] = + probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; + else + probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; + + } + + if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || + probability_now[tmp_swarm][operator_num - 1] > 1.01) + FATAL("ERROR probability"); + + } + + swarm_now = 0; + key_module = 0; - for (i = 0; i < operator_num; ++i) - { - x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; - if (likely(i != 0)) - probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; - else - probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; - } - if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) FATAL("ERROR probability"); - } - swarm_now = 0; - key_module = 0; } - /* larger change for MOpt implementation: the original fuzz_one was renamed to fuzz_one_original. 
All documentation references to fuzz_one therefore mean fuzz_one_original */ u8 fuzz_one(char** argv) { - int key_val_lv = 0; - if (limit_time_sig == 0) { - key_val_lv = fuzz_one_original(argv); - } else { - if (key_module == 0) - key_val_lv = pilot_fuzzing(argv); - else if (key_module == 1) - key_val_lv = core_fuzzing(argv); - else if (key_module == 2) - pso_updating(); - } - return key_val_lv; + int key_val_lv = 0; + if (limit_time_sig == 0) { + + key_val_lv = fuzz_one_original(argv); + + } else { + + if (key_module == 0) + key_val_lv = pilot_fuzzing(argv); + else if (key_module == 1) + key_val_lv = core_fuzzing(argv); + else if (key_module == 2) + pso_updating(); + + } + + return key_val_lv; + } diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index ed158e6c..e22291b5 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -26,45 +26,62 @@ #ifdef USE_PYTHON int init_py() { + Py_Initialize(); u8* module_name = getenv("AFL_PYTHON_MODULE"); if (module_name) { + PyObject* py_name = PyString_FromString(module_name); py_module = PyImport_Import(py_name); Py_DECREF(py_name); if (py_module != NULL) { + u8 py_notrim = 0; py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init"); py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz"); - py_functions[PY_FUNC_INIT_TRIM] = PyObject_GetAttrString(py_module, "init_trim"); - py_functions[PY_FUNC_POST_TRIM] = PyObject_GetAttrString(py_module, "post_trim"); + py_functions[PY_FUNC_INIT_TRIM] = + PyObject_GetAttrString(py_module, "init_trim"); + py_functions[PY_FUNC_POST_TRIM] = + PyObject_GetAttrString(py_module, "post_trim"); py_functions[PY_FUNC_TRIM] = PyObject_GetAttrString(py_module, "trim"); for (u8 py_idx = 0; py_idx < PY_FUNC_COUNT; ++py_idx) { + if (!py_functions[py_idx] || !PyCallable_Check(py_functions[py_idx])) { + if (py_idx >= PY_FUNC_INIT_TRIM && py_idx <= PY_FUNC_TRIM) { + // Implementing the trim API is optional for now - if (PyErr_Occurred()) - PyErr_Print(); + if (PyErr_Occurred()) PyErr_Print(); py_notrim = 1; + } else { - if (PyErr_Occurred()) - PyErr_Print(); - fprintf(stderr, "Cannot find/call function with index %d in external Python module.\n", py_idx); + + if (PyErr_Occurred()) PyErr_Print(); + fprintf(stderr, + "Cannot find/call function with index %d in external " + "Python module.\n", + py_idx); return 1; + } + } } if (py_notrim) { + py_functions[PY_FUNC_INIT_TRIM] = NULL; py_functions[PY_FUNC_POST_TRIM] = NULL; py_functions[PY_FUNC_TRIM] = NULL; - WARNF("Python module does not implement trim API, standard trimming will be used."); + WARNF( + "Python module does not implement trim API, standard trimming will " + "be used."); + } PyObject *py_args, *py_value; @@ -73,9 +90,11 @@ int init_py() { py_args = PyTuple_New(1); py_value = PyInt_FromLong(UR(0xFFFFFFFF)); if (!py_value) { + Py_DECREF(py_args); fprintf(stderr, "Cannot convert argument\n"); return 1; + } PyTuple_SetItem(py_args, 0, py_value); @@ -85,51 +104,68 @@ int init_py() { Py_DECREF(py_args); if (py_value == NULL) { + PyErr_Print(); - fprintf(stderr,"Call failed\n"); + fprintf(stderr, "Call failed\n"); return 1; + } + } else { + PyErr_Print(); fprintf(stderr, "Failed to load \"%s\"\n", module_name); return 1; + } + } return 0; + } void finalize_py() { + if (py_module != NULL) { + u32 i; for (i = 0; i < PY_FUNC_COUNT; ++i) Py_XDECREF(py_functions[i]); Py_DECREF(py_module); + } Py_Finalize(); + } -void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** ret, size_t* retlen) { +void fuzz_py(char* 
buf, size_t buflen, char* add_buf, size_t add_buflen, + char** ret, size_t* retlen) { if (py_module != NULL) { + PyObject *py_args, *py_value; py_args = PyTuple_New(2); py_value = PyByteArray_FromStringAndSize(buf, buflen); if (!py_value) { + Py_DECREF(py_args); fprintf(stderr, "Cannot convert argument\n"); return; + } PyTuple_SetItem(py_args, 0, py_value); py_value = PyByteArray_FromStringAndSize(add_buf, add_buflen); if (!py_value) { + Py_DECREF(py_args); fprintf(stderr, "Cannot convert argument\n"); return; + } PyTuple_SetItem(py_args, 1, py_value); @@ -139,26 +175,35 @@ void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** Py_DECREF(py_args); if (py_value != NULL) { + *retlen = PyByteArray_Size(py_value); *ret = malloc(*retlen); memcpy(*ret, PyByteArray_AsString(py_value), *retlen); Py_DECREF(py_value); + } else { + PyErr_Print(); - fprintf(stderr,"Call failed\n"); + fprintf(stderr, "Call failed\n"); return; + } + } + } u32 init_trim_py(char* buf, size_t buflen) { + PyObject *py_args, *py_value; py_args = PyTuple_New(1); py_value = PyByteArray_FromStringAndSize(buf, buflen); if (!py_value) { + Py_DECREF(py_args); FATAL("Failed to convert arguments"); + } PyTuple_SetItem(py_args, 0, py_value); @@ -167,24 +212,32 @@ u32 init_trim_py(char* buf, size_t buflen) { Py_DECREF(py_args); if (py_value != NULL) { + u32 retcnt = PyInt_AsLong(py_value); Py_DECREF(py_value); return retcnt; + } else { + PyErr_Print(); FATAL("Call failed"); + } + } u32 post_trim_py(char success) { + PyObject *py_args, *py_value; py_args = PyTuple_New(1); py_value = PyBool_FromLong(success); if (!py_value) { + Py_DECREF(py_args); FATAL("Failed to convert arguments"); + } PyTuple_SetItem(py_args, 0, py_value); @@ -193,16 +246,22 @@ u32 post_trim_py(char success) { Py_DECREF(py_args); if (py_value != NULL) { + u32 retcnt = PyInt_AsLong(py_value); Py_DECREF(py_value); return retcnt; + } else { + PyErr_Print(); FATAL("Call failed"); + } + } void trim_py(char** ret, size_t* retlen) { + PyObject *py_args, *py_value; py_args = PyTuple_New(0); @@ -210,14 +269,19 @@ void trim_py(char** ret, size_t* retlen) { Py_DECREF(py_args); if (py_value != NULL) { + *retlen = PyByteArray_Size(py_value); *ret = malloc(*retlen); memcpy(*ret, PyByteArray_AsString(py_value), *retlen); Py_DECREF(py_value); + } else { + PyErr_Print(); FATAL("Call failed"); + } + } u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { @@ -237,20 +301,24 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { stage_max = init_trim_py(in_buf, q->len); if (not_on_tty && debug) - SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, q->len); + SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, + q->len); + + while (stage_cur < stage_max) { - while(stage_cur < stage_max) { sprintf(tmp, "ptrim %s", DI(trim_exec)); u32 cksum; - char* retbuf = NULL; + char* retbuf = NULL; size_t retlen = 0; trim_py(&retbuf, &retlen); if (retlen > orig_len) - FATAL("Trimmed data returned by Python module is larger than original data"); + FATAL( + "Trimmed data returned by Python module is larger than original " + "data"); write_to_testcase(retbuf, retlen); @@ -280,17 +348,23 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { stage_cur = post_trim_py(1); if (not_on_tty && debug) - SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", stage_cur, stage_max, q->len); + SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", + stage_cur, stage_max, 
q->len); + } else { + /* Tell the Python module that the trimming was unsuccessful */ stage_cur = post_trim_py(0); if (not_on_tty && debug) - SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, stage_max); + SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, + stage_max); + } - /* Since this can be slow, update the screen every now and then. */ + /* Since this can be slow, update the screen every now and then. */ + + if (!(trim_exec++ % stats_update_freq)) show_stats(); - if (!(trim_exec++ % stats_update_freq)) show_stats(); } if (not_on_tty && debug) @@ -303,7 +377,7 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { s32 fd; - unlink(q->fname); /* ignore errors */ + unlink(q->fname); /* ignore errors */ fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); @@ -317,8 +391,6 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) { } - - abort_trimming: bytes_trim_out += q->len; @@ -327,3 +399,4 @@ abort_trimming: } #endif /* USE_PYTHON */ + diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index c1547b48..22a9ccb0 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -43,7 +43,6 @@ void mark_as_det_done(struct queue_entry* q) { } - /* Mark as variable. Create symlinks if possible to make it easier to examine the files. */ @@ -69,7 +68,6 @@ void mark_as_variable(struct queue_entry* q) { } - /* Mark / unmark as redundant (edge-only). This is not used for restoring state, but may be useful for post-processing datasets. */ @@ -102,18 +100,17 @@ void mark_as_redundant(struct queue_entry* q, u8 state) { } - /* Append new test case to the queue. */ void add_to_queue(u8* fname, u32 len, u8 passed_det) { struct queue_entry* q = ck_alloc(sizeof(struct queue_entry)); - q->fname = fname; - q->len = len; - q->depth = cur_depth + 1; - q->passed_det = passed_det; - q->n_fuzz = 1; + q->fname = fname; + q->len = len; + q->depth = cur_depth + 1; + q->passed_det = passed_det; + q->n_fuzz = 1; if (q->depth > max_depth) max_depth = q->depth; @@ -122,7 +119,9 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) { queue_top->next = q; queue_top = q; - } else q_prev100 = queue = queue_top = q; + } else + + q_prev100 = queue = queue_top = q; ++queued_paths; ++pending_not_fuzzed; @@ -140,7 +139,6 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) { } - /* Destroy the entire queue. */ void destroy_queue(void) { @@ -159,7 +157,6 @@ void destroy_queue(void) { } - /* When we bump into a new path, we call this to see if the path appears more "favorable" than any of the existing ones. The purpose of the "favorables" is to have a minimal set of paths that trigger all the bits @@ -170,12 +167,11 @@ void destroy_queue(void) { for every byte in the bitmap. We win that slot if there is no previous contender, or if the contender has a more favorable speed x size factor. */ - void update_bitmap_score(struct queue_entry* q) { u32 i; u64 fav_factor = q->exec_us * q->len; - u64 fuzz_p2 = next_p2 (q->n_fuzz); + u64 fuzz_p2 = next_p2(q->n_fuzz); /* For every byte set in trace_bits[], see if there is a previous winner, and how it compares to us. */ @@ -184,48 +180,54 @@ void update_bitmap_score(struct queue_entry* q) { if (trace_bits[i]) { - if (top_rated[i]) { + if (top_rated[i]) { - /* Faster-executing or smaller test cases are favored. */ - u64 top_rated_fuzz_p2 = next_p2 (top_rated[i]->n_fuzz); - u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len; + /* Faster-executing or smaller test cases are favored. 
*/ + u64 top_rated_fuzz_p2 = next_p2(top_rated[i]->n_fuzz); + u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len; - if (fuzz_p2 > top_rated_fuzz_p2) { - continue; - } else if (fuzz_p2 == top_rated_fuzz_p2) { - if (fav_factor > top_rated_fav_factor) - continue; - } + if (fuzz_p2 > top_rated_fuzz_p2) { - if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue; + continue; - /* Looks like we're going to win. Decrease ref count for the - previous winner, discard its trace_bits[] if necessary. */ + } else if (fuzz_p2 == top_rated_fuzz_p2) { - if (!--top_rated[i]->tc_ref) { - ck_free(top_rated[i]->trace_mini); - top_rated[i]->trace_mini = 0; - } + if (fav_factor > top_rated_fav_factor) continue; - } + } - /* Insert ourselves as the new winner. */ + if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue; - top_rated[i] = q; - ++q->tc_ref; + /* Looks like we're going to win. Decrease ref count for the + previous winner, discard its trace_bits[] if necessary. */ - if (!q->trace_mini) { - q->trace_mini = ck_alloc(MAP_SIZE >> 3); - minimize_bits(q->trace_mini, trace_bits); - } + if (!--top_rated[i]->tc_ref) { - score_changed = 1; + ck_free(top_rated[i]->trace_mini); + top_rated[i]->trace_mini = 0; - } + } + + } + + /* Insert ourselves as the new winner. */ + + top_rated[i] = q; + ++q->tc_ref; + + if (!q->trace_mini) { + + q->trace_mini = ck_alloc(MAP_SIZE >> 3); + minimize_bits(q->trace_mini, trace_bits); + + } + + score_changed = 1; + + } } - /* The second part of the mechanism discussed above is a routine that goes over top_rated[] entries, and then sequentially grabs winners for previously-unseen bytes (temp_v) and marks them as favored, at least @@ -235,8 +237,8 @@ void update_bitmap_score(struct queue_entry* q) { void cull_queue(void) { struct queue_entry* q; - static u8 temp_v[MAP_SIZE >> 3]; - u32 i; + static u8 temp_v[MAP_SIZE >> 3]; + u32 i; if (dumb_mode || !score_changed) return; @@ -244,14 +246,16 @@ void cull_queue(void) { memset(temp_v, 255, MAP_SIZE >> 3); - queued_favored = 0; + queued_favored = 0; pending_favored = 0; q = queue; while (q) { + q->favored = 0; q = q->next; + } /* Let's see if anything in the bitmap isn't captured in temp_v. @@ -264,27 +268,29 @@ void cull_queue(void) { /* Remove all bits belonging to the current entry from temp_v. */ - while (j--) + while (j--) if (top_rated[i]->trace_mini[j]) temp_v[j] &= ~top_rated[i]->trace_mini[j]; top_rated[i]->favored = 1; ++queued_favored; - if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored; + if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) + ++pending_favored; } q = queue; while (q) { + mark_as_redundant(q, !q->favored); q = q->next; + } } - /* Calculate case desirability score to adjust the length of havoc fuzzing. A helper function for fuzz_one(). Maybe some of these constants should go into config.h. */ @@ -305,34 +311,51 @@ u32 calculate_score(struct queue_entry* q) { // Longer execution time means longer work on the input, the deeper in // coverage, the better the fuzzing, right? 
-mh - if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10; - else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25; - else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50; - else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75; - else if (q->exec_us * 4 < avg_exec_us) perf_score = 300; - else if (q->exec_us * 3 < avg_exec_us) perf_score = 200; - else if (q->exec_us * 2 < avg_exec_us) perf_score = 150; + if (q->exec_us * 0.1 > avg_exec_us) + perf_score = 10; + else if (q->exec_us * 0.25 > avg_exec_us) + perf_score = 25; + else if (q->exec_us * 0.5 > avg_exec_us) + perf_score = 50; + else if (q->exec_us * 0.75 > avg_exec_us) + perf_score = 75; + else if (q->exec_us * 4 < avg_exec_us) + perf_score = 300; + else if (q->exec_us * 3 < avg_exec_us) + perf_score = 200; + else if (q->exec_us * 2 < avg_exec_us) + perf_score = 150; /* Adjust score based on bitmap size. The working theory is that better coverage translates to better targets. Multiplier from 0.25x to 3x. */ - if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3; - else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2; - else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5; - else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25; - else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5; - else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75; + if (q->bitmap_size * 0.3 > avg_bitmap_size) + perf_score *= 3; + else if (q->bitmap_size * 0.5 > avg_bitmap_size) + perf_score *= 2; + else if (q->bitmap_size * 0.75 > avg_bitmap_size) + perf_score *= 1.5; + else if (q->bitmap_size * 3 < avg_bitmap_size) + perf_score *= 0.25; + else if (q->bitmap_size * 2 < avg_bitmap_size) + perf_score *= 0.5; + else if (q->bitmap_size * 1.5 < avg_bitmap_size) + perf_score *= 0.75; /* Adjust score based on handicap. Handicap is proportional to how late in the game we learned about this path. Latecomers are allowed to run for a bit longer until they catch up with the rest. */ if (q->handicap >= 4) { + perf_score *= 4; q->handicap -= 4; + } else if (q->handicap) { + perf_score *= 2; --q->handicap; + } /* Final adjustment based on input depth, under the assumption that fuzzing @@ -341,11 +364,11 @@ u32 calculate_score(struct queue_entry* q) { switch (q->depth) { - case 0 ... 3: break; - case 4 ... 7: perf_score *= 2; break; - case 8 ... 13: perf_score *= 3; break; + case 0 ... 3: break; + case 4 ... 7: perf_score *= 2; break; + case 8 ... 13: perf_score *= 3; break; case 14 ... 25: perf_score *= 4; break; - default: perf_score *= 5; + default: perf_score *= 5; } @@ -357,61 +380,69 @@ u32 calculate_score(struct queue_entry* q) { switch (schedule) { - case EXPLORE: - break; + case EXPLORE: break; - case EXPLOIT: - factor = MAX_FACTOR; - break; + case EXPLOIT: factor = MAX_FACTOR; break; case COE: fuzz_total = 0; n_paths = 0; - struct queue_entry *queue_it = queue; + struct queue_entry* queue_it = queue; while (queue_it) { + fuzz_total += queue_it->n_fuzz; - n_paths ++; + n_paths++; queue_it = queue_it->next; + } fuzz_mu = fuzz_total / n_paths; if (fuzz <= fuzz_mu) { + if (q->fuzz_level < 16) - factor = ((u32) (1 << q->fuzz_level)); + factor = ((u32)(1 << q->fuzz_level)); else factor = MAX_FACTOR; + } else { + factor = 0; + } + break; case FAST: if (q->fuzz_level < 16) { - factor = ((u32) (1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz); + + factor = ((u32)(1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz); + } else - factor = MAX_FACTOR / (fuzz == 0 ? 
1 : next_p2 (fuzz)); + + factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2(fuzz)); break; - case LIN: - factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); - break; + case LIN: factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); break; case QUAD: factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz); break; - default: - PFATAL ("Unknown Power Schedule"); + default: PFATAL("Unknown Power Schedule"); + } - if (factor > MAX_FACTOR) - factor = MAX_FACTOR; + + if (factor > MAX_FACTOR) factor = MAX_FACTOR; perf_score *= factor / POWER_BETA; // MOpt mode - if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2; - else if (perf_score < 1) perf_score = 1; // Add a lower bound to AFLFast's energy assignment strategies + if (limit_time_sig != 0 && max_depth - q->depth < 3) + perf_score *= 2; + else if (perf_score < 1) + perf_score = + 1; // Add a lower bound to AFLFast's energy assignment strategies /* Make sure that we don't go over limit. */ @@ -420,3 +451,4 @@ u32 calculate_score(struct queue_entry* q) { return perf_score; } + diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index c14ecc87..4093d991 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -28,8 +28,8 @@ u8 run_target(char** argv, u32 timeout) { static struct itimerval it; - static u32 prev_timed_out = 0; - static u64 exec_ms = 0; + static u32 prev_timed_out = 0; + static u64 exec_ms = 0; int status = 0; u32 tb4; @@ -45,7 +45,7 @@ u8 run_target(char** argv, u32 timeout) { /* If we're running in "dumb" mode, we can't rely on the fork server logic compiled into the target program, so we will just keep calling - execve(). There is a bit of code duplication between here and + execve(). There is a bit of code duplication between here and init_forkserver(), but c'est la vie. */ if (dumb_mode == 1 || no_forkserver) { @@ -64,11 +64,11 @@ u8 run_target(char** argv, u32 timeout) { #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ #else - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ #endif /* ^RLIMIT_AS */ @@ -76,7 +76,7 @@ u8 run_target(char** argv, u32 timeout) { r.rlim_max = r.rlim_cur = 0; - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ /* Isolate the process and configure standard descriptors. If out_file is specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ @@ -108,10 +108,12 @@ u8 run_target(char** argv, u32 timeout) { /* Set sane defaults for ASAN if nothing else specified. */ - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" @@ -152,7 +154,8 @@ u8 run_target(char** argv, u32 timeout) { } - /* Configure timeout, as requested by user, then wait for child to terminate. */ + /* Configure timeout, as requested by user, then wait for child to terminate. 
+ */ it.it_value.tv_sec = (timeout / 1000); it.it_value.tv_usec = (timeout % 1000) * 1000; @@ -179,9 +182,10 @@ u8 run_target(char** argv, u32 timeout) { } if (!WIFSTOPPED(status)) child_pid = 0; - + getitimer(ITIMER_REAL, &it); - exec_ms = (u64) timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000); + exec_ms = + (u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000); if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms; it.it_value.tv_sec = 0; @@ -223,8 +227,10 @@ u8 run_target(char** argv, u32 timeout) { must use a special exit code. */ if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) { + kill_signal = 0; return FAULT_CRASH; + } if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG) @@ -234,7 +240,6 @@ u8 run_target(char** argv, u32 timeout) { } - /* Write modified data to file for testing. If out_file is set, the old file is unlinked and a new one is created. Otherwise, out_fd is rewound and truncated. */ @@ -245,20 +250,26 @@ void write_to_testcase(void* mem, u32 len) { if (out_file) { - unlink(out_file); /* Ignore errors. */ + unlink(out_file); /* Ignore errors. */ fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", out_file); - } else lseek(fd, 0, SEEK_SET); + } else + + lseek(fd, 0, SEEK_SET); if (pre_save_handler) { - u8* new_data; + + u8* new_data; size_t new_size = pre_save_handler(mem, len, &new_data); ck_write(fd, new_data, new_size, out_file); + } else { + ck_write(fd, mem, len, out_file); + } if (!out_file) { @@ -266,11 +277,12 @@ void write_to_testcase(void* mem, u32 len) { if (ftruncate(fd, len)) PFATAL("ftruncate() failed"); lseek(fd, 0, SEEK_SET); - } else close(fd); + } else + + close(fd); } - /* The same, but with an adjustable gap. Used for trimming. */ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { @@ -280,17 +292,19 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { if (out_file) { - unlink(out_file); /* Ignore errors. */ + unlink(out_file); /* Ignore errors. */ fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", out_file); - } else lseek(fd, 0, SEEK_SET); + } else + + lseek(fd, 0, SEEK_SET); if (skip_at) ck_write(fd, mem, skip_at, out_file); - u8 *memu8 = mem; + u8* memu8 = mem; if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file); if (!out_file) { @@ -298,22 +312,23 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed"); lseek(fd, 0, SEEK_SET); - } else close(fd); + } else + + close(fd); } - /* Calibrate a new test case. This is done when processing the input directory to warn about flaky or otherwise problematic test cases early on; and when new paths are discovered to detect variable behavior and so on. */ -u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, - u32 handicap, u8 from_queue) { +u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap, + u8 from_queue) { static u8 first_trace[MAP_SIZE]; - u8 fault = 0, new_bits = 0, var_detected = 0, - first_run = (q->exec_cksum == 0); + u8 fault = 0, new_bits = 0, var_detected = 0, + first_run = (q->exec_cksum == 0); u64 start_us, stop_us; @@ -326,19 +341,18 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, to intermittent latency. 
*/ if (!from_queue || resuming_fuzz) - use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD, - exec_tmout * CAL_TMOUT_PERC / 100); + use_tmout = + MAX(exec_tmout + CAL_TMOUT_ADD, exec_tmout * CAL_TMOUT_PERC / 100); ++q->cal_failed; stage_name = "calibration"; - stage_max = fast_cal ? 3 : CAL_CYCLES; + stage_max = fast_cal ? 3 : CAL_CYCLES; /* Make sure the forkserver is up before we do anything, and let's not count its spin-up time toward binary calibration. */ - if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) - init_forkserver(argv); + if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) init_forkserver(argv); if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE); @@ -360,8 +374,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, if (stop_soon || fault != crash_mode) goto abort_calibration; if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) { + fault = FAULT_NOINST; goto abort_calibration; + } cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); @@ -380,7 +396,7 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, if (!var_bytes[i] && first_trace[i] != trace_bits[i]) { var_bytes[i] = 1; - stage_max = CAL_CYCLES_LONG; + stage_max = CAL_CYCLES_LONG; } @@ -401,16 +417,16 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, stop_us = get_cur_time_us(); - total_cal_us += stop_us - start_us; + total_cal_us += stop_us - start_us; total_cal_cycles += stage_max; /* OK, let's collect some stats about the performance of this test case. This is used for fuzzing air time calculations in calculate_score(). */ - q->exec_us = (stop_us - start_us) / stage_max; + q->exec_us = (stop_us - start_us) / stage_max; q->bitmap_size = count_bytes(trace_bits); - q->handicap = handicap; - q->cal_failed = 0; + q->handicap = handicap; + q->cal_failed = 0; total_bitmap_size += q->bitmap_size; ++total_bitmap_entries; @@ -426,8 +442,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, abort_calibration: if (new_bits == 2 && !q->has_new_cov) { + q->has_new_cov = 1; ++queued_with_cov; + } /* Mark variable paths. */ @@ -437,15 +455,17 @@ abort_calibration: var_byte_count = count_bytes(var_bytes); if (!q->var_behavior) { + mark_as_variable(q); ++queued_variable; + } } stage_name = old_sn; - stage_cur = old_sc; - stage_max = old_sm; + stage_cur = old_sc; + stage_max = old_sm; if (!first_run) show_stats(); @@ -453,14 +473,13 @@ abort_calibration: } - /* Grab interesting test cases from other fuzzers. */ void sync_fuzzers(char** argv) { - DIR* sd; + DIR* sd; struct dirent* sd_ent; - u32 sync_cnt = 0; + u32 sync_cnt = 0; sd = opendir(sync_dir); if (!sd) PFATAL("Unable to open '%s'", sync_dir); @@ -468,16 +487,17 @@ void sync_fuzzers(char** argv) { stage_max = stage_cur = 0; cur_depth = 0; - /* Look at the entries created for every other fuzzer in the sync directory. */ + /* Look at the entries created for every other fuzzer in the sync directory. + */ while ((sd_ent = readdir(sd))) { static u8 stage_tmp[128]; - DIR* qd; + DIR* qd; struct dirent* qd_ent; - u8 *qd_path, *qd_synced_path; - u32 min_accept = 0, next_min_accept; + u8 * qd_path, *qd_synced_path; + u32 min_accept = 0, next_min_accept; s32 id_fd; @@ -490,8 +510,10 @@ void sync_fuzzers(char** argv) { qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name); if (!(qd = opendir(qd_path))) { + ck_free(qd_path); continue; + } /* Retrieve the ID of the last seen test case. 
*/ @@ -502,35 +524,34 @@ void sync_fuzzers(char** argv) { if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path); - if (read(id_fd, &min_accept, sizeof(u32)) > 0) - lseek(id_fd, 0, SEEK_SET); + if (read(id_fd, &min_accept, sizeof(u32)) > 0) lseek(id_fd, 0, SEEK_SET); next_min_accept = min_accept; - /* Show stats */ + /* Show stats */ sprintf(stage_tmp, "sync %u", ++sync_cnt); stage_name = stage_tmp; - stage_cur = 0; - stage_max = 0; + stage_cur = 0; + stage_max = 0; - /* For every file queued by this fuzzer, parse ID and see if we have looked at - it before; exec a test case if not. */ + /* For every file queued by this fuzzer, parse ID and see if we have looked + at it before; exec a test case if not. */ while ((qd_ent = readdir(qd))) { - u8* path; - s32 fd; + u8* path; + s32 fd; struct stat st; if (qd_ent->d_name[0] == '.' || - sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || - syncing_case < min_accept) continue; + sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || + syncing_case < min_accept) + continue; /* OK, sounds like a new one. Let's give it a try. */ - if (syncing_case >= next_min_accept) - next_min_accept = syncing_case + 1; + if (syncing_case >= next_min_accept) next_min_accept = syncing_case + 1; path = alloc_printf("%s/%s", qd_path, qd_ent->d_name); @@ -539,8 +560,10 @@ void sync_fuzzers(char** argv) { fd = open(path, O_RDONLY); if (fd < 0) { - ck_free(path); - continue; + + ck_free(path); + continue; + } if (fstat(fd, &st)) PFATAL("fstat() failed"); @@ -584,14 +607,13 @@ void sync_fuzzers(char** argv) { closedir(qd); ck_free(qd_path); ck_free(qd_synced_path); - - } + + } closedir(sd); } - /* Trim all new test cases to save cycles when doing deterministic checks. The trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of file size, to keep the stage short and sweet. */ @@ -599,8 +621,7 @@ void sync_fuzzers(char** argv) { u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { #ifdef USE_PYTHON - if (py_functions[PY_FUNC_TRIM]) - return trim_case_python(argv, q, in_buf); + if (py_functions[PY_FUNC_TRIM]) return trim_case_python(argv, q, in_buf); #endif static u8 tmp[64]; @@ -664,9 +685,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { u32 move_tail = q->len - remove_pos - trim_avail; q->len -= trim_avail; - len_p2 = next_p2(q->len); + len_p2 = next_p2(q->len); - memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, + memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, move_tail); /* Let's save a clean trace, which will be needed by @@ -679,7 +700,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { } - } else remove_pos += remove_len; + } else + + remove_pos += remove_len; /* Since this can be slow, update the screen every now and then. */ @@ -699,7 +722,7 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { s32 fd; - unlink(q->fname); /* ignore errors */ + unlink(q->fname); /* ignore errors */ fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); @@ -720,7 +743,6 @@ abort_trimming: } - /* Write a modified test case, run program, process results. Handle error conditions, returning 1 if it's time to bail out. This is a helper function for fuzz_one(). 
*/ @@ -745,20 +767,24 @@ u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) { if (fault == FAULT_TMOUT) { if (subseq_tmouts++ > TMOUT_LIMIT) { + ++cur_skipped_paths; return 1; + } - } else subseq_tmouts = 0; + } else + + subseq_tmouts = 0; /* Users can hit us with SIGUSR1 to request the current input to be abandoned. */ if (skip_requested) { - skip_requested = 0; - ++cur_skipped_paths; - return 1; + skip_requested = 0; + ++cur_skipped_paths; + return 1; } diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 5dbd59ac..3614599d 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -26,11 +26,11 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) { - static double last_bcvg, last_stab, last_eps; + static double last_bcvg, last_stab, last_eps; static struct rusage usage; - u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); - s32 fd; + u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); + s32 fd; FILE* f; fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); @@ -47,66 +47,74 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) { where exec/sec stats and such are not readily available. */ if (!bitmap_cvg && !stability && !eps) { + bitmap_cvg = last_bcvg; - stability = last_stab; - eps = last_eps; + stability = last_stab; + eps = last_eps; + } else { + last_bcvg = bitmap_cvg; last_stab = stability; - last_eps = eps; + last_eps = eps; + } - fprintf(f, "start_time : %llu\n" - "last_update : %llu\n" - "fuzzer_pid : %d\n" - "cycles_done : %llu\n" - "execs_done : %llu\n" - "execs_per_sec : %0.02f\n" - "paths_total : %u\n" - "paths_favored : %u\n" - "paths_found : %u\n" - "paths_imported : %u\n" - "max_depth : %u\n" - "cur_path : %u\n" /* Must match find_start_position() */ - "pending_favs : %u\n" - "pending_total : %u\n" - "variable_paths : %u\n" - "stability : %0.02f%%\n" - "bitmap_cvg : %0.02f%%\n" - "unique_crashes : %llu\n" - "unique_hangs : %llu\n" - "last_path : %llu\n" - "last_crash : %llu\n" - "last_hang : %llu\n" - "execs_since_crash : %llu\n" - "exec_timeout : %u\n" - "slowest_exec_ms : %llu\n" - "peak_rss_mb : %lu\n" - "afl_banner : %s\n" - "afl_version : " VERSION "\n" - "target_mode : %s%s%s%s%s%s%s%s\n" - "command_line : %s\n", - start_time / 1000, get_cur_time() / 1000, getpid(), - queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, - queued_paths, queued_favored, queued_discovered, queued_imported, - max_depth, current_entry, pending_favored, pending_not_fuzzed, - queued_variable, stability, bitmap_cvg, unique_crashes, - unique_hangs, last_path_time / 1000, last_crash_time / 1000, - last_hang_time / 1000, total_execs - last_crash_execs, - exec_tmout, slowest_exec_ms, (unsigned long int)usage.ru_maxrss, use_banner, - unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "", - no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "", - persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "", - (unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode || - persistent_mode || deferred_mode) ? 
"" : "default", - orig_cmdline); - /* ignore errors */ + fprintf(f, + "start_time : %llu\n" + "last_update : %llu\n" + "fuzzer_pid : %d\n" + "cycles_done : %llu\n" + "execs_done : %llu\n" + "execs_per_sec : %0.02f\n" + "paths_total : %u\n" + "paths_favored : %u\n" + "paths_found : %u\n" + "paths_imported : %u\n" + "max_depth : %u\n" + "cur_path : %u\n" /* Must match find_start_position() */ + "pending_favs : %u\n" + "pending_total : %u\n" + "variable_paths : %u\n" + "stability : %0.02f%%\n" + "bitmap_cvg : %0.02f%%\n" + "unique_crashes : %llu\n" + "unique_hangs : %llu\n" + "last_path : %llu\n" + "last_crash : %llu\n" + "last_hang : %llu\n" + "execs_since_crash : %llu\n" + "exec_timeout : %u\n" + "slowest_exec_ms : %llu\n" + "peak_rss_mb : %lu\n" + "afl_banner : %s\n" + "afl_version : " VERSION + "\n" + "target_mode : %s%s%s%s%s%s%s%s\n" + "command_line : %s\n", + start_time / 1000, get_cur_time() / 1000, getpid(), + queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, queued_paths, + queued_favored, queued_discovered, queued_imported, max_depth, + current_entry, pending_favored, pending_not_fuzzed, queued_variable, + stability, bitmap_cvg, unique_crashes, unique_hangs, + last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000, + total_execs - last_crash_execs, exec_tmout, slowest_exec_ms, + (unsigned long int)usage.ru_maxrss, use_banner, + unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", + dumb_mode ? " dumb " : "", no_forkserver ? "no_forksrv " : "", + crash_mode ? "crash " : "", persistent_mode ? "persistent " : "", + deferred_mode ? "deferred " : "", + (unicorn_mode || qemu_mode || dumb_mode || no_forkserver || + crash_mode || persistent_mode || deferred_mode) + ? "" + : "default", + orig_cmdline); + /* ignore errors */ fclose(f); } - /* Update the plot file if there is a reason to. */ void maybe_update_plot_file(double bitmap_cvg, double eps) { @@ -114,19 +122,20 @@ void maybe_update_plot_file(double bitmap_cvg, double eps) { static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md; static u64 prev_qc, prev_uc, prev_uh; - if (prev_qp == queued_paths && prev_pf == pending_favored && + if (prev_qp == queued_paths && prev_pf == pending_favored && prev_pnf == pending_not_fuzzed && prev_ce == current_entry && prev_qc == queue_cycle && prev_uc == unique_crashes && - prev_uh == unique_hangs && prev_md == max_depth) return; + prev_uh == unique_hangs && prev_md == max_depth) + return; - prev_qp = queued_paths; - prev_pf = pending_favored; + prev_qp = queued_paths; + prev_pf = pending_favored; prev_pnf = pending_not_fuzzed; - prev_ce = current_entry; - prev_qc = queue_cycle; - prev_uc = unique_crashes; - prev_uh = unique_hangs; - prev_md = max_depth; + prev_ce = current_entry; + prev_qc = queue_cycle; + prev_uc = unique_crashes; + prev_uh = unique_hangs; + prev_md = max_depth; /* Fields in the file: @@ -134,17 +143,16 @@ void maybe_update_plot_file(double bitmap_cvg, double eps) { favored_not_fuzzed, unique_crashes, unique_hangs, max_depth, execs_per_sec */ - fprintf(plot_file, + fprintf(plot_file, "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n", get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths, pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes, - unique_hangs, max_depth, eps); /* ignore errors */ + unique_hangs, max_depth, eps); /* ignore errors */ fflush(plot_file); } - /* Check terminal dimensions after resize. 
*/ static void check_term_size(void) { @@ -160,15 +168,14 @@ static void check_term_size(void) { } - /* A spiffy retro stats screen! This is called every stats_update_freq execve() calls, plus in several other circumstances. */ void show_stats(void) { - static u64 last_stats_ms, last_plot_ms, last_ms, last_execs; + static u64 last_stats_ms, last_plot_ms, last_ms, last_execs; static double avg_exec; - double t_byte_ratio, stab_ratio; + double t_byte_ratio, stab_ratio; u64 cur_ms; u32 t_bytes, t_bits; @@ -194,14 +201,13 @@ void show_stats(void) { } else { - double cur_avg = ((double)(total_execs - last_execs)) * 1000 / - (cur_ms - last_ms); + double cur_avg = + ((double)(total_execs - last_execs)) * 1000 / (cur_ms - last_ms); /* If there is a dramatic (5x+) jump in speed, reset the indicator more quickly. */ - if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) - avg_exec = cur_avg; + if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) avg_exec = cur_avg; avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) + cur_avg * (1.0 / AVG_SMOOTHING); @@ -249,7 +255,8 @@ void show_stats(void) { /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */ if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed && - getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2; + getenv("AFL_EXIT_WHEN_DONE")) + stop_soon = 2; if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2; @@ -276,7 +283,8 @@ void show_stats(void) { if (term_too_small) { - SAYF(cBRI "Your terminal is too small to display the UI.\n" + SAYF(cBRI + "Your terminal is too small to display the UI.\n" "Please resize terminal window to at least 79x24.\n" cRST); return; @@ -285,38 +293,41 @@ void show_stats(void) { /* Let's start by drawing a centered banner. */ - banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + strlen(power_name) + 3 + 5; + banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + + strlen(power_name) + 3 + 5; banner_pad = (79 - banner_len) / 2; memset(tmp, ' ', banner_pad); #ifdef HAVE_AFFINITY - sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN - " (%s) " cPIN "[%s]" cBLU " {%d}", crash_mode ? cPIN "peruvian were-rabbit" : - cYEL "american fuzzy lop", use_banner, power_name, cpu_aff); + sprintf(tmp + banner_pad, + "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}", + crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop", + use_banner, power_name, cpu_aff); #else - sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN - " (%s) " cPIN "[%s]", crash_mode ? cPIN "peruvian were-rabbit" : - cYEL "american fuzzy lop", use_banner, power_name); + sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]", + crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop", + use_banner, power_name); #endif /* HAVE_AFFINITY */ SAYF("\n%s\n", tmp); /* "Handy" shortcuts for drawing boxes... */ -#define bSTG bSTART cGRA -#define bH2 bH bH -#define bH5 bH2 bH2 bH -#define bH10 bH5 bH5 -#define bH20 bH10 bH10 -#define bH30 bH20 bH10 -#define SP5 " " -#define SP10 SP5 SP5 -#define SP20 SP10 SP10 +#define bSTG bSTART cGRA +#define bH2 bH bH +#define bH5 bH2 bH2 bH +#define bH10 bH5 bH5 +#define bH20 bH10 bH10 +#define bH30 bH20 bH10 +#define SP5 " " +#define SP10 SP5 SP5 +#define SP20 SP10 SP10 /* Lord, forgive me this. 
*/ - SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB - bH bSTOP cCYA " overall results " bSTG bH2 bH2 bRT "\n"); + SAYF(SET_G1 bSTG bLT bH bSTOP cCYA + " process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA + " overall results " bSTG bH2 bH2 bRT "\n"); if (dumb_mode) { @@ -327,29 +338,34 @@ void show_stats(void) { u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60; /* First queue cycle: don't stop now! */ - if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else + if (queue_cycle == 1 || min_wo_finds < 15) + strcpy(tmp, cMGN); + else - /* Subsequent cycles, but we're still making finds. */ - if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else + /* Subsequent cycles, but we're still making finds. */ + if (cycles_wo_finds < 25 || min_wo_finds < 30) + strcpy(tmp, cYEL); + else - /* No finds for a long time and no test cases to try. */ - if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120) + /* No finds for a long time and no test cases to try. */ + if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120) strcpy(tmp, cLGN); /* Default: cautiously OK to stop? */ - else strcpy(tmp, cLBL); + else + strcpy(tmp, cLBL); } SAYF(bV bSTOP " run time : " cRST "%-33s " bSTG bV bSTOP - " cycles done : %s%-5s " bSTG bV "\n", + " cycles done : %s%-5s " bSTG bV "\n", DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1)); /* We want to warn people about not seeing new paths after a full cycle, except when resuming fuzzing or running in non-instrumented mode. */ if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 || - in_bitmap || crash_mode)) { + in_bitmap || crash_mode)) { SAYF(bV bSTOP " last new path : " cRST "%-33s ", DTD(cur_ms, last_path_time)); @@ -359,12 +375,12 @@ void show_stats(void) { if (dumb_mode) SAYF(bV bSTOP " last new path : " cPIN "n/a" cRST - " (non-instrumented mode) "); + " (non-instrumented mode) "); - else + else SAYF(bV bSTOP " last new path : " cRST "none yet " cLRD - "(odd, check syntax!) "); + "(odd, check syntax!) "); } @@ -378,18 +394,18 @@ void show_stats(void) { (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : ""); SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP - " uniq crashes : %s%-6s" bSTG bV "\n", - DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, - tmp); + " uniq crashes : %s%-6s" bSTG bV "\n", + DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, tmp); sprintf(tmp, "%s%s", DI(unique_hangs), - (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); + (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); SAYF(bV bSTOP " last uniq hang : " cRST "%-33s " bSTG bV bSTOP - " uniq hangs : " cRST "%-6s" bSTG bV "\n", + " uniq hangs : " cRST "%-6s" bSTG bV "\n", DTD(cur_ms, last_hang_time), tmp); - SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA + SAYF(bVR bH bSTOP cCYA + " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA " map coverage " bSTG bH bHT bH20 bH2 bVL "\n"); /* This gets funny because we want to print several variable-length variables @@ -402,23 +418,24 @@ void show_stats(void) { SAYF(bV bSTOP " now processing : " cRST "%-16s " bSTG bV bSTOP, tmp); - sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) * - 100 / MAP_SIZE, t_byte_ratio); + sprintf(tmp, "%0.02f%% / %0.02f%%", + ((double)queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio); - SAYF(" map density : %s%-21s" bSTG bV "\n", t_byte_ratio > 70 ? cLRD : - ((t_bytes < 200 && !dumb_mode) ? 
cPIN : cRST), tmp); + SAYF(" map density : %s%-21s" bSTG bV "\n", + t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), + tmp); sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths), ((double)cur_skipped_paths * 100) / queued_paths); SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp); - sprintf(tmp, "%0.02f bits/tuple", - t_bytes ? (((double)t_bits) / t_bytes) : 0); + sprintf(tmp, "%0.02f bits/tuple", t_bytes ? (((double)t_bits) / t_bytes) : 0); SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp); - SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA + SAYF(bVR bH bSTOP cCYA + " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA " findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n"); sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored), @@ -427,7 +444,8 @@ void show_stats(void) { /* Yeah... it's still going on... halp? */ SAYF(bV bSTOP " now trying : " cRST "%-20s " bSTG bV bSTOP - " favored paths : " cRST "%-22s" bSTG bV "\n", stage_name, tmp); + " favored paths : " cRST "%-22s" bSTG bV "\n", + stage_name, tmp); if (!stage_max) { @@ -453,14 +471,14 @@ void show_stats(void) { if (crash_mode) { SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP - " new crashes : %s%-22s" bSTG bV "\n", DI(total_execs), - unique_crashes ? cLRD : cRST, tmp); + " new crashes : %s%-22s" bSTG bV "\n", + DI(total_execs), unique_crashes ? cLRD : cRST, tmp); } else { SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP - " total crashes : %s%-22s" bSTG bV "\n", DI(total_execs), - unique_crashes ? cLRD : cRST, tmp); + " total crashes : %s%-22s" bSTG bV "\n", + DI(total_execs), unique_crashes ? cLRD : cRST, tmp); } @@ -468,8 +486,8 @@ void show_stats(void) { if (avg_exec < 100) { - sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ? - "zzzz..." : "slow!"); + sprintf(tmp, "%s/sec (%s)", DF(avg_exec), + avg_exec < 20 ? "zzzz..." : "slow!"); SAYF(bV bSTOP " exec speed : " cLRD "%-20s ", tmp); @@ -483,12 +501,13 @@ void show_stats(void) { sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts), (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); - SAYF (bSTG bV bSTOP " total tmouts : " cRST "%-22s" bSTG bV "\n", tmp); + SAYF(bSTG bV bSTOP " total tmouts : " cRST "%-22s" bSTG bV "\n", tmp); /* Aaaalmost there... hold on! 
*/ - SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10 - bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n"); + SAYF(bVR bH cCYA bSTOP + " fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA + " path geometry " bSTG bH5 bH2 bVL "\n"); if (skip_deterministic) { @@ -496,66 +515,77 @@ void show_stats(void) { } else { - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]), - DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]), - DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4])); + sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP1]), + DI(stage_cycles[STAGE_FLIP1]), DI(stage_finds[STAGE_FLIP2]), + DI(stage_cycles[STAGE_FLIP2]), DI(stage_finds[STAGE_FLIP4]), + DI(stage_cycles[STAGE_FLIP4])); } - SAYF(bV bSTOP " bit flips : " cRST "%-36s " bSTG bV bSTOP " levels : " - cRST "%-10s" bSTG bV "\n", tmp, DI(max_depth)); + SAYF(bV bSTOP " bit flips : " cRST "%-36s " bSTG bV bSTOP + " levels : " cRST "%-10s" bSTG bV "\n", + tmp, DI(max_depth)); if (!skip_deterministic) - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]), - DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]), - DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32])); + sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP8]), + DI(stage_cycles[STAGE_FLIP8]), DI(stage_finds[STAGE_FLIP16]), + DI(stage_cycles[STAGE_FLIP16]), DI(stage_finds[STAGE_FLIP32]), + DI(stage_cycles[STAGE_FLIP32])); - SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP " pending : " - cRST "%-10s" bSTG bV "\n", tmp, DI(pending_not_fuzzed)); + SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP + " pending : " cRST "%-10s" bSTG bV "\n", + tmp, DI(pending_not_fuzzed)); if (!skip_deterministic) - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]), - DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]), - DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32])); + sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_ARITH8]), + DI(stage_cycles[STAGE_ARITH8]), DI(stage_finds[STAGE_ARITH16]), + DI(stage_cycles[STAGE_ARITH16]), DI(stage_finds[STAGE_ARITH32]), + DI(stage_cycles[STAGE_ARITH32])); - SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP " pend fav : " - cRST "%-10s" bSTG bV "\n", tmp, DI(pending_favored)); + SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP + " pend fav : " cRST "%-10s" bSTG bV "\n", + tmp, DI(pending_favored)); if (!skip_deterministic) - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]), - DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]), - DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32])); + sprintf( + tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_INTEREST8]), + DI(stage_cycles[STAGE_INTEREST8]), DI(stage_finds[STAGE_INTEREST16]), + DI(stage_cycles[STAGE_INTEREST16]), DI(stage_finds[STAGE_INTEREST32]), + DI(stage_cycles[STAGE_INTEREST32])); - SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP " own finds : " - cRST "%-10s" bSTG bV "\n", tmp, DI(queued_discovered)); + SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP + " own finds : " cRST "%-10s" bSTG bV "\n", + tmp, DI(queued_discovered)); if (!skip_deterministic) - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]), - 
DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]), - DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO])); + sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_EXTRAS_UO]), + DI(stage_cycles[STAGE_EXTRAS_UO]), DI(stage_finds[STAGE_EXTRAS_UI]), + DI(stage_cycles[STAGE_EXTRAS_UI]), DI(stage_finds[STAGE_EXTRAS_AO]), + DI(stage_cycles[STAGE_EXTRAS_AO])); SAYF(bV bSTOP " dictionary : " cRST "%-36s " bSTG bV bSTOP - " imported : " cRST "%-10s" bSTG bV "\n", tmp, - sync_id ? DI(queued_imported) : (u8*)"n/a"); + " imported : " cRST "%-10s" bSTG bV "\n", + tmp, sync_id ? DI(queued_imported) : (u8*)"n/a"); - sprintf(tmp, "%s/%s, %s/%s, %s/%s", - DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]), - DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]), - DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON])); + sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_HAVOC]), + DI(stage_cycles[STAGE_HAVOC]), DI(stage_finds[STAGE_SPLICE]), + DI(stage_cycles[STAGE_SPLICE]), DI(stage_finds[STAGE_PYTHON]), + DI(stage_cycles[STAGE_PYTHON])); SAYF(bV bSTOP " havoc : " cRST "%-36s " bSTG bV bSTOP, tmp); - if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio); - else strcpy(tmp, "n/a"); + if (t_bytes) + sprintf(tmp, "%0.02f%%", stab_ratio); + else + strcpy(tmp, "n/a"); - SAYF(" stability : %s%-10s" bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40) - ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20)) - ? cMGN : cRST), tmp); + SAYF(" stability : %s%-10s" bSTG bV "\n", + (stab_ratio < 85 && var_byte_count > 40) + ? cLRD + : ((queued_variable && (!persistent_mode || var_byte_count > 20)) + ? cMGN + : cRST), + tmp); if (!bytes_trim_out) { @@ -582,18 +612,26 @@ void show_stats(void) { sprintf(tmp2, "%0.02f%%", ((double)(blocks_eff_total - blocks_eff_select)) * 100 / - blocks_eff_total); + blocks_eff_total); strcat(tmp, tmp2); } + if (custom_mutator) { - sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), DI(stage_cycles[STAGE_CUSTOM_MUTATOR])); - SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n" - bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp); + + sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), + DI(stage_cycles[STAGE_CUSTOM_MUTATOR])); + SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB + "\n" bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, + tmp); + } else { - SAYF(bV bSTOP " trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n" - bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp); + + SAYF(bV bSTOP " trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB + "\n" bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, + tmp); + } /* Provide some CPU utilization stats. 
*/ @@ -601,7 +639,7 @@ void show_stats(void) { if (cpu_core_count) { double cur_runnable = get_runnable_processes(); - u32 cur_utilization = cur_runnable * 100 / cpu_core_count; + u32 cur_utilization = cur_runnable * 100 / cpu_core_count; u8* cpu_color = cCYA; @@ -618,25 +656,26 @@ void show_stats(void) { if (cpu_aff >= 0) { - SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, - MIN(cpu_aff, 999), cpu_color, - MIN(cur_utilization, 999)); + SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(cpu_aff, 999), + cpu_color, MIN(cur_utilization, 999)); } else { - SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, - cpu_color, MIN(cur_utilization, 999)); - - } + SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color, + MIN(cur_utilization, 999)); + + } #else - SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, - cpu_color, MIN(cur_utilization, 999)); + SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color, + MIN(cur_utilization, 999)); #endif /* ^HAVE_AFFINITY */ - } else SAYF("\r"); + } else + + SAYF("\r"); /* Hallelujah! */ @@ -644,7 +683,6 @@ void show_stats(void) { } - /* Display quick statistics at the end of processing the input directory, plus a bunch of warnings. Some calibration stuff also ended up here, along with several hardcoded constants. Maybe clean up eventually. */ @@ -652,10 +690,10 @@ void show_stats(void) { void show_init_stats(void) { struct queue_entry* q = queue; - u32 min_bits = 0, max_bits = 0; - u64 min_us = 0, max_us = 0; - u64 avg_us = 0; - u32 max_len = 0; + u32 min_bits = 0, max_bits = 0; + u64 min_us = 0, max_us = 0; + u64 avg_us = 0; + u32 max_len = 0; if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles; @@ -681,9 +719,12 @@ void show_init_stats(void) { /* Let's keep things moving with slow binaries. */ - if (avg_us > 50000) havoc_div = 10; /* 0-19 execs/sec */ - else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec */ - else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */ + if (avg_us > 50000) + havoc_div = 10; /* 0-19 execs/sec */ + else if (avg_us > 20000) + havoc_div = 5; /* 20-49 execs/sec */ + else if (avg_us > 10000) + havoc_div = 2; /* 50-100 execs/sec */ if (!resuming_fuzz) { @@ -698,7 +739,9 @@ void show_init_stats(void) { WARNF(cLRD "Some test cases look useless. Consider using a smaller set."); if (queued_paths > 100) - WARNF(cLRD "You probably have far too many input files! Consider trimming down."); + WARNF(cLRD + "You probably have far too many input files! Consider trimming " + "down."); else if (queued_paths > 20) WARNF("You have lots of input files; try starting small."); @@ -706,11 +749,13 @@ void show_init_stats(void) { OKF("Here are some useful stats:\n\n" - cGRA " Test case count : " cRST "%u favored, %u variable, %u total\n" - cGRA " Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n" - cGRA " Exec timing : " cRST "%s to %s us (average: %s us)\n", - queued_favored, queued_variable, queued_paths, min_bits, max_bits, - ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1), + cGRA " Test case count : " cRST + "%u favored, %u variable, %u total\n" cGRA " Bitmap range : " cRST + "%u to %u bits (average: %0.02f bits)\n" cGRA + " Exec timing : " cRST "%s to %s us (average: %s us)\n", + queued_favored, queued_variable, queued_paths, min_bits, max_bits, + ((double)total_bitmap_size) / + (total_bitmap_entries ? 
total_bitmap_entries : 1), DI(min_us), DI(max_us), DI(avg_us)); if (!timeout_given) { @@ -722,16 +767,19 @@ void show_init_stats(void) { random scheduler jitter is less likely to have any impact, and because our patience is wearing thin =) */ - if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000; - else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000; - else exec_tmout = avg_us * 5 / 1000; + if (avg_us > 50000) + exec_tmout = avg_us * 2 / 1000; + else if (avg_us > 10000) + exec_tmout = avg_us * 3 / 1000; + else + exec_tmout = avg_us * 5 / 1000; exec_tmout = MAX(exec_tmout, max_us / 1000); exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND; if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT; - ACTF("No -t option specified, so I'll use exec timeout of %u ms.", + ACTF("No -t option specified, so I'll use exec timeout of %u ms.", exec_tmout); timeout_given = 1; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 2242dd6b..685840c6 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -27,53 +27,62 @@ static void usage(u8* argv0) { #ifdef USE_PYTHON -#define PHYTON_SUPPORT \ - "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n" +# define PHYTON_SUPPORT\ + "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n" #else -#define PHYTON_SUPPORT "" +# define PHYTON_SUPPORT "" #endif - SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n" + SAYF( + "\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n" - "Required parameters:\n" - " -i dir - input directory with test cases\n" - " -o dir - output directory for fuzzer findings\n\n" + "Required parameters:\n" + " -i dir - input directory with test cases\n" + " -o dir - output directory for fuzzer findings\n\n" - "Execution control settings:\n" - " -p schedule - power schedules recompute a seed's performance score.\n" - " \n" - " see docs/power_schedules.txt\n" - " -f file - location read by the fuzzed program (stdin)\n" - " -t msec - timeout for each run (auto-scaled, 50-%d ms)\n" - " -m megs - memory limit for child process (%d MB)\n" - " -Q - use binary-only instrumentation (QEMU mode)\n" - " -U - use Unicorn-based instrumentation (Unicorn mode)\n\n" - " -L minutes - use MOpt(imize) mode and set the limit time for entering the\n" - " pacemaker mode (minutes of no new paths, 0 = immediately).\n" - " a recommended value is 10-60. see docs/README.MOpt\n\n" - - "Fuzzing behavior settings:\n" - " -d - quick & dirty mode (skips deterministic steps)\n" - " -n - fuzz without instrumentation (dumb mode)\n" - " -x dir - optional fuzzer dictionary (see README)\n\n" + "Execution control settings:\n" + " -p schedule - power schedules recompute a seed's performance " + "score.\n" + " \n" + " see docs/power_schedules.txt\n" + " -f file - location read by the fuzzed program (stdin)\n" + " -t msec - timeout for each run (auto-scaled, 50-%d ms)\n" + " -m megs - memory limit for child process (%d MB)\n" + " -Q - use binary-only instrumentation (QEMU mode)\n" + " -U - use Unicorn-based instrumentation (Unicorn mode)\n\n" + " -L minutes - use MOpt(imize) mode and set the limit time for " + "entering the\n" + " pacemaker mode (minutes of no new paths, 0 = " + "immediately).\n" + " a recommended value is 10-60. 
see docs/README.MOpt\n\n"

-      "Testing settings:\n"
-      "  -s seed       - use a fixed seed for the RNG\n"
-      "  -V seconds    - fuzz for a maximum total time of seconds then terminate\n"
-      "  -E execs      - fuzz for a maximum number of total executions then terminate\n\n"
+      "Fuzzing behavior settings:\n"
+      "  -d            - quick & dirty mode (skips deterministic steps)\n"
+      "  -n            - fuzz without instrumentation (dumb mode)\n"
+      "  -x dir        - optional fuzzer dictionary (see README)\n\n"

-      "Other stuff:\n"
-      "  -T text       - text banner to show on the screen\n"
-      "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
-      "  -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap file\n"
-      "  -C            - crash exploration mode (the peruvian rabbit thing)\n"
-      "  -e ext        - File extension for the temporarily generated test case\n\n"
+      "Testing settings:\n"
+      "  -s seed       - use a fixed seed for the RNG\n"
+      "  -V seconds    - fuzz for a maximum total time of seconds then "
+      "terminate\n"
+      "  -E execs      - fuzz for a maximum number of total executions then "
+      "terminate\n\n"

-      PHYTON_SUPPORT
+      "Other stuff:\n"
+      "  -T text       - text banner to show on the screen\n"
+      "  -M / -S id    - distributed mode (see parallel_fuzzing.txt)\n"
+      "  -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap "
+      "file\n"
+      "  -C            - crash exploration mode (the peruvian rabbit thing)\n"
+      "  -e ext        - File extension for the temporarily generated test "
+      "case\n\n"

-      "For additional tips, please consult %s/README\n\n",
+      PHYTON_SUPPORT

-      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);
+      "For additional tips, please consult %s/README\n\n",
+
+      argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path);

   exit(1);

 #undef PHYTON_SUPPORT
@@ -82,65 +91,90 @@ static void usage(u8* argv0) {

 #ifndef AFL_LIB

-static int stricmp(char const *a, char const *b) {
+static int stricmp(char const* a, char const* b) {
+
   for (;; ++a, ++b) {
+
     int d;
     d = tolower(*a) - tolower(*b);
-    if (d != 0 || !*a)
-      return d;
+    if (d != 0 || !*a) return d;
+
   }
+
 }

 /* Main entry point */

 int main(int argc, char** argv) {

-  s32 opt;
-  u64 prev_queued = 0;
-  u32 sync_interval_cnt = 0, seek_to;
-  u8 *extras_dir = 0;
-  u8 mem_limit_given = 0;
-  u8 exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
+  s32    opt;
+  u64    prev_queued = 0;
+  u32    sync_interval_cnt = 0, seek_to;
+  u8*    extras_dir = 0;
+  u8     mem_limit_given = 0;
+  u8     exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
   char** use_argv;
-  s64 init_seed;
+  s64    init_seed;

-  struct timeval tv;
+  struct timeval  tv;
   struct timezone tz;

-  SAYF(cCYA "afl-fuzz" VERSION cRST " based on afl by <lcamtuf@google.com> and a big online community\n");
+  SAYF(cCYA
+       "afl-fuzz" VERSION cRST
+       " based on afl by <lcamtuf@google.com> and a big online community\n");

   doc_path = access(DOC_PATH, F_OK) ?
"docs" : DOC_PATH; gettimeofday(&tv, &tz); init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid(); - while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) > 0) + while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) > + 0) switch (opt) { case 's': { + init_seed = strtoul(optarg, 0L, 10); fixed_seed = 1; break; + } - case 'p': /* Power schedule */ + case 'p': /* Power schedule */ if (!stricmp(optarg, "fast")) { + schedule = FAST; + } else if (!stricmp(optarg, "coe")) { + schedule = COE; + } else if (!stricmp(optarg, "exploit")) { + schedule = EXPLOIT; + } else if (!stricmp(optarg, "lin")) { + schedule = LIN; + } else if (!stricmp(optarg, "quad")) { + schedule = QUAD; - } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") || !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) { + + } else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") || + + !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) { + schedule = EXPLORE; + } else { + FATAL("Unknown -p power schedule"); + } + break; case 'e': @@ -151,7 +185,7 @@ int main(int argc, char** argv) { break; - case 'i': /* input dir */ + case 'i': /* input dir */ if (in_dir) FATAL("Multiple -i options not supported"); in_dir = optarg; @@ -160,115 +194,121 @@ int main(int argc, char** argv) { break; - case 'o': /* output dir */ + case 'o': /* output dir */ if (out_dir) FATAL("Multiple -o options not supported"); out_dir = optarg; break; - case 'M': { /* master sync ID */ + case 'M': { /* master sync ID */ - u8* c; + u8* c; - if (sync_id) FATAL("Multiple -S or -M options not supported"); - sync_id = ck_strdup(optarg); + if (sync_id) FATAL("Multiple -S or -M options not supported"); + sync_id = ck_strdup(optarg); - if ((c = strchr(sync_id, ':'))) { + if ((c = strchr(sync_id, ':'))) { - *c = 0; + *c = 0; - if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 || - !master_id || !master_max || master_id > master_max || - master_max > 1000000) FATAL("Bogus master ID passed to -M"); - - } - - force_deterministic = 1; + if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 || + !master_id || !master_max || master_id > master_max || + master_max > 1000000) + FATAL("Bogus master ID passed to -M"); } - break; + force_deterministic = 1; - case 'S': + } + + break; + + case 'S': if (sync_id) FATAL("Multiple -S or -M options not supported"); sync_id = ck_strdup(optarg); break; - case 'f': /* target file */ + case 'f': /* target file */ if (out_file) FATAL("Multiple -f options not supported"); out_file = optarg; break; - case 'x': /* dictionary */ + case 'x': /* dictionary */ if (extras_dir) FATAL("Multiple -x options not supported"); extras_dir = optarg; break; - case 't': { /* timeout */ + case 't': { /* timeout */ - u8 suffix = 0; + u8 suffix = 0; - if (timeout_given) FATAL("Multiple -t options not supported"); + if (timeout_given) FATAL("Multiple -t options not supported"); - if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -t"); + if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 || + optarg[0] == '-') + FATAL("Bad syntax used for -t"); - if (exec_tmout < 5) FATAL("Dangerously low value of -t"); + if (exec_tmout < 5) FATAL("Dangerously low value of -t"); - if (suffix == '+') timeout_given = 2; else timeout_given = 1; - - break; - - } - - case 'm': { /* mem limit */ - - u8 suffix = 'M'; - - if (mem_limit_given) FATAL("Multiple -m options not supported"); - mem_limit_given = 1; - - if (!strcmp(optarg, "none")) { - - mem_limit = 0; 
- break; - - } - - if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -m"); - - switch (suffix) { - - case 'T': mem_limit *= 1024 * 1024; break; - case 'G': mem_limit *= 1024; break; - case 'k': mem_limit /= 1024; break; - case 'M': break; - - default: FATAL("Unsupported suffix or bad syntax for -m"); - - } - - if (mem_limit < 5) FATAL("Dangerously low value of -m"); - - if (sizeof(rlim_t) == 4 && mem_limit > 2000) - FATAL("Value of -m out of range on 32-bit systems"); - - } + if (suffix == '+') + timeout_given = 2; + else + timeout_given = 1; break; - case 'd': /* skip deterministic */ + } + + case 'm': { /* mem limit */ + + u8 suffix = 'M'; + + if (mem_limit_given) FATAL("Multiple -m options not supported"); + mem_limit_given = 1; + + if (!strcmp(optarg, "none")) { + + mem_limit = 0; + break; + + } + + if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || + optarg[0] == '-') + FATAL("Bad syntax used for -m"); + + switch (suffix) { + + case 'T': mem_limit *= 1024 * 1024; break; + case 'G': mem_limit *= 1024; break; + case 'k': mem_limit /= 1024; break; + case 'M': break; + + default: FATAL("Unsupported suffix or bad syntax for -m"); + + } + + if (mem_limit < 5) FATAL("Dangerously low value of -m"); + + if (sizeof(rlim_t) == 4 && mem_limit > 2000) + FATAL("Value of -m out of range on 32-bit systems"); + + } + + break; + + case 'd': /* skip deterministic */ if (skip_deterministic) FATAL("Multiple -d options not supported"); skip_deterministic = 1; use_splicing = 1; break; - case 'B': /* load bitmap */ + case 'B': /* load bitmap */ /* This is a secret undocumented option! It is useful if you find an interesting test case during a normal fuzzing process, and want @@ -287,26 +327,29 @@ int main(int argc, char** argv) { read_bitmap(in_bitmap); break; - case 'C': /* crash mode */ + case 'C': /* crash mode */ if (crash_mode) FATAL("Multiple -C options not supported"); crash_mode = FAULT_CRASH; break; - case 'n': /* dumb mode */ + case 'n': /* dumb mode */ if (dumb_mode) FATAL("Multiple -n options not supported"); - if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1; + if (getenv("AFL_DUMB_FORKSRV")) + dumb_mode = 2; + else + dumb_mode = 1; break; - case 'T': /* banner */ + case 'T': /* banner */ if (use_banner) FATAL("Multiple -T options not supported"); use_banner = optarg; break; - case 'Q': /* QEMU mode */ + case 'Q': /* QEMU mode */ if (qemu_mode) FATAL("Multiple -Q options not supported"); qemu_mode = 1; @@ -315,7 +358,7 @@ int main(int argc, char** argv) { break; - case 'U': /* Unicorn mode */ + case 'U': /* Unicorn mode */ if (unicorn_mode) FATAL("Multiple -U options not supported"); unicorn_mode = 1; @@ -325,115 +368,132 @@ int main(int argc, char** argv) { break; case 'V': { - most_time_key = 1; - if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-') - FATAL("Bad syntax used for -V"); - } - break; + + most_time_key = 1; + if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-') + FATAL("Bad syntax used for -V"); + + } break; case 'E': { - most_execs_key = 1; - if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-') - FATAL("Bad syntax used for -E"); - } - break; - case 'L': { /* MOpt mode */ + most_execs_key = 1; + if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-') + FATAL("Bad syntax used for -E"); - if (limit_time_sig) FATAL("Multiple -L options not supported"); - limit_time_sig = 1; - havoc_max_mult = HAVOC_MAX_MULT_MOPT; + } break; - if (sscanf(optarg, "%llu", 
&limit_time_puppet) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -L"); + case 'L': { /* MOpt mode */ - u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000; + if (limit_time_sig) FATAL("Multiple -L options not supported"); + limit_time_sig = 1; + havoc_max_mult = HAVOC_MAX_MULT_MOPT; - if (limit_time_puppet2 < limit_time_puppet ) FATAL("limit_time overflow"); - limit_time_puppet = limit_time_puppet2; + if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 || optarg[0] == '-') + FATAL("Bad syntax used for -L"); - SAYF("limit_time_puppet %llu\n",limit_time_puppet); - swarm_now = 0; + u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000; - if (limit_time_puppet == 0 ) - key_puppet = 1; + if (limit_time_puppet2 < limit_time_puppet) + FATAL("limit_time overflow"); + limit_time_puppet = limit_time_puppet2; - int i; - int tmp_swarm = 0; + SAYF("limit_time_puppet %llu\n", limit_time_puppet); + swarm_now = 0; - if (g_now > g_max) g_now = 0; - w_now = (w_init - w_end)*(g_max - g_now) / (g_max)+w_end; + if (limit_time_puppet == 0) key_puppet = 1; - for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) { - double total_puppet_temp = 0.0; - swarm_fitness[tmp_swarm] = 0.0; + int i; + int tmp_swarm = 0; - for (i = 0; i < operator_num; ++i) { - stage_finds_puppet[tmp_swarm][i] = 0; - probability_now[tmp_swarm][i] = 0.0; - x_now[tmp_swarm][i] = ((double)(random() % 7000)*0.0001 + 0.1); - total_puppet_temp += x_now[tmp_swarm][i]; - v_now[tmp_swarm][i] = 0.1; - L_best[tmp_swarm][i] = 0.5; - G_best[i] = 0.5; - eff_best[tmp_swarm][i] = 0.0; + if (g_now > g_max) g_now = 0; + w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end; - } + for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) { - for (i = 0; i < operator_num; ++i) { - stage_cycles_puppet_v2[tmp_swarm][i] = stage_cycles_puppet[tmp_swarm][i]; - stage_finds_puppet_v2[tmp_swarm][i] = stage_finds_puppet[tmp_swarm][i]; - x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp; - } + double total_puppet_temp = 0.0; + swarm_fitness[tmp_swarm] = 0.0; - double x_temp = 0.0; + for (i = 0; i < operator_num; ++i) { - for (i = 0; i < operator_num; ++i) { - probability_now[tmp_swarm][i] = 0.0; - v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); + stage_finds_puppet[tmp_swarm][i] = 0; + probability_now[tmp_swarm][i] = 0.0; + x_now[tmp_swarm][i] = ((double)(random() % 7000) * 0.0001 + 0.1); + total_puppet_temp += x_now[tmp_swarm][i]; + v_now[tmp_swarm][i] = 0.1; + L_best[tmp_swarm][i] = 0.5; + G_best[i] = 0.5; + eff_best[tmp_swarm][i] = 0.0; - x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; + } - if (x_now[tmp_swarm][i] > v_max) - x_now[tmp_swarm][i] = v_max; - else if (x_now[tmp_swarm][i] < v_min) - x_now[tmp_swarm][i] = v_min; + for (i = 0; i < operator_num; ++i) { - x_temp += x_now[tmp_swarm][i]; - } + stage_cycles_puppet_v2[tmp_swarm][i] = + stage_cycles_puppet[tmp_swarm][i]; + stage_finds_puppet_v2[tmp_swarm][i] = + stage_finds_puppet[tmp_swarm][i]; + x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp; - for (i = 0; i < operator_num; ++i) { - x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; - if (likely(i != 0)) - probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; - else - probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; - } - if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01) - FATAL("ERROR probability"); - } + } - for (i 
= 0; i < operator_num; ++i) { - core_operator_finds_puppet[i] = 0; - core_operator_finds_puppet_v2[i] = 0; - core_operator_cycles_puppet[i] = 0; - core_operator_cycles_puppet_v2[i] = 0; - core_operator_cycles_puppet_v3[i] = 0; - } + double x_temp = 0.0; + + for (i = 0; i < operator_num; ++i) { + + probability_now[tmp_swarm][i] = 0.0; + v_now[tmp_swarm][i] = + w_now * v_now[tmp_swarm][i] + + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + + RAND_C * (G_best[i] - x_now[tmp_swarm][i]); + + x_now[tmp_swarm][i] += v_now[tmp_swarm][i]; + + if (x_now[tmp_swarm][i] > v_max) + x_now[tmp_swarm][i] = v_max; + else if (x_now[tmp_swarm][i] < v_min) + x_now[tmp_swarm][i] = v_min; + + x_temp += x_now[tmp_swarm][i]; + + } + + for (i = 0; i < operator_num; ++i) { + + x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp; + if (likely(i != 0)) + probability_now[tmp_swarm][i] = + probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i]; + else + probability_now[tmp_swarm][i] = x_now[tmp_swarm][i]; + + } + + if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || + probability_now[tmp_swarm][operator_num - 1] > 1.01) + FATAL("ERROR probability"); } - break; - default: + for (i = 0; i < operator_num; ++i) { - usage(argv[0]); + core_operator_finds_puppet[i] = 0; + core_operator_finds_puppet_v2[i] = 0; + core_operator_cycles_puppet[i] = 0; + core_operator_cycles_puppet_v2[i] = 0; + core_operator_cycles_puppet_v3[i] = 0; + + } + + } break; + + default: usage(argv[0]); } if (optind == argc || !in_dir || !out_dir) usage(argv[0]); - if (fixed_seed) - OKF("Running with fixed seed: %u", (u32)init_seed); + if (fixed_seed) OKF("Running with fixed seed: %u", (u32)init_seed); srandom((u32)init_seed); setup_signal_handlers(); check_asan_opts(); @@ -446,28 +506,39 @@ int main(int argc, char** argv) { FATAL("Input and output directories can't be the same"); if ((tmp_dir = getenv("AFL_TMPDIR")) != NULL) { + char tmpfile[strlen(tmp_dir + 16)]; sprintf(tmpfile, "%s/%s", tmp_dir, ".cur_input"); - if (access(tmpfile, F_OK) != -1) // there is still a race condition here, but well ... - FATAL("TMP_DIR already has an existing temporary input file: %s", tmpfile); + if (access(tmpfile, F_OK) != + -1) // there is still a race condition here, but well ... 
+ FATAL("TMP_DIR already has an existing temporary input file: %s", + tmpfile); + } else + tmp_dir = out_dir; if (dumb_mode) { if (crash_mode) FATAL("-C and -n are mutually exclusive"); - if (qemu_mode) FATAL("-Q and -n are mutually exclusive"); + if (qemu_mode) FATAL("-Q and -n are mutually exclusive"); if (unicorn_mode) FATAL("-U and -n are mutually exclusive"); } - + if (getenv("AFL_NO_UI") && getenv("AFL_FORCE_UI")) FATAL("AFL_NO_UI and AFL_FORCE_UI are mutually exclusive"); - - if (strchr(argv[optind], '/') == NULL) WARNF(cLRD "Target binary called without a prefixed path, make sure you are fuzzing the right binary: " cRST "%s", argv[optind]); - OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" Eissfeldt and Andrea Fioraldi"); - OKF("afl++ is open source, get it at https://github.com/vanhauser-thc/AFLplusplus"); + if (strchr(argv[optind], '/') == NULL) + WARNF(cLRD + "Target binary called without a prefixed path, make sure you are " + "fuzzing the right binary: " cRST "%s", + argv[optind]); + + OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" " + "Eissfeldt and Andrea Fioraldi"); + OKF("afl++ is open source, get it at " + "https://github.com/vanhauser-thc/AFLplusplus"); OKF("Power schedules from github.com/mboehme/aflfast"); OKF("Python Mutator and llvm_mode whitelisting from github.com/choller/afl"); OKF("afl-tmin fork server patch from github.com/nccgroup/TriforceAFL"); @@ -475,32 +546,42 @@ int main(int argc, char** argv) { ACTF("Getting to work..."); switch (schedule) { - case FAST: OKF ("Using exponential power schedule (FAST)"); break; - case COE: OKF ("Using cut-off exponential power schedule (COE)"); break; - case EXPLOIT: OKF ("Using exploitation-based constant power schedule (EXPLOIT)"); break; - case LIN: OKF ("Using linear power schedule (LIN)"); break; - case QUAD: OKF ("Using quadratic power schedule (QUAD)"); break; - case EXPLORE: OKF ("Using exploration-based constant power schedule (EXPLORE)"); break; - default : FATAL ("Unknown power schedule"); break; + + case FAST: OKF("Using exponential power schedule (FAST)"); break; + case COE: OKF("Using cut-off exponential power schedule (COE)"); break; + case EXPLOIT: + OKF("Using exploitation-based constant power schedule (EXPLOIT)"); + break; + case LIN: OKF("Using linear power schedule (LIN)"); break; + case QUAD: OKF("Using quadratic power schedule (QUAD)"); break; + case EXPLORE: + OKF("Using exploration-based constant power schedule (EXPLORE)"); + break; + default: FATAL("Unknown power schedule"); break; + } - if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1; - if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1; - if (getenv("AFL_NO_ARITH")) no_arith = 1; - if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1; - if (getenv("AFL_FAST_CAL")) fast_cal = 1; + if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1; + if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1; + if (getenv("AFL_NO_ARITH")) no_arith = 1; + if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1; + if (getenv("AFL_FAST_CAL")) fast_cal = 1; if (getenv("AFL_HANG_TMOUT")) { + hang_tmout = atoi(getenv("AFL_HANG_TMOUT")); if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT"); + } if (dumb_mode == 2 && no_forkserver) FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive"); if (getenv("AFL_PRELOAD")) { + setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); + } if (getenv("AFL_LD_PRELOAD")) @@ -511,31 +592,33 @@ int main(int argc, char** argv) { 
fix_up_banner(argv[optind]); check_if_tty(); - if (getenv("AFL_FORCE_UI")) - not_on_tty = 0; + if (getenv("AFL_FORCE_UI")) not_on_tty = 0; if (getenv("AFL_CAL_FAST")) { + /* Use less calibration cycles, for slow applications */ cal_cycles = 3; cal_cycles_long = 5; + } - if (getenv("AFL_DEBUG")) - debug = 1; + if (getenv("AFL_DEBUG")) debug = 1; if (getenv("AFL_PYTHON_ONLY")) { + /* This ensures we don't proceed to havoc/splice */ python_only = 1; /* Ensure we also skip all deterministic steps */ skip_deterministic = 1; + } get_core_count(); -#ifdef HAVE_AFFINITY +# ifdef HAVE_AFFINITY bind_to_free_cpu(); -#endif /* HAVE_AFFINITY */ +# endif /* HAVE_AFFINITY */ check_crash_handling(); check_cpu_governor(); @@ -552,13 +635,12 @@ int main(int argc, char** argv) { setup_dirs_fds(); -#ifdef USE_PYTHON - if (init_py()) - FATAL("Failed to initialize Python module"); -#else +# ifdef USE_PYTHON + if (init_py()) FATAL("Failed to initialize Python module"); +# else if (getenv("AFL_PYTHON_MODULE")) - FATAL("Your AFL binary was built without Python support"); -#endif + FATAL("Your AFL binary was built without Python support"); +# endif setup_cmdline_file(argv + optind); @@ -574,24 +656,33 @@ int main(int argc, char** argv) { /* If we don't have a file name chosen yet, use a safe default. */ if (!out_file) { + u32 i = optind + 1; while (argv[i]) { u8* aa_loc = strstr(argv[i], "@@"); if (aa_loc && !out_file) { + if (file_extension) { + out_file = alloc_printf("%s/.cur_input.%s", out_dir, file_extension); + } else { + out_file = alloc_printf("%s/.cur_input", out_dir); + } + detect_file_args(argv + optind + 1, out_file); - break; + break; + } ++i; } + } if (!out_file) setup_stdio_file(); @@ -621,9 +712,11 @@ int main(int argc, char** argv) { /* Woop woop woop */ if (!not_on_tty) { + sleep(4); start_time += 4000; if (stop_soon) goto stop_fuzzing; + } // real start time, we reset, so this works correctly with -V @@ -638,21 +731,25 @@ int main(int argc, char** argv) { if (!queue_cur) { ++queue_cycle; - current_entry = 0; + current_entry = 0; cur_skipped_paths = 0; - queue_cur = queue; + queue_cur = queue; while (seek_to) { + ++current_entry; --seek_to; queue_cur = queue_cur->next; + } show_stats(); if (not_on_tty) { + ACTF("Entering queue cycle %llu.", queue_cycle); fflush(stdout); + } /* If we had a full queue cycle with no new finds, try @@ -660,9 +757,14 @@ int main(int argc, char** argv) { if (queued_paths == prev_queued) { - if (use_splicing) ++cycles_wo_finds; else use_splicing = 1; + if (use_splicing) + ++cycles_wo_finds; + else + use_splicing = 1; - } else cycles_wo_finds = 0; + } else + + cycles_wo_finds = 0; prev_queued = queued_paths; @@ -674,9 +776,8 @@ int main(int argc, char** argv) { skipped_fuzz = fuzz_one(use_argv); if (!stop_soon && sync_id && !skipped_fuzz) { - - if (!(sync_interval_cnt++ % SYNC_INTERVAL)) - sync_fuzzers(use_argv); + + if (!(sync_interval_cnt++ % SYNC_INTERVAL)) sync_fuzzers(use_argv); } @@ -688,18 +789,28 @@ int main(int argc, char** argv) { ++current_entry; if (most_time_key == 1) { + u64 cur_ms_lv = get_cur_time(); - if (most_time * 1000 < cur_ms_lv - start_time) { + if (most_time * 1000 < cur_ms_lv - start_time) { + most_time_key = 2; break; + } + } + if (most_execs_key == 1) { + if (most_execs <= total_execs) { + most_execs_key = 2; break; + } + } + } if (queue_cur) show_stats(); @@ -708,19 +819,20 @@ int main(int argc, char** argv) { * ATTENTION - the following 10 lines were copied from a PR to Google's afl * repository - and slightly fixed. 
* These lines have nothing to do with the purpose of original PR though. - * Looks like when an exit condition was completed (AFL_BENCH_JUST_ONE, + * Looks like when an exit condition was completed (AFL_BENCH_JUST_ONE, * AFL_EXIT_WHEN_DONE or AFL_BENCH_UNTIL_CRASH) the child and forkserver * where not killed? */ - /* if we stopped programmatically, we kill the forkserver and the current runner. - if we stopped manually, this is done by the signal handler */ - if (stop_soon == 2){ + /* if we stopped programmatically, we kill the forkserver and the current + runner. if we stopped manually, this is done by the signal handler */ + if (stop_soon == 2) { + if (child_pid > 0) kill(child_pid, SIGKILL); if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL); - /* Now that we've killed the forkserver, we wait for it to be able to get rusage stats. */ - if (waitpid(forksrv_pid, NULL, 0) <= 0) { - WARNF("error waitpid\n"); - } + /* Now that we've killed the forkserver, we wait for it to be able to get + * rusage stats. */ + if (waitpid(forksrv_pid, NULL, 0) <= 0) { WARNF("error waitpid\n"); } + } write_bitmap(); @@ -732,8 +844,7 @@ stop_fuzzing: SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST, stop_soon == 2 ? "programmatically" : "by user"); - if (most_time_key == 2) - SAYF(cYEL "[!] " cRST "Time limit was reached\n"); + if (most_time_key == 2) SAYF(cYEL "[!] " cRST "Time limit was reached\n"); if (most_execs_key == 2) SAYF(cYEL "[!] " cRST "Execution limit was reached\n"); @@ -742,8 +853,9 @@ stop_fuzzing: if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) { SAYF("\n" cYEL "[!] " cRST - "Stopped during the first cycle, results may be incomplete.\n" - " (For info on resuming, see %s/README)\n", doc_path); + "Stopped during the first cycle, results may be incomplete.\n" + " (For info on resuming, see %s/README)\n", + doc_path); } @@ -755,9 +867,9 @@ stop_fuzzing: alloc_report(); -#ifdef USE_PYTHON +# ifdef USE_PYTHON finalize_py(); -#endif +# endif OKF("We're done here. Have a nice day!\n"); @@ -766,3 +878,4 @@ stop_fuzzing: } #endif /* !AFL_LIB */ + diff --git a/src/afl-gcc.c b/src/afl-gcc.c index f6ededeb..750f9b72 100644 --- a/src/afl-gcc.c +++ b/src/afl-gcc.c @@ -43,19 +43,18 @@ #include #include -static u8* as_path; /* Path to the AFL 'as' wrapper */ -static u8** cc_params; /* Parameters passed to the real CC */ -static u32 cc_par_cnt = 1; /* Param count, including argv0 */ -static u8 be_quiet, /* Quiet mode */ - clang_mode; /* Invoked as afl-clang*? */ - +static u8* as_path; /* Path to the AFL 'as' wrapper */ +static u8** cc_params; /* Parameters passed to the real CC */ +static u32 cc_par_cnt = 1; /* Param count, including argv0 */ +static u8 be_quiet, /* Quiet mode */ + clang_mode; /* Invoked as afl-clang*? */ /* Try to find our "fake" GNU assembler in AFL_PATH or at the location derived from argv[0]. If that fails, abort. 
*/ static void find_as(u8* argv0) { - u8 *afl_path = getenv("AFL_PATH"); + u8* afl_path = getenv("AFL_PATH"); u8 *slash, *tmp; if (afl_path) { @@ -63,9 +62,11 @@ static void find_as(u8* argv0) { tmp = alloc_printf("%s/as", afl_path); if (!access(tmp, X_OK)) { + as_path = afl_path; ck_free(tmp); return; + } ck_free(tmp); @@ -76,7 +77,7 @@ static void find_as(u8* argv0) { if (slash) { - u8 *dir; + u8* dir; *slash = 0; dir = ck_strdup(argv0); @@ -85,9 +86,11 @@ static void find_as(u8* argv0) { tmp = alloc_printf("%s/afl-as", dir); if (!access(tmp, X_OK)) { + as_path = dir; ck_free(tmp); return; + } ck_free(tmp); @@ -96,21 +99,22 @@ static void find_as(u8* argv0) { } if (!access(AFL_PATH "/as", X_OK)) { + as_path = AFL_PATH; return; + } FATAL("Unable to find AFL wrapper binary for 'as'. Please set AFL_PATH"); - -} +} /* Copy argv to cc_params, making the necessary edits. */ static void edit_params(u32 argc, char** argv) { - u8 fortify_set = 0, asan_set = 0; - u8 *name; + u8 fortify_set = 0, asan_set = 0; + u8* name; #if defined(__FreeBSD__) && defined(__x86_64__) u8 m32_set = 0; @@ -119,7 +123,10 @@ static void edit_params(u32 argc, char** argv) { cc_params = ck_alloc((argc + 128) * sizeof(u8*)); name = strrchr(argv[0], '/'); - if (!name) name = argv[0]; else name++; + if (!name) + name = argv[0]; + else + name++; if (!strncmp(name, "afl-clang", 9)) { @@ -128,11 +135,15 @@ static void edit_params(u32 argc, char** argv) { setenv(CLANG_ENV_VAR, "1", 1); if (!strcmp(name, "afl-clang++")) { + u8* alt_cxx = getenv("AFL_CXX"); cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++"; + } else { + u8* alt_cc = getenv("AFL_CC"); cc_params[0] = alt_cc ? alt_cc : (u8*)"clang"; + } } else { @@ -145,16 +156,22 @@ static void edit_params(u32 argc, char** argv) { #ifdef __APPLE__ - if (!strcmp(name, "afl-g++")) cc_params[0] = getenv("AFL_CXX"); - else if (!strcmp(name, "afl-gcj")) cc_params[0] = getenv("AFL_GCJ"); - else cc_params[0] = getenv("AFL_CC"); + if (!strcmp(name, "afl-g++")) + cc_params[0] = getenv("AFL_CXX"); + else if (!strcmp(name, "afl-gcj")) + cc_params[0] = getenv("AFL_GCJ"); + else + cc_params[0] = getenv("AFL_CC"); if (!cc_params[0]) { SAYF("\n" cLRD "[-] " cRST - "On Apple systems, 'gcc' is usually just a wrapper for clang. Please use the\n" - " 'afl-clang' utility instead of 'afl-gcc'. If you really have GCC installed,\n" - " set AFL_CC or AFL_CXX to specify the correct path to that compiler.\n"); + "On Apple systems, 'gcc' is usually just a wrapper for clang. " + "Please use the\n" + " 'afl-clang' utility instead of 'afl-gcc'. If you really have " + "GCC installed,\n" + " set AFL_CC or AFL_CXX to specify the correct path to that " + "compiler.\n"); FATAL("AFL_CC or AFL_CXX required on MacOS X"); @@ -163,14 +180,20 @@ static void edit_params(u32 argc, char** argv) { #else if (!strcmp(name, "afl-g++")) { + u8* alt_cxx = getenv("AFL_CXX"); cc_params[0] = alt_cxx ? alt_cxx : (u8*)"g++"; + } else if (!strcmp(name, "afl-gcj")) { + u8* alt_cc = getenv("AFL_GCJ"); cc_params[0] = alt_cc ? alt_cc : (u8*)"gcj"; + } else { + u8* alt_cc = getenv("AFL_CC"); cc_params[0] = alt_cc ? 
alt_cc : (u8*)"gcc";
+
   }

 #endif /* __APPLE__ */

@@ -178,13 +201,20 @@ static void edit_params(u32 argc, char** argv) {

   }

   while (--argc) {
+
     u8* cur = *(++argv);

     if (!strncmp(cur, "-B", 2)) {

       if (!be_quiet) WARNF("-B is already set, overriding");

-      if (!cur[2] && argc > 1) { argc--; argv++; }
+      if (!cur[2] && argc > 1) {
+
+        argc--;
+        argv++;
+
+      }
+
       continue;

     }

@@ -197,8 +227,8 @@ static void edit_params(u32 argc, char** argv) {
     if (!strcmp(cur, "-m32")) m32_set = 1;
 #endif

-    if (!strcmp(cur, "-fsanitize=address") ||
-        !strcmp(cur, "-fsanitize=memory")) asan_set = 1;
+    if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory"))
+      asan_set = 1;

     if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;

@@ -209,15 +239,13 @@ static void edit_params(u32 argc, char** argv) {
   cc_params[cc_par_cnt++] = "-B";
   cc_params[cc_par_cnt++] = as_path;

-  if (clang_mode)
-    cc_params[cc_par_cnt++] = "-no-integrated-as";
+  if (clang_mode) cc_params[cc_par_cnt++] = "-no-integrated-as";

   if (getenv("AFL_HARDEN")) {

     cc_params[cc_par_cnt++] = "-fstack-protector-all";

-    if (!fortify_set)
-      cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
+    if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";

   }

@@ -229,8 +257,7 @@ static void edit_params(u32 argc, char** argv) {

   } else if (getenv("AFL_USE_ASAN")) {

-    if (getenv("AFL_USE_MSAN"))
-      FATAL("ASAN and MSAN are mutually exclusive");
+    if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive");

     if (getenv("AFL_HARDEN"))
       FATAL("ASAN and AFL_HARDEN are mutually exclusive");

@@ -240,8 +267,7 @@ static void edit_params(u32 argc, char** argv) {

   } else if (getenv("AFL_USE_MSAN")) {

-    if (getenv("AFL_USE_ASAN"))
-      FATAL("ASAN and MSAN are mutually exclusive");
+    if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive");

     if (getenv("AFL_HARDEN"))
       FATAL("MSAN and AFL_HARDEN are mutually exclusive");

@@ -249,11 +275,10 @@ static void edit_params(u32 argc, char** argv) {
     cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE";
     cc_params[cc_par_cnt++] = "-fsanitize=memory";

-  }

 #ifdef USEMMAP
-  cc_params[cc_par_cnt++] = "-lrt";
+  cc_params[cc_par_cnt++] = "-lrt";
 #endif

   if (!getenv("AFL_DONT_OPTIMIZE")) {

@@ -264,12 +289,11 @@ static void edit_params(u32 argc, char** argv) {
        works OK. This has nothing to do with us, but let's avoid triggering
        that bug. */

-    if (!clang_mode || !m32_set)
-      cc_params[cc_par_cnt++] = "-g";
+    if (!clang_mode || !m32_set) cc_params[cc_par_cnt++] = "-g";

 #else

-    cc_params[cc_par_cnt++] = "-g";
+  cc_params[cc_par_cnt++] = "-g";

 #endif

@@ -300,7 +324,6 @@ static void edit_params(u32 argc, char** argv) {

 }

-
 /* Main entry point */

 int main(int argc, char** argv) {

   if (isatty(2) && !getenv("AFL_QUIET")) {

     SAYF(cCYA "afl-cc" VERSION cRST " by <lcamtuf@google.com>\n");
-    SAYF(cYEL "[!] " cBRI "NOTE: " cRST "afl-gcc is deprecated, llvm_mode is much faster and has more options\n");
+    SAYF(cYEL "[!] " cBRI "NOTE: " cRST
+              "afl-gcc is deprecated, llvm_mode is much faster and has more "
+              "options\n");

-  } else be_quiet = 1;
+  } else
+
+    be_quiet = 1;

   if (argc < 2) {

-    SAYF("\n"
-         "This is a helper application for afl-fuzz. It serves as a drop-in replacement\n"
-         "for gcc or clang, letting you recompile third-party code with the required\n"
-         "runtime instrumentation. A common use pattern would be one of the following:\n\n"
+    SAYF(
+        "\n"
+        "This is a helper application for afl-fuzz. 
It serves as a drop-in " + "replacement\n" + "for gcc or clang, letting you recompile third-party code with the " + "required\n" + "runtime instrumentation. A common use pattern would be one of the " + "following:\n\n" - " CC=%s/afl-gcc ./configure\n" - " CXX=%s/afl-g++ ./configure\n\n" + " CC=%s/afl-gcc ./configure\n" + " CXX=%s/afl-g++ ./configure\n\n" - "You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and AFL_AS.\n" - "Setting AFL_HARDEN enables hardening optimizations in the compiled code.\n\n", - BIN_PATH, BIN_PATH); + "You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and " + "AFL_AS.\n" + "Setting AFL_HARDEN enables hardening optimizations in the compiled " + "code.\n\n", + BIN_PATH, BIN_PATH); exit(1); @@ -341,3 +374,4 @@ int main(int argc, char** argv) { return 0; } + diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c index fa629eb7..5aa9b35c 100644 --- a/src/afl-gotcpu.c +++ b/src/afl-gotcpu.c @@ -31,7 +31,7 @@ #endif #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include #include @@ -51,12 +51,11 @@ # define HAVE_AFFINITY 1 #endif /* __linux__ */ - /* Get unix time in microseconds. */ static u64 get_cur_time_us(void) { - struct timeval tv; + struct timeval tv; struct timezone tz; gettimeofday(&tv, &tz); @@ -65,7 +64,6 @@ static u64 get_cur_time_us(void) { } - /* Get CPU usage in microseconds. */ static u64 get_cpu_usage_us(void) { @@ -79,7 +77,6 @@ static u64 get_cpu_usage_us(void) { } - /* Measure preemption rate. */ static u32 measure_preemption(u32 target_ms) { @@ -96,14 +93,17 @@ repeat_loop: v1 = CTEST_BUSY_CYCLES; - while (v1--) v2++; + while (v1--) + v2++; sched_yield(); en_t = get_cur_time_us(); if (en_t - st_t < target_ms * 1000) { + loop_repeats++; goto repeat_loop; + } /* Let's see what percentage of this time we actually had a chance to @@ -111,22 +111,20 @@ repeat_loop: en_c = get_cpu_usage_us(); - real_delta = (en_t - st_t) / 1000; + real_delta = (en_t - st_t) / 1000; slice_delta = (en_c - st_c) / 1000; return real_delta * 100 / slice_delta; } - /* Do the benchmark thing. 
*/ int main(int argc, char** argv) { #ifdef HAVE_AFFINITY - u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), - idle_cpus = 0, maybe_cpus = 0, i; + u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), idle_cpus = 0, maybe_cpus = 0, i; SAYF(cCYA "afl-gotcpu" VERSION cRST " by \n"); @@ -142,7 +140,7 @@ int main(int argc, char** argv) { if (!fr) { cpu_set_t c; - u32 util_perc; + u32 util_perc; CPU_ZERO(&c); CPU_SET(i, &c); @@ -159,7 +157,7 @@ int main(int argc, char** argv) { } else if (util_perc < 250) { - SAYF(" Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc); + SAYF(" Core #%u: " cYEL "CAUTION " cRST "(%u%%)\n", i, util_perc); exit(1); } @@ -255,3 +253,4 @@ int main(int argc, char** argv) { #endif /* ^HAVE_AFFINITY */ } + diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c index ce3b76e6..9c7ac7c3 100644 --- a/src/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -5,7 +5,7 @@ #define AFL_MAIN #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -32,68 +32,79 @@ #include #ifndef USEMMAP - #include - #include +# include +# include #endif -extern unsigned char*trace_bits; +extern unsigned char *trace_bits; #ifdef USEMMAP /* ================ Proteas ================ */ -int g_shm_fd = -1; +int g_shm_fd = -1; unsigned char *g_shm_base = NULL; -char g_shm_file_path[L_tmpnam]; +char g_shm_file_path[L_tmpnam]; /* ========================================= */ #else -static s32 shm_id; /* ID of the SHM region */ +static s32 shm_id; /* ID of the SHM region */ #endif /* Get rid of shared memory (atexit handler). */ void remove_shm(void) { + #ifdef USEMMAP if (g_shm_base != NULL) { + munmap(g_shm_base, MAP_SIZE); g_shm_base = NULL; + } if (g_shm_fd != -1) { + close(g_shm_fd); g_shm_fd = -1; + } + #else shmctl(shm_id, IPC_RMID, NULL); #endif -} +} /* Configure shared memory. 
*/ void setup_shm(unsigned char dumb_mode) { + #ifdef USEMMAP /* generate random file name for multi instance */ - /* thanks to f*cking glibc we can not use tmpnam securely, it generates a security warning that cannot be suppressed */ + /* thanks to f*cking glibc we can not use tmpnam securely, it generates a + * security warning that cannot be suppressed */ /* so we do this worse workaround */ snprintf(g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random()); /* create the shared memory segment as if it was a file */ g_shm_fd = shm_open(g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600); - if (g_shm_fd == -1) { - PFATAL("shm_open() failed"); - } + if (g_shm_fd == -1) { PFATAL("shm_open() failed"); } /* configure the size of the shared memory segment */ if (ftruncate(g_shm_fd, MAP_SIZE)) { + PFATAL("setup_shm(): ftruncate() failed"); + } /* map the shared memory segment to the address space of the process */ - g_shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0); + g_shm_base = + mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0); if (g_shm_base == MAP_FAILED) { + close(g_shm_fd); g_shm_fd = -1; PFATAL("mmap() failed"); + } atexit(remove_shm); @@ -108,7 +119,7 @@ void setup_shm(unsigned char dumb_mode) { trace_bits = g_shm_base; if (!trace_bits) PFATAL("mmap() failed"); - + #else u8* shm_str; @@ -132,9 +143,10 @@ void setup_shm(unsigned char dumb_mode) { ck_free(shm_str); trace_bits = shmat(shm_id, NULL, 0); - + if (!trace_bits) PFATAL("shmat() failed"); #endif + } diff --git a/src/afl-showmap.c b/src/afl-showmap.c index ee00bf22..ac3d687d 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -24,7 +24,7 @@ #define AFL_MAIN #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -51,61 +51,54 @@ #include #include -static s32 child_pid; /* PID of the tested program */ +static s32 child_pid; /* PID of the tested program */ - u8* trace_bits; /* SHM with instrumentation bitmap */ +u8* trace_bits; /* SHM with instrumentation bitmap */ -static u8 *out_file, /* Trace output file */ - *doc_path, /* Path to docs */ - *target_path, /* Path to target binary */ - *at_file; /* Substitution string for @@ */ +static u8 *out_file, /* Trace output file */ + *doc_path, /* Path to docs */ + *target_path, /* Path to target binary */ + *at_file; /* Substitution string for @@ */ -static u32 exec_tmout; /* Exec timeout (ms) */ +static u32 exec_tmout; /* Exec timeout (ms) */ -static u32 total, highest; /* tuple content information */ +static u32 total, highest; /* tuple content information */ -static u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ +static u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ -static u8 quiet_mode, /* Hide non-essential messages? */ - edges_only, /* Ignore hit counts? */ - raw_instr_output, /* Do not apply AFL filters */ - cmin_mode, /* Generate output in afl-cmin mode? */ - binary_mode, /* Write output as a binary map */ - keep_cores; /* Allow coredumps? */ +static u8 quiet_mode, /* Hide non-essential messages? */ + edges_only, /* Ignore hit counts? */ + raw_instr_output, /* Do not apply AFL filters */ + cmin_mode, /* Generate output in afl-cmin mode? */ + binary_mode, /* Write output as a binary map */ + keep_cores; /* Allow coredumps? */ -static volatile u8 - stop_soon, /* Ctrl-C pressed? */ - child_timed_out, /* Child timed out? */ - child_crashed; /* Child crashed? */ +static volatile u8 stop_soon, /* Ctrl-C pressed? 
*/ + child_timed_out, /* Child timed out? */ + child_crashed; /* Child crashed? */ /* Classify tuple counts. Instead of mapping to individual bits, as in afl-fuzz.c, we map to more user-friendly numbers between 1 and 8. */ static const u8 count_class_human[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 3, - [4 ... 7] = 4, - [8 ... 15] = 5, - [16 ... 31] = 6, - [32 ... 127] = 7, - [128 ... 255] = 8 + [0] = 0, [1] = 1, [2] = 2, [3] = 3, + [4 ... 7] = 4, [8 ... 15] = 5, [16 ... 31] = 6, [32 ... 127] = 7, + [128 ... 255] = 8 }; static const u8 count_class_binary[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 }; @@ -116,22 +109,25 @@ static void classify_counts(u8* mem, const u8* map) { if (edges_only) { while (i--) { + if (*mem) *mem = 1; mem++; + } } else if (!raw_instr_output) { while (i--) { + *mem = map[*mem]; mem++; + } } } - /* Write results. */ static u32 write_results(void) { @@ -139,8 +135,8 @@ static u32 write_results(void) { s32 fd; u32 i, ret = 0; - u8 cco = !!getenv("AFL_CMIN_CRASHES_ONLY"), - caa = !!getenv("AFL_CMIN_ALLOW_ANY"); + u8 cco = !!getenv("AFL_CMIN_CRASHES_ONLY"), + caa = !!getenv("AFL_CMIN_ALLOW_ANY"); if (!strncmp(out_file, "/dev/", 5)) { @@ -154,7 +150,7 @@ static u32 write_results(void) { } else { - unlink(out_file); /* Ignore errors */ + unlink(out_file); /* Ignore errors */ fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", out_file); @@ -164,7 +160,7 @@ static u32 write_results(void) { for (i = 0; i < MAP_SIZE; i++) if (trace_bits[i]) ret++; - + ck_write(fd, trace_bits, MAP_SIZE, out_file); close(fd); @@ -178,10 +174,9 @@ static u32 write_results(void) { if (!trace_bits[i]) continue; ret++; - + total += trace_bits[i]; - if (highest < trace_bits[i]) - highest = trace_bits[i]; + if (highest < trace_bits[i]) highest = trace_bits[i]; if (cmin_mode) { @@ -190,10 +185,12 @@ static u32 write_results(void) { fprintf(f, "%u%u\n", trace_bits[i], i); - } else fprintf(f, "%06u:%u\n", i, trace_bits[i]); + } else + + fprintf(f, "%06u:%u\n", i, trace_bits[i]); } - + fclose(f); } @@ -202,7 +199,6 @@ static u32 write_results(void) { } - /* Handle timeout signal. */ static void handle_timeout(int sig) { @@ -212,16 +208,14 @@ static void handle_timeout(int sig) { } - /* Execute target application. 
*/ static void run_target(char** argv) { static struct itimerval it; - int status = 0; + int status = 0; - if (!quiet_mode) - SAYF("-- Program output begins --\n" cRST); + if (!quiet_mode) SAYF("-- Program output begins --\n" cRST); MEM_BARRIER(); @@ -238,8 +232,10 @@ static void run_target(char** argv) { s32 fd = open("/dev/null", O_RDWR); if (fd < 0 || dup2(fd, 1) < 0 || dup2(fd, 2) < 0) { + *(u32*)trace_bits = EXEC_FAIL_SIG; PFATAL("Descriptor initialization failed"); + } close(fd); @@ -252,20 +248,22 @@ static void run_target(char** argv) { #ifdef RLIMIT_AS - setrlimit(RLIMIT_AS, &r); /* Ignore errors */ + setrlimit(RLIMIT_AS, &r); /* Ignore errors */ #else - setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ + setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ #endif /* ^RLIMIT_AS */ } - if (!keep_cores) r.rlim_max = r.rlim_cur = 0; - else r.rlim_max = r.rlim_cur = RLIM_INFINITY; + if (!keep_cores) + r.rlim_max = r.rlim_cur = 0; + else + r.rlim_max = r.rlim_cur = RLIM_INFINITY; - setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ + setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0); @@ -304,14 +302,12 @@ static void run_target(char** argv) { if (*(u32*)trace_bits == EXEC_FAIL_SIG) FATAL("Unable to execute '%s'", argv[0]); - classify_counts(trace_bits, binary_mode ? - count_class_binary : count_class_human); + classify_counts(trace_bits, + binary_mode ? count_class_binary : count_class_human); - if (!quiet_mode) - SAYF(cRST "-- Program output ends --\n"); + if (!quiet_mode) SAYF(cRST "-- Program output ends --\n"); - if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) - child_crashed = 1; + if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1; if (!quiet_mode) { @@ -320,14 +316,13 @@ static void run_target(char** argv) { else if (stop_soon) SAYF(cLRD "\n+++ Program aborted by user +++\n" cRST); else if (child_crashed) - SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status)); + SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, + WTERMSIG(status)); } - } - /* Handle Ctrl-C and the like. */ static void handle_stop_sig(int sig) { @@ -338,15 +333,16 @@ static void handle_stop_sig(int sig) { } - /* Do basic preparations - persistent fds, filenames, etc. */ static void set_up_environment(void) { - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" @@ -355,21 +351,22 @@ static void set_up_environment(void) { "msan_track_origins=0", 0); if (getenv("AFL_PRELOAD")) { + setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); + } } - /* Setup signal handlers, duh. */ static void setup_signal_handlers(void) { struct sigaction sa; - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; sa.sa_sigaction = NULL; sigemptyset(&sa.sa_mask); @@ -388,7 +385,6 @@ static void setup_signal_handlers(void) { } - /* Show banner. */ static void show_banner(void) { @@ -403,42 +399,43 @@ static void usage(u8* argv0) { show_banner(); - SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" + SAYF( + "\n%s [ options ] -- /path/to/target_app [ ... 
]\n\n" - "Required parameters:\n\n" + "Required parameters:\n\n" - " -o file - file to write the trace data to\n\n" + " -o file - file to write the trace data to\n\n" - "Execution control settings:\n\n" + "Execution control settings:\n\n" - " -t msec - timeout for each run (none)\n" - " -m megs - memory limit for child process (%d MB)\n" - " -Q - use binary-only instrumentation (QEMU mode)\n" - " -U - use Unicorn-based instrumentation (Unicorn mode)\n" - " (Not necessary, here for consistency with other afl-* tools)\n\n" + " -t msec - timeout for each run (none)\n" + " -m megs - memory limit for child process (%d MB)\n" + " -Q - use binary-only instrumentation (QEMU mode)\n" + " -U - use Unicorn-based instrumentation (Unicorn mode)\n" + " (Not necessary, here for consistency with other afl-* " + "tools)\n\n" - "Other settings:\n\n" + "Other settings:\n\n" - " -q - sink program's output and don't show messages\n" - " -e - show edge coverage only, ignore hit counts\n" - " -r - show real tuple values instead of AFL filter values\n" - " -c - allow core dumps\n\n" + " -q - sink program's output and don't show messages\n" + " -e - show edge coverage only, ignore hit counts\n" + " -r - show real tuple values instead of AFL filter values\n" + " -c - allow core dumps\n\n" - "This tool displays raw tuple data captured by AFL instrumentation.\n" - "For additional help, consult %s/README.\n\n" cRST, + "This tool displays raw tuple data captured by AFL instrumentation.\n" + "For additional help, consult %s/README.\n\n" cRST, - argv0, MEM_LIMIT, doc_path); + argv0, MEM_LIMIT, doc_path); exit(1); } - /* Find binary. */ static void find_binary(u8* fname) { - u8* env_path = 0; + u8* env_path = 0; struct stat st; if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { @@ -461,7 +458,9 @@ static void find_binary(u8* fname) { memcpy(cur_elem, env_path, delim - env_path); delim++; - } else cur_elem = ck_strdup(env_path); + } else + + cur_elem = ck_strdup(env_path); env_path = delim; @@ -473,7 +472,8 @@ static void find_binary(u8* fname) { ck_free(cur_elem); if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && st.st_size >= 4) break; + (st.st_mode & 0111) && st.st_size >= 4) + break; ck_free(target_path); target_path = 0; @@ -486,13 +486,12 @@ static void find_binary(u8* fname) { } - /* Fix up argv for QEMU. */ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; + u8 * tmp, *cp, *rsl, *own_copy; memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); @@ -507,8 +506,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); + if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp); target_path = new_argv[0] = cp; return new_argv; @@ -532,7 +530,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } - } else ck_free(own_copy); + } else + + ck_free(own_copy); if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { @@ -556,7 +556,7 @@ int main(int argc, char** argv) { doc_path = access(DOC_PATH, F_OK) ? 
"docs" : DOC_PATH; - while ((opt = getopt(argc,argv,"+o:m:t:A:eqZQUbcr")) > 0) + while ((opt = getopt(argc, argv, "+o:m:t:A:eqZQUbcr")) > 0) switch (opt) { @@ -568,40 +568,41 @@ int main(int argc, char** argv) { case 'm': { - u8 suffix = 'M'; + u8 suffix = 'M'; - if (mem_limit_given) FATAL("Multiple -m options not supported"); - mem_limit_given = 1; + if (mem_limit_given) FATAL("Multiple -m options not supported"); + mem_limit_given = 1; - if (!strcmp(optarg, "none")) { + if (!strcmp(optarg, "none")) { - mem_limit = 0; - break; - - } - - if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -m"); - - switch (suffix) { - - case 'T': mem_limit *= 1024 * 1024; break; - case 'G': mem_limit *= 1024; break; - case 'k': mem_limit /= 1024; break; - case 'M': break; - - default: FATAL("Unsupported suffix or bad syntax for -m"); - - } - - if (mem_limit < 5) FATAL("Dangerously low value of -m"); - - if (sizeof(rlim_t) == 4 && mem_limit > 2000) - FATAL("Value of -m out of range on 32-bit systems"); + mem_limit = 0; + break; } - break; + if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || + optarg[0] == '-') + FATAL("Bad syntax used for -m"); + + switch (suffix) { + + case 'T': mem_limit *= 1024 * 1024; break; + case 'G': mem_limit *= 1024; break; + case 'k': mem_limit /= 1024; break; + case 'M': break; + + default: FATAL("Unsupported suffix or bad syntax for -m"); + + } + + if (mem_limit < 5) FATAL("Dangerously low value of -m"); + + if (sizeof(rlim_t) == 4 && mem_limit > 2000) + FATAL("Value of -m out of range on 32-bit systems"); + + } + + break; case 't': @@ -609,6 +610,7 @@ int main(int argc, char** argv) { timeout_given = 1; if (strcmp(optarg, "none")) { + exec_tmout = atoi(optarg); if (exec_tmout < 20 || optarg[0] == '-') @@ -636,7 +638,7 @@ int main(int argc, char** argv) { /* This is an undocumented option to write data in the syntax expected by afl-cmin. Nobody else should have any use for this. */ - cmin_mode = 1; + cmin_mode = 1; quiet_mode = 1; break; @@ -675,7 +677,7 @@ int main(int argc, char** argv) { if (keep_cores) FATAL("Multiple -c options not supported"); keep_cores = 1; break; - + case 'r': if (raw_instr_output) FATAL("Multiple -r options not supported"); @@ -683,9 +685,7 @@ int main(int argc, char** argv) { raw_instr_output = 1; break; - default: - - usage(argv[0]); + default: usage(argv[0]); } @@ -699,8 +699,10 @@ int main(int argc, char** argv) { find_binary(argv[optind]); if (!quiet_mode) { + show_banner(); ACTF("Executing '%s'...\n", target_path); + } detect_file_args(argv + optind, at_file); @@ -717,7 +719,8 @@ int main(int argc, char** argv) { if (!quiet_mode) { if (!tcnt) FATAL("No instrumentation detected" cRST); - OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST, tcnt, highest, total, out_file); + OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." 
cRST, + tcnt, highest, total, out_file); } diff --git a/src/afl-tmin.c b/src/afl-tmin.c index 529720ca..9decdb4d 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -22,7 +22,7 @@ #define AFL_MAIN #ifdef __ANDROID__ - #include "android-ashmem.h" +# include "android-ashmem.h" #endif #include "config.h" @@ -51,72 +51,71 @@ #include #include -s32 forksrv_pid, /* PID of the fork server */ - child_pid; /* PID of the tested program */ +s32 forksrv_pid, /* PID of the fork server */ + child_pid; /* PID of the tested program */ -s32 fsrv_ctl_fd, /* Fork server control pipe (write) */ - fsrv_st_fd; /* Fork server status pipe (read) */ +s32 fsrv_ctl_fd, /* Fork server control pipe (write) */ + fsrv_st_fd; /* Fork server status pipe (read) */ - u8 *trace_bits; /* SHM with instrumentation bitmap */ -static u8 *mask_bitmap; /* Mask for trace bits (-B) */ +u8* trace_bits; /* SHM with instrumentation bitmap */ +static u8* mask_bitmap; /* Mask for trace bits (-B) */ - u8 *in_file, /* Minimizer input test case */ - *output_file, /* Minimizer output file */ - *out_file, /* Targeted program input file */ - *target_path, /* Path to target binary */ - *doc_path; /* Path to docs */ +u8 *in_file, /* Minimizer input test case */ + *output_file, /* Minimizer output file */ + *out_file, /* Targeted program input file */ + *target_path, /* Path to target binary */ + *doc_path; /* Path to docs */ - s32 out_fd; /* Persistent fd for out_file */ +s32 out_fd; /* Persistent fd for out_file */ -static u8* in_data; /* Input data for trimming */ +static u8* in_data; /* Input data for trimming */ -static u32 in_len, /* Input data length */ - orig_cksum, /* Original checksum */ - total_execs, /* Total number of execs */ - missed_hangs, /* Misses due to hangs */ - missed_crashes, /* Misses due to crashes */ - missed_paths; /* Misses due to exec path diffs */ - u32 exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ +static u32 in_len, /* Input data length */ + orig_cksum, /* Original checksum */ + total_execs, /* Total number of execs */ + missed_hangs, /* Misses due to hangs */ + missed_crashes, /* Misses due to crashes */ + missed_paths; /* Misses due to exec path diffs */ +u32 exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */ - u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ +u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */ - s32 dev_null_fd = -1; /* FD to /dev/null */ +s32 dev_null_fd = -1; /* FD to /dev/null */ -static u8 crash_mode, /* Crash-centric mode? */ - exit_crash, /* Treat non-zero exit as crash? */ - edges_only, /* Ignore hit counts? */ - exact_mode, /* Require path match for crashes? */ - use_stdin = 1; /* Use stdin for program input? */ +static u8 crash_mode, /* Crash-centric mode? */ + exit_crash, /* Treat non-zero exit as crash? */ + edges_only, /* Ignore hit counts? */ + exact_mode, /* Require path match for crashes? */ + use_stdin = 1; /* Use stdin for program input? */ -static volatile u8 - stop_soon; /* Ctrl-C pressed? */ +static volatile u8 stop_soon; /* Ctrl-C pressed? */ /* * forkserver section */ /* we only need this to use afl-forkserver */ -FILE *plot_file; -u8 uses_asan; -s32 out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1; +FILE* plot_file; +u8 uses_asan; +s32 out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1; /* we import this as we need this information */ extern u8 child_timed_out; - -/* Classify tuple counts. This is a slow & naive version, but good enough here. */ +/* Classify tuple counts. This is a slow & naive version, but good enough here. 
+ */ static const u8 count_class_lookup[256] = { - [0] = 0, - [1] = 1, - [2] = 2, - [3] = 4, - [4 ... 7] = 8, - [8 ... 15] = 16, - [16 ... 31] = 32, - [32 ... 127] = 64, - [128 ... 255] = 128 + [0] = 0, + [1] = 1, + [2] = 2, + [3] = 4, + [4 ... 7] = 8, + [8 ... 15] = 16, + [16 ... 31] = 32, + [32 ... 127] = 64, + [128 ... 255] = 128 }; @@ -127,22 +126,25 @@ static void classify_counts(u8* mem) { if (edges_only) { while (i--) { + if (*mem) *mem = 1; mem++; + } } else { while (i--) { + *mem = count_class_lookup[*mem]; mem++; + } } } - /* Apply mask to classified bitmap (if set). */ static void apply_mask(u32* mem, u32* mask) { @@ -161,25 +163,26 @@ static void apply_mask(u32* mem, u32* mask) { } - /* See if any bytes are set in the bitmap. */ static inline u8 anything_set(void) { u32* ptr = (u32*)trace_bits; - u32 i = (MAP_SIZE >> 2); + u32 i = (MAP_SIZE >> 2); - while (i--) if (*(ptr++)) return 1; + while (i--) + if (*(ptr++)) return 1; return 0; } - /* Get rid of temp files (atexit handler). */ static void at_exit_handler(void) { - if (out_file) unlink(out_file); /* Ignore errors */ + + if (out_file) unlink(out_file); /* Ignore errors */ + } /* Read initial file. */ @@ -187,17 +190,16 @@ static void at_exit_handler(void) { static void read_initial_file(void) { struct stat st; - s32 fd = open(in_file, O_RDONLY); + s32 fd = open(in_file, O_RDONLY); if (fd < 0) PFATAL("Unable to open '%s'", in_file); - if (fstat(fd, &st) || !st.st_size) - FATAL("Zero-sized input file."); + if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file."); if (st.st_size >= TMIN_MAX_FILE) FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024); - in_len = st.st_size; + in_len = st.st_size; in_data = ck_alloc_nozero(in_len); ck_read(fd, in_data, in_len, in_file); @@ -208,14 +210,13 @@ static void read_initial_file(void) { } - /* Write output file. */ static s32 write_to_file(u8* path, u8* mem, u32 len) { s32 ret; - unlink(path); /* Ignore errors */ + unlink(path); /* Ignore errors */ ret = open(path, O_RDWR | O_CREAT | O_EXCL, 0600); @@ -239,13 +240,15 @@ static void write_to_testcase(void* mem, u32 len) { if (!use_stdin) { - unlink(out_file); /* Ignore errors. */ + unlink(out_file); /* Ignore errors. */ fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); if (fd < 0) PFATAL("Unable to create '%s'", out_file); - } else lseek(fd, 0, SEEK_SET); + } else + + lseek(fd, 0, SEEK_SET); ck_write(fd, mem, len, out_file); @@ -254,12 +257,12 @@ static void write_to_testcase(void* mem, u32 len) { if (ftruncate(fd, len)) PFATAL("ftruncate() failed"); lseek(fd, 0, SEEK_SET); - } else close(fd); + } else + + close(fd); } - - /* Handle timeout signal. */ /* static void handle_timeout(int sig) { @@ -277,11 +280,13 @@ static void handle_timeout(int sig) { } } + */ /* start the app and it's forkserver */ /* static void init_forkserver(char **argv) { + static struct itimerval it; int st_pipe[2], ctl_pipe[2]; int status = 0; @@ -348,7 +353,7 @@ static void init_forkserver(char **argv) { } - // Close the unneeded endpoints. + // Close the unneeded endpoints. close(ctl_pipe[0]); close(st_pipe[1]); @@ -378,8 +383,10 @@ static void init_forkserver(char **argv) { // Otherwise, try to figure out what went wrong. if (rlen == 4) { + ACTF("All right - fork server is up."); return; + } if (waitpid(forksrv_pid, &status, 0) <= 0) @@ -398,6 +405,7 @@ static void init_forkserver(char **argv) { SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status)); } + */ /* Execute target application. 
Returns 0 if the changes are a dud, or @@ -406,8 +414,8 @@ static void init_forkserver(char **argv) { static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { static struct itimerval it; - static u32 prev_timed_out = 0; - int status = 0; + static u32 prev_timed_out = 0; + int status = 0; u32 cksum; @@ -440,8 +448,10 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { /* Configure timeout, wait for child, cancel timeout. */ if (exec_tmout) { + it.it_value.tv_sec = (exec_tmout / 1000); it.it_value.tv_usec = (exec_tmout % 1000) * 1000; + } setitimer(ITIMER_REAL, &it, NULL); @@ -508,9 +518,9 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { } else - /* Handle non-crashing inputs appropriately. */ + /* Handle non-crashing inputs appropriately. */ - if (crash_mode) { + if (crash_mode) { missed_paths++; return 0; @@ -522,24 +532,23 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) { if (first_run) orig_cksum = cksum; if (orig_cksum == cksum) return 1; - + missed_paths++; return 0; } - /* Find first power of two greater or equal to val. */ static u32 next_p2(u32 val) { u32 ret = 1; - while (val > ret) ret <<= 1; + while (val > ret) + ret <<= 1; return ret; } - /* Actually minimize! */ static void minimize(char** argv) { @@ -557,8 +566,8 @@ static void minimize(char** argv) { * BLOCK NORMALIZATION * ***********************/ - set_len = next_p2(in_len / TMIN_SET_STEPS); - set_pos = 0; + set_len = next_p2(in_len / TMIN_SET_STEPS); + set_pos = 0; if (set_len < TMIN_SET_MIN_SIZE) set_len = TMIN_SET_MIN_SIZE; @@ -575,14 +584,14 @@ static void minimize(char** argv) { memcpy(tmp_buf, in_data, in_len); memset(tmp_buf + set_pos, '0', use_len); - - u8 res; + + u8 res; res = run_target(argv, tmp_buf, in_len, 0); if (res) { memset(in_data + set_pos, '0', use_len); -/* changed_any = 1; value is not used */ + /* changed_any = 1; value is not used */ alpha_del0 += use_len; } @@ -615,11 +624,11 @@ next_pass: next_del_blksize: if (!del_len) del_len = 1; - del_pos = 0; + del_pos = 0; prev_del = 1; - SAYF(cGRA " Block length = %u, remaining size = %u\n" cRST, - del_len, in_len); + SAYF(cGRA " Block length = %u, remaining size = %u\n" cRST, del_len, + in_len); while (del_pos < in_len) { @@ -634,8 +643,8 @@ next_del_blksize: very end of the buffer (tail_len > 0), and the current block is the same as the previous one... skip this step as a no-op. */ - if (!prev_del && tail_len && !memcmp(in_data + del_pos - del_len, - in_data + del_pos, del_len)) { + if (!prev_del && tail_len && + !memcmp(in_data + del_pos - del_len, in_data + del_pos, del_len)) { del_pos += del_len; continue; @@ -656,11 +665,13 @@ next_del_blksize: memcpy(in_data, tmp_buf, del_pos + tail_len); prev_del = 1; - in_len = del_pos + tail_len; + in_len = del_pos + tail_len; changed_any = 1; - } else del_pos += del_len; + } else + + del_pos += del_len; } @@ -674,7 +685,8 @@ next_del_blksize: OKF("Block removal complete, %u bytes deleted.", stage_o_len - in_len); if (!in_len && changed_any) - WARNF(cLRD "Down to zero bytes - check the command line and mem limit!" cRST); + WARNF(cLRD + "Down to zero bytes - check the command line and mem limit!" 
cRST); if (cur_pass > 1 && !changed_any) goto finalize_all; @@ -682,15 +694,17 @@ next_del_blksize: * ALPHABET MINIMIZATION * *************************/ - alpha_size = 0; - alpha_del1 = 0; + alpha_size = 0; + alpha_del1 = 0; syms_removed = 0; memset(alpha_map, 0, sizeof(alpha_map)); for (i = 0; i < in_len; i++) { + if (!alpha_map[in_data[i]]) alpha_size++; alpha_map[in_data[i]]++; + } ACTF(cBRI "Stage #2: " cRST "Minimizing symbols (%u code point%s)...", @@ -699,14 +713,14 @@ next_del_blksize: for (i = 0; i < 256; i++) { u32 r; - u8 res; + u8 res; if (i == '0' || !alpha_map[i]) continue; memcpy(tmp_buf, in_data, in_len); for (r = 0; r < in_len; r++) - if (tmp_buf[r] == i) tmp_buf[r] = '0'; + if (tmp_buf[r] == i) tmp_buf[r] = '0'; res = run_target(argv, tmp_buf, in_len, 0); @@ -724,8 +738,8 @@ next_del_blksize: alpha_d_total += alpha_del1; OKF("Symbol minimization finished, %u symbol%s (%u byte%s) replaced.", - syms_removed, syms_removed == 1 ? "" : "s", - alpha_del1, alpha_del1 == 1 ? "" : "s"); + syms_removed, syms_removed == 1 ? "" : "s", alpha_del1, + alpha_del1 == 1 ? "" : "s"); /************************** * CHARACTER MINIMIZATION * @@ -752,36 +766,34 @@ next_del_blksize: alpha_del2++; changed_any = 1; - } else tmp_buf[i] = orig; + } else + + tmp_buf[i] = orig; } alpha_d_total += alpha_del2; - OKF("Character minimization done, %u byte%s replaced.", - alpha_del2, alpha_del2 == 1 ? "" : "s"); + OKF("Character minimization done, %u byte%s replaced.", alpha_del2, + alpha_del2 == 1 ? "" : "s"); if (changed_any) goto next_pass; finalize_all: - SAYF("\n" - cGRA " File size reduced by : " cRST "%0.02f%% (to %u byte%s)\n" - cGRA " Characters simplified : " cRST "%0.02f%%\n" - cGRA " Number of execs done : " cRST "%u\n" - cGRA " Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n", + SAYF("\n" cGRA " File size reduced by : " cRST + "%0.02f%% (to %u byte%s)\n" cGRA " Characters simplified : " cRST + "%0.02f%%\n" cGRA " Number of execs done : " cRST "%u\n" cGRA + " Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n", 100 - ((double)in_len) * 100 / orig_len, in_len, in_len == 1 ? "" : "s", - ((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1), - total_execs, missed_paths, missed_crashes, missed_hangs ? cLRD : "", - missed_hangs); + ((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1), total_execs, + missed_paths, missed_crashes, missed_hangs ? cLRD : "", missed_hangs); if (total_execs > 50 && missed_hangs * 10 > total_execs) WARNF(cLRD "Frequent timeouts - results may be skewed." cRST); } - - /* Handle Ctrl-C and the like. */ static void handle_stop_sig(int sig) { @@ -792,7 +804,6 @@ static void handle_stop_sig(int sig) { } - /* Do basic preparations - persistent fds, filenames, etc. */ static void set_up_environment(void) { @@ -823,7 +834,6 @@ static void set_up_environment(void) { if (out_fd < 0) PFATAL("Unable to create '%s'", out_file); - /* Set sane defaults... 
*/ x = getenv("ASAN_OPTIONS"); @@ -843,18 +853,20 @@ static void set_up_environment(void) { if (x) { if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) - FATAL("Custom MSAN_OPTIONS set without exit_code=" - STRINGIFY(MSAN_ERROR) " - please fix!"); + FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY( + MSAN_ERROR) " - please fix!"); if (!strstr(x, "symbolize=0")) FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); } - setenv("ASAN_OPTIONS", "abort_on_error=1:" - "detect_leaks=0:" - "symbolize=0:" - "allocator_may_return_null=1", 0); + setenv("ASAN_OPTIONS", + "abort_on_error=1:" + "detect_leaks=0:" + "symbolize=0:" + "allocator_may_return_null=1", + 0); setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" "symbolize=0:" @@ -863,21 +875,22 @@ static void set_up_environment(void) { "msan_track_origins=0", 0); if (getenv("AFL_PRELOAD")) { + setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); + } } - /* Setup signal handlers, duh. */ static void setup_signal_handlers(void) { struct sigaction sa; - sa.sa_handler = NULL; - sa.sa_flags = SA_RESTART; + sa.sa_handler = NULL; + sa.sa_flags = SA_RESTART; sa.sa_sigaction = NULL; sigemptyset(&sa.sa_mask); @@ -896,46 +909,46 @@ static void setup_signal_handlers(void) { } - /* Display usage hints. */ static void usage(u8* argv0) { - SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" + SAYF( + "\n%s [ options ] -- /path/to/target_app [ ... ]\n\n" - "Required parameters:\n\n" + "Required parameters:\n\n" - " -i file - input test case to be shrunk by the tool\n" - " -o file - final output location for the minimized data\n\n" + " -i file - input test case to be shrunk by the tool\n" + " -o file - final output location for the minimized data\n\n" - "Execution control settings:\n\n" + "Execution control settings:\n\n" - " -f file - input file read by the tested program (stdin)\n" - " -t msec - timeout for each run (%d ms)\n" - " -m megs - memory limit for child process (%d MB)\n" - " -Q - use binary-only instrumentation (QEMU mode)\n" - " -U - use Unicorn-based instrumentation (Unicorn mode)\n\n" - " (Not necessary, here for consistency with other afl-* tools)\n\n" + " -f file - input file read by the tested program (stdin)\n" + " -t msec - timeout for each run (%d ms)\n" + " -m megs - memory limit for child process (%d MB)\n" + " -Q - use binary-only instrumentation (QEMU mode)\n" + " -U - use Unicorn-based instrumentation (Unicorn mode)\n\n" + " (Not necessary, here for consistency with other afl-* " + "tools)\n\n" - "Minimization settings:\n\n" + "Minimization settings:\n\n" - " -e - solve for edge coverage only, ignore hit counts\n" - " -x - treat non-zero exit codes as crashes\n\n" + " -e - solve for edge coverage only, ignore hit counts\n" + " -x - treat non-zero exit codes as crashes\n\n" - "For additional tips, please consult %s/README.\n\n", + "For additional tips, please consult %s/README.\n\n", - argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); + argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); exit(1); } - /* Find binary. 
*/ static void find_binary(u8* fname) { - u8* env_path = 0; + u8* env_path = 0; struct stat st; if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { @@ -958,7 +971,9 @@ static void find_binary(u8* fname) { memcpy(cur_elem, env_path, delim - env_path); delim++; - } else cur_elem = ck_strdup(env_path); + } else + + cur_elem = ck_strdup(env_path); env_path = delim; @@ -970,7 +985,8 @@ static void find_binary(u8* fname) { ck_free(cur_elem); if (!stat(target_path, &st) && S_ISREG(st.st_mode) && - (st.st_mode & 0111) && st.st_size >= 4) break; + (st.st_mode & 0111) && st.st_size >= 4) + break; ck_free(target_path); target_path = 0; @@ -983,13 +999,12 @@ static void find_binary(u8* fname) { } - /* Fix up argv for QEMU. */ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); - u8 *tmp, *cp, *rsl, *own_copy; + u8 * tmp, *cp, *rsl, *own_copy; memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); @@ -1004,8 +1019,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { cp = alloc_printf("%s/afl-qemu-trace", tmp); - if (access(cp, X_OK)) - FATAL("Unable to find '%s'", tmp); + if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp); target_path = new_argv[0] = cp; return new_argv; @@ -1029,7 +1043,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { } - } else ck_free(own_copy); + } else + + ck_free(own_copy); if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { @@ -1056,8 +1072,6 @@ static void read_bitmap(u8* fname) { } - - /* Main entry point */ int main(int argc, char** argv) { @@ -1070,7 +1084,7 @@ int main(int argc, char** argv) { SAYF(cCYA "afl-tmin" VERSION cRST " by \n"); - while ((opt = getopt(argc,argv,"+i:o:f:m:t:B:xeQU")) > 0) + while ((opt = getopt(argc, argv, "+i:o:f:m:t:B:xeQU")) > 0) switch (opt) { @@ -1090,7 +1104,7 @@ int main(int argc, char** argv) { if (out_file) FATAL("Multiple -f options not supported"); use_stdin = 0; - out_file = optarg; + out_file = optarg; break; case 'e': @@ -1107,40 +1121,41 @@ int main(int argc, char** argv) { case 'm': { - u8 suffix = 'M'; + u8 suffix = 'M'; - if (mem_limit_given) FATAL("Multiple -m options not supported"); - mem_limit_given = 1; + if (mem_limit_given) FATAL("Multiple -m options not supported"); + mem_limit_given = 1; - if (!strcmp(optarg, "none")) { + if (!strcmp(optarg, "none")) { - mem_limit = 0; - break; - - } - - if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || - optarg[0] == '-') FATAL("Bad syntax used for -m"); - - switch (suffix) { - - case 'T': mem_limit *= 1024 * 1024; break; - case 'G': mem_limit *= 1024; break; - case 'k': mem_limit /= 1024; break; - case 'M': break; - - default: FATAL("Unsupported suffix or bad syntax for -m"); - - } - - if (mem_limit < 5) FATAL("Dangerously low value of -m"); - - if (sizeof(rlim_t) == 4 && mem_limit > 2000) - FATAL("Value of -m out of range on 32-bit systems"); + mem_limit = 0; + break; } - break; + if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || + optarg[0] == '-') + FATAL("Bad syntax used for -m"); + + switch (suffix) { + + case 'T': mem_limit *= 1024 * 1024; break; + case 'G': mem_limit *= 1024; break; + case 'k': mem_limit /= 1024; break; + case 'M': break; + + default: FATAL("Unsupported suffix or bad syntax for -m"); + + } + + if (mem_limit < 5) FATAL("Dangerously low value of -m"); + + if (sizeof(rlim_t) == 4 && mem_limit > 2000) + FATAL("Value of -m out of range on 32-bit systems"); + + } + + break; case 't': @@ -1170,7 +1185,7 @@ int main(int argc, char** argv) { 
unicorn_mode = 1; break; - case 'B': /* load bitmap */ + case 'B': /* load bitmap */ /* This is a secret undocumented option! It is speculated to be useful if you have a baseline "boring" input file and another "interesting" @@ -1190,9 +1205,7 @@ int main(int argc, char** argv) { read_bitmap(optarg); break; - default: - - usage(argv[0]); + default: usage(argv[0]); } @@ -1230,15 +1243,16 @@ int main(int argc, char** argv) { if (!crash_mode) { - OKF("Program terminates normally, minimizing in " - cCYA "instrumented" cRST " mode."); + OKF("Program terminates normally, minimizing in " cCYA "instrumented" cRST + " mode."); - if (!anything_set()) FATAL("No instrumentation detected."); + if (!anything_set()) FATAL("No instrumentation detected."); } else { - OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST - " mode.", exact_mode ? "EXACT " : ""); + OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST + " mode.", + exact_mode ? "EXACT " : ""); } diff --git a/test-instr.c b/test-instr.c index 9107f15e..71838462 100644 --- a/test-instr.c +++ b/test-instr.c @@ -20,14 +20,16 @@ int main(int argc, char** argv) { - char buff[8]; - char *buf = buff; + char buff[8]; + char* buf = buff; if (argc > 1) buf = argv[1]; else if (read(0, buf, sizeof(buf)) < 1) { + printf("Hum?\n"); exit(1); + } if (buf[0] == '0') @@ -40,3 +42,4 @@ int main(int argc, char** argv) { exit(0); } + diff --git a/unicorn_mode/patches/afl-unicorn-common.h b/unicorn_mode/patches/afl-unicorn-common.h index 6798832c..d5038d06 100644 --- a/unicorn_mode/patches/afl-unicorn-common.h +++ b/unicorn_mode/patches/afl-unicorn-common.h @@ -32,19 +32,17 @@ #include "../../config.h" -/* NeverZero */ +/* NeverZero */ #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) -# define INC_AFL_AREA(loc) \ - asm volatile ( \ - "incb (%0, %1, 1)\n" \ - "adcb $0, (%0, %1, 1)\n" \ - : /* no out */ \ - : "r" (afl_area_ptr), "r" (loc) \ - : "memory", "eax" \ - ) +# define INC_AFL_AREA(loc) \ + asm volatile( \ + "incb (%0, %1, 1)\n" \ + "adcb $0, (%0, %1, 1)\n" \ + : /* no out */ \ + : "r"(afl_area_ptr), "r"(loc) \ + : "memory", "eax") #else -# define INC_AFL_AREA(loc) \ - afl_area_ptr[loc]++ +# define INC_AFL_AREA(loc) afl_area_ptr[loc]++ #endif diff --git a/unicorn_mode/patches/afl-unicorn-cpu-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-inl.h index a713e4ca..082d6d68 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-inl.h @@ -44,21 +44,29 @@ it to translate within its own context, too (this avoids translation overhead in the next forked-off copy). */ -#define AFL_UNICORN_CPU_SNIPPET1 do { \ +#define AFL_UNICORN_CPU_SNIPPET1 \ + do { \ + \ afl_request_tsl(pc, cs_base, flags); \ + \ } while (0) /* This snippet kicks in when the instruction pointer is positioned at _start and does the usual forkserver stuff, not very different from regular instrumentation injected via afl-as.h. 
*/ -#define AFL_UNICORN_CPU_SNIPPET2 do { \ - if(unlikely(afl_first_instr == 0)) { \ - afl_setup(env->uc); \ - afl_forkserver(env); \ - afl_first_instr = 1; \ - } \ - afl_maybe_log(env->uc, tb->pc); \ +#define AFL_UNICORN_CPU_SNIPPET2 \ + do { \ + \ + if (unlikely(afl_first_instr == 0)) { \ + \ + afl_setup(env->uc); \ + afl_forkserver(env); \ + afl_first_instr = 1; \ + \ + } \ + afl_maybe_log(env->uc, tb->pc); \ + \ } while (0) /* We use one additional file descriptor to relay "needs translation" @@ -69,26 +77,28 @@ /* Set in the child process in forkserver mode: */ static unsigned char afl_fork_child; -static unsigned int afl_forksrv_pid; +static unsigned int afl_forksrv_pid; /* Function declarations. */ -static void afl_setup(struct uc_struct* uc); -static void afl_forkserver(CPUArchState*); +static void afl_setup(struct uc_struct* uc); +static void afl_forkserver(CPUArchState*); static inline void afl_maybe_log(struct uc_struct* uc, unsigned long); static void afl_wait_tsl(CPUArchState*, int); static void afl_request_tsl(target_ulong, target_ulong, uint64_t); -static TranslationBlock *tb_find_slow(CPUArchState*, target_ulong, - target_ulong, uint64_t); +static TranslationBlock* tb_find_slow(CPUArchState*, target_ulong, target_ulong, + uint64_t); /* Data structure passed around by the translate handlers: */ struct afl_tsl { + target_ulong pc; target_ulong cs_base; - uint64_t flags; + uint64_t flags; + }; /************************* @@ -99,8 +109,7 @@ struct afl_tsl { static void afl_setup(struct uc_struct* uc) { - char *id_str = getenv(SHM_ENV_VAR), - *inst_r = getenv("AFL_INST_RATIO"); + char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO"); int shm_id; @@ -116,9 +125,9 @@ static void afl_setup(struct uc_struct* uc) { uc->afl_inst_rms = MAP_SIZE * r / 100; } else { - + uc->afl_inst_rms = MAP_SIZE; - + } if (id_str) { @@ -132,22 +141,22 @@ static void afl_setup(struct uc_struct* uc) { so that the parent doesn't give up on us. */ if (inst_r) uc->afl_area_ptr[0] = 1; - } - - /* Maintain for compatibility */ - if (getenv("AFL_QEMU_COMPCOV")) { - uc->afl_compcov_level = 1; } + + /* Maintain for compatibility */ + if (getenv("AFL_QEMU_COMPCOV")) { uc->afl_compcov_level = 1; } if (getenv("AFL_COMPCOV_LEVEL")) { uc->afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); + } + } /* Fork server logic, invoked once we hit first emulated instruction. */ -static void afl_forkserver(CPUArchState *env) { +static void afl_forkserver(CPUArchState* env) { static unsigned char tmp[4]; @@ -165,13 +174,13 @@ static void afl_forkserver(CPUArchState *env) { while (1) { pid_t child_pid; - int status, t_fd[2]; + int status, t_fd[2]; /* Whoops, parent dead? */ if (read(FORKSRV_FD, tmp, 4) != 4) exit(2); - /* Establish a channel with child to grab translation commands. We'll + /* Establish a channel with child to grab translation commands. We'll read from t_fd[0], child will write to TSL_FD. */ if (pipe(t_fd) || dup2(t_fd[1], TSL_FD) < 0) exit(3); @@ -211,7 +220,6 @@ static void afl_forkserver(CPUArchState *env) { } - /* The equivalent of the tuple logging routine from afl-as.h. */ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { @@ -220,14 +228,13 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { u8* afl_area_ptr = uc->afl_area_ptr; - if(!afl_area_ptr) - return; + if (!afl_area_ptr) return; /* Looks like QEMU always maps to fixed locations, so ASAN is not a concern. Phew. But instruction addresses may be aligned. 
Let's mangle the value to get something quasi-uniform. */ - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 1; /* Implement probabilistic instrumentation by looking at scrambled block @@ -243,7 +250,6 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) { } - /* This code is invoked whenever QEMU decides that it doesn't have a translation of a particular block and needs to compute it. When this happens, we tell the parent to mirror the operation, so that the next fork() has a @@ -255,20 +261,19 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint64_t flags) { if (!afl_fork_child) return; - t.pc = pc; + t.pc = pc; t.cs_base = cb; - t.flags = flags; + t.flags = flags; if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) return; } - /* This is the other side of the same channel. Since timeouts are handled by afl-fuzz simply killing the child, we can just wait until the pipe breaks. */ -static void afl_wait_tsl(CPUArchState *env, int fd) { +static void afl_wait_tsl(CPUArchState* env, int fd) { struct afl_tsl t; @@ -276,12 +281,13 @@ static void afl_wait_tsl(CPUArchState *env, int fd) { /* Broken pipe means it's time to return to the fork server routine. */ - if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) - break; + if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break; tb_find_slow(env, t.pc, t.cs_base, t.flags); + } close(fd); + } diff --git a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h index 69877c6b..7c84058f 100644 --- a/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h +++ b/unicorn_mode/patches/afl-unicorn-cpu-translate-inl.h @@ -35,28 +35,23 @@ static void afl_gen_compcov(TCGContext *s, uint64_t cur_loc, TCGv_i64 arg1, TCGv_i64 arg2, TCGMemOp ot, int is_imm) { - if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr) - return; - - if (!is_imm && s->uc->afl_compcov_level < 2) - return; + if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr) return; - cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + if (!is_imm && s->uc->afl_compcov_level < 2) return; + + cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); cur_loc &= MAP_SIZE - 7; - + if (cur_loc >= s->uc->afl_inst_rms) return; switch (ot) { - case MO_64: - gen_afl_compcov_log_64(s, cur_loc, arg1, arg2); - break; - case MO_32: - gen_afl_compcov_log_32(s, cur_loc, arg1, arg2); - break; - case MO_16: - gen_afl_compcov_log_16(s, cur_loc, arg1, arg2); - break; - default: - return; + + case MO_64: gen_afl_compcov_log_64(s, cur_loc, arg1, arg2); break; + case MO_32: gen_afl_compcov_log_32(s, cur_loc, arg1, arg2); break; + case MO_16: gen_afl_compcov_log_16(s, cur_loc, arg1, arg2); break; + default: return; + } + } + diff --git a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h index fa4974d6..d21bbcc7 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-op-inl.h @@ -31,26 +31,29 @@ */ static inline void gen_afl_compcov_log_16(TCGContext *tcg_ctx, uint64_t cur_loc, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); - TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); - gen_helper_afl_compcov_log_16(tcg_ctx, tuc, tcur_loc, arg1, arg2); + TCGv_i64 arg1, TCGv_i64 arg2) { + + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + 
gen_helper_afl_compcov_log_16(tcg_ctx, tuc, tcur_loc, arg1, arg2); + } static inline void gen_afl_compcov_log_32(TCGContext *tcg_ctx, uint64_t cur_loc, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); - TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); - gen_helper_afl_compcov_log_32(tcg_ctx, tuc, tcur_loc, arg1, arg2); + TCGv_i64 arg1, TCGv_i64 arg2) { + + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + gen_helper_afl_compcov_log_32(tcg_ctx, tuc, tcur_loc, arg1, arg2); + } static inline void gen_afl_compcov_log_64(TCGContext *tcg_ctx, uint64_t cur_loc, - TCGv_i64 arg1, TCGv_i64 arg2) -{ - TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); - TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); - gen_helper_afl_compcov_log_64(tcg_ctx, tuc, tcur_loc, arg1, arg2); + TCGv_i64 arg1, TCGv_i64 arg2) { + + TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc); + TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc); + gen_helper_afl_compcov_log_64(tcg_ctx, tuc, tcur_loc, arg1, arg2); + } diff --git a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h index 1f0667ce..95e68302 100644 --- a/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h +++ b/unicorn_mode/patches/afl-unicorn-tcg-runtime-inl.h @@ -38,9 +38,8 @@ void HELPER(afl_compcov_log_16)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; - if ((arg1 & 0xff) == (arg2 & 0xff)) { - INC_AFL_AREA(cur_loc); - } + if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); } + } void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, @@ -49,14 +48,17 @@ void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - INC_AFL_AREA(cur_loc +1); - if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - INC_AFL_AREA(cur_loc +2); - } + + INC_AFL_AREA(cur_loc + 1); + if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); } + } + } + } void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, @@ -65,25 +67,40 @@ void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1, u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr; if ((arg1 & 0xff) == (arg2 & 0xff)) { + INC_AFL_AREA(cur_loc); if ((arg1 & 0xffff) == (arg2 & 0xffff)) { - INC_AFL_AREA(cur_loc +1); + + INC_AFL_AREA(cur_loc + 1); if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { - INC_AFL_AREA(cur_loc +2); + + INC_AFL_AREA(cur_loc + 2); if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) { - INC_AFL_AREA(cur_loc +3); + + INC_AFL_AREA(cur_loc + 3); if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) { - INC_AFL_AREA(cur_loc +4); + + INC_AFL_AREA(cur_loc + 4); if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) { - INC_AFL_AREA(cur_loc +5); + + INC_AFL_AREA(cur_loc + 5); if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) { - INC_AFL_AREA(cur_loc +6); + + INC_AFL_AREA(cur_loc + 6); + } + } + } + } + } + } + } + } From d47ef88fcd842bd13923b1b519544fa2c8d6d0eb Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Mon, 2 Sep 2019 18:53:43 +0200 Subject: [PATCH 69/83] minor fixes --- include/afl-fuzz.h | 14 +++++++------- include/alloc-inl.h | 6 ++++++ llvm_mode/afl-clang-fast.c | 4 ++-- 3 files changed, 15 insertions(+), 9 deletions(-) 
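The one substantive change in the set below is the include/afl-fuzz.h hunk:
it merges the split "extern u8 *..." declaration list back into a single
block and drops the stray `static` from the pilot_fuzzing() prototype. A
`static` prototype in a shared header gives the function internal linkage
in every translation unit that includes it, so a file that calls the
function without defining it can never reach the real definition. A minimal
sketch of the failure mode (file names are illustrative, not from this
tree; u8 as in AFL's types.h):

  /* fuzz.h - the problematic form */
  static u8 pilot_fuzzing(char**);   /* internal linkage per includer */

  /* fuzz-one.c - after the static declaration above, this definition
     keeps internal linkage and is invisible to other files */
  u8 pilot_fuzzing(char** argv) { return 0; }

  /* main.c - includes fuzz.h and calls pilot_fuzzing(): the compiler
     will typically warn that the function is used but never defined,
     and linking fails with an undefined reference. Declaring the
     prototype plain, as the hunk below does, gives it external linkage
     and lets the call resolve. */
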
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 3e121851..b5c5afaf 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -237,8 +237,8 @@ extern u8 *in_dir, /* Input directory with test cases */ *use_banner, /* Display banner */ *in_bitmap, /* Input bitmap */ *file_extension, /* File extension */ - *orig_cmdline; /* Original command line */ -extern u8 *doc_path, /* Path to documentation dir */ + *orig_cmdline, /* Original command line */ + *doc_path, /* Path to documentation dir */ *target_path, /* Path to target binary */ *out_file; /* File to fuzz, if any */ @@ -532,11 +532,11 @@ u8 common_fuzz_stuff(char**, u8*, u32); /* Fuzz one */ -u8 fuzz_one_original(char**); -static u8 pilot_fuzzing(char**); -u8 core_fuzzing(char**); -void pso_updating(void); -u8 fuzz_one(char**); +u8 fuzz_one_original(char**); +u8 pilot_fuzzing(char**); +u8 core_fuzzing(char**); +void pso_updating(void); +u8 fuzz_one(char**); /* Init */ diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 4a4beff1..302d15b6 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -104,20 +104,26 @@ /* #define CHECK_PTR(_p) do { \ \ + \ if (_p) { \ \ + \ if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\ \ + \ if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ ABORT("Use after free."); \ else ABORT("Corrupted head alloc canary."); \ \ } \ + \ if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \ ABORT("Corrupted tail alloc canary."); \ \ } \ \ + \ + \ } while (0) */ diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c index 666fd043..ed320716 100644 --- a/llvm_mode/afl-clang-fast.c +++ b/llvm_mode/afl-clang-fast.c @@ -173,8 +173,8 @@ static void edit_params(u32 argc, char** argv) { "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default // cc_params[cc_par_cnt++] = "-mllvm"; // cc_params[cc_par_cnt++] = - // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; cc_params[cc_par_cnt++] - // = "-sanitizer-coverage-block-threshold=0"; + // "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; + // cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0"; #else cc_params[cc_par_cnt++] = "-Xclang"; cc_params[cc_par_cnt++] = "-load"; From 3bfd88aabbf3fdf70cb053aa25944f32d2113d8f Mon Sep 17 00:00:00 2001 From: hexcoder- Date: Tue, 3 Sep 2019 04:28:24 +0200 Subject: [PATCH 70/83] better support for OpenBSD thanks to CaBeckmann (issue #9). On OpenBSD there is a restricted system LLVM, but a full LLVM package can be installed (typically in /usr/local/bin). Added a check if the full package is installed. If so, use it, otherwise bail out early with a hint to install it. --- llvm_mode/Makefile | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile index 160a8fe6..7f0c8c5d 100644 --- a/llvm_mode/Makefile +++ b/llvm_mode/Makefile @@ -25,13 +25,22 @@ BIN_PATH = $(PREFIX)/bin VERSION = $(shell grep '^\#define VERSION ' ../config.h | cut -d '"' -f2) -LLVM_CONFIG ?= llvm-config +ifeq "$(shell uname)" "OpenBSD" + LLVM_CONFIG ?= $(BIN_PATH)/llvm-config + HAS_OPT = $(shell test -x $(BIN_PATH)/opt && echo 0 || echo 1) + ifeq "$(HAS_OPT)" "1" + $(error llvm_mode needs a complete llvm installation (versions 3.8.0 up to 9) -> e.g. 
"pkg_add llvm-7.0.1p9") + endif +else + LLVM_CONFIG ?= llvm-config +endif + LLVMVER = $(shell $(LLVM_CONFIG) --version) LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version | egrep -q '^[12]|^3\.0|^1[0-9]' && echo 1 || echo 0 ) LLVM_MAJOR = ($shell $(LLVM_CONFIG) --version | sed 's/\..*//') ifeq "$(LLVM_UNSUPPORTED)" "1" - $(warn llvm_mode only supports versions 3.8.0 up to 9 ) + $(error llvm_mode only supports versions 3.8.0 up to 9) endif # this is not visible yet: @@ -61,7 +70,7 @@ ifeq "$(shell uname)" "Darwin" endif ifeq "$(shell uname)" "OpenBSD" - CLANG_LFL += `$(LLVM_CONFIG) --libdir`/libLLVM.so.0.0 + CLANG_LFL += `$(LLVM_CONFIG) --libdir`/libLLVM.so endif # We were using llvm-config --bindir to get the location of clang, but @@ -69,8 +78,13 @@ endif # probably better. ifeq "$(origin CC)" "default" - CC = clang - CXX = clang++ + ifeq "$(shell uname)" "OpenBSD" + CC = $(BIN_PATH)/clang + CXX = $(BIN_PATH)/clang++ + else + CC = clang + CXX = clang++ + endif endif # sanity check. From f094908f549151e604c92286e791816fae7d67fa Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 3 Sep 2019 11:19:27 +0200 Subject: [PATCH 71/83] contributing file --- CONTRIBUTING.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..d6211ffd --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,14 @@ +# How to submit a Pull Request to AFLplusplus + +Each modified source file, before merging, must be formatted. + +``` +make code-formatter +``` + +This should be fine if you modified one of the file already present in the +project, otherwise run: + +``` +./.custom-format.py -i file-that-you-have-created.c +``` From e969afc627ee625472b6e5d8b96c06c81d722aa4 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 3 Sep 2019 11:24:45 +0200 Subject: [PATCH 72/83] update todo --- TODO | 16 ++++++---------- docs/ChangeLog | 2 +- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/TODO b/TODO index c2cf10a5..06e9a634 100644 --- a/TODO +++ b/TODO @@ -1,13 +1,7 @@ Roadmap 2.53d: ============== -all: - - indent all the code: .clang-format? - (vh: tried, the variable definion look very ugly then, what to do?) - afl-fuzz: - - put mutator, scheduler, forkserver and input channels in individual files - - reuse forkserver for showmap, afl-cmin, etc. - custom mutator lib: example and readme - env var to exclusively run the custom lib/py mutator @@ -19,13 +13,10 @@ gcc_plugin: - neverZero qemu_mode: - - update to 4.x - - deferred mode with AFL_DEFERRED_QEMU=0xaddress - (vh: @andrea - dont we have that already with AFL_ENTRYPOINT?) + - update to 4.x (probably this will be skipped :( ) unit testing / or large testcase campaign - Roadmap 2.54d: ============== Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges. @@ -59,3 +50,8 @@ Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges. qemu_mode: - persistent mode patching the return address (WinAFL style) + - deferred mode with AFL_DEFERRED_QEMU=0xaddress + (AFL_ENTRYPOINT let you to specify only a basic block address as starting + point. This will be implemented togheter with the logic for persistent + mode.) 
+
diff --git a/docs/ChangeLog b/docs/ChangeLog
index 1cd95650..2fc4efbc 100644
--- a/docs/ChangeLog
+++ b/docs/ChangeLog
@@ -39,7 +39,7 @@ Version ++2.53d (dev):
   - NeverZero counters for QEMU
   - NeverZero counters for Unicorn
   - CompareCoverage Unicorn
-  - Immediates-only instrumentation for CompareCoverage
+  - immediates-only instrumentation for CompareCoverage
 
 
 --------------------------

From 45f00e45be3eeb1109aaa7b7756d15b795f2acde Mon Sep 17 00:00:00 2001
From: van Hauser
Date: Tue, 3 Sep 2019 11:38:44 +0200
Subject: [PATCH 73/83] error to warn change

---
 llvm_mode/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm_mode/Makefile b/llvm_mode/Makefile
index 88e9d579..6332b01a 100644
--- a/llvm_mode/Makefile
+++ b/llvm_mode/Makefile
@@ -40,7 +40,7 @@ LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version | egrep -q '^[12]|^3\.0|^1[0
 LLVM_MAJOR = $(shell $(LLVM_CONFIG) --version | sed 's/\..*//')
 
 ifeq "$(LLVM_UNSUPPORTED)" "1"
-  $(error llvm_mode only supports versions 3.8.0 up to 9)
+  $(warn llvm_mode only supports versions 3.8.0 up to 9)
 endif
 
 # this is not visible yet:

From 50530c144efafafbcf5fa40376742a806f755b63 Mon Sep 17 00:00:00 2001
From: van Hauser
Date: Tue, 3 Sep 2019 11:42:22 +0200
Subject: [PATCH 74/83] updated TODO

---
 TODO | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/TODO b/TODO
index 06e9a634..19c3b2eb 100644
--- a/TODO
+++ b/TODO
@@ -5,6 +5,10 @@ afl-fuzz:
  - custom mutator lib: example and readme
  - env var to exclusively run the custom lib/py mutator
 
+
+Roadmap 2.54d:
+==============
+
 gcc_plugin:
  - needs to be rewritten
  - whitelist support
@@ -17,8 +21,6 @@ qemu_mode:
 
 unit testing / or large testcase campaign
 
-Roadmap 2.54d:
-==============
-
 Problem: Average targets (tiff, jpeg, unrar) go through 1500 edges.
          At afl's default map that means ~16 collisions and ~3 wrappings.
          Solution #1: increase map size.
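
The arithmetic behind that roadmap note can be checked directly. Below is a
minimal standalone C sketch, an aside rather than part of any patch in this
series: assuming the 1500 edges hash uniformly into afl's default 64 kB map,
the expected number of occupied cells is m * (1 - (1 - 1/m)^n), and every
edge beyond one per occupied cell has collided.

```c
/* Sanity check for the "~16 collisions" figure quoted above.  Standalone
   sketch, not from the AFL++ tree; 65536 matches afl's default MAP_SIZE
   in config.h.  Build with:  cc check.c -lm                             */
#include <math.h>
#include <stdio.h>

int main(void) {

  double m = 65536.0; /* cells in the default coverage map            */
  double n = 1500.0;  /* edges an average target goes through         */

  /* Expected number of distinct cells hit by n uniformly hashed edges */
  double filled = m * (1.0 - pow(1.0 - 1.0 / m, n));

  /* Edges that landed in an already-occupied cell */
  printf("expected colliding edges: %.1f\n", n - filled); /* ~17 */

  return 0;

}
```

For n much smaller than m this comes out near n*n/(2m), so doubling the map
size roughly halves the expected collisions, which is what Solution #1
relies on.
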
From 0d7ecd4327aa4c67a0ddf1807847f9106db07be1 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Tue, 3 Sep 2019 12:03:12 +0200 Subject: [PATCH 75/83] updated TODO --- TODO | 1 - 1 file changed, 1 deletion(-) diff --git a/TODO b/TODO index 19c3b2eb..df32db84 100644 --- a/TODO +++ b/TODO @@ -3,7 +3,6 @@ Roadmap 2.53d: afl-fuzz: - custom mutator lib: example and readme - - env var to exclusively run the custom lib/py mutator Roadmap 2.54d: From 7151651ea9a87f394cf7f44fcba56cd0d2b7eefa Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Tue, 3 Sep 2019 20:43:11 +0200 Subject: [PATCH 76/83] remove macro indentation from code-format --- .custom-format.py | 19 +-- include/afl-as.h | 12 +- include/afl-fuzz.h | 16 +- include/alloc-inl.h | 52 +++--- include/android-ashmem.h | 8 +- include/config.h | 4 +- include/debug.h | 196 +++++++++++----------- include/forkserver.h | 8 +- include/hash.h | 4 +- include/types.h | 16 +- libdislocator/libdislocator.so.c | 4 +- libtokencap/libtokencap.so.c | 2 +- llvm_mode/MarkNodes.h | 8 +- llvm_mode/afl-llvm-rt.o.c | 6 +- qemu_mode/libcompcov/libcompcov.so.c | 2 +- qemu_mode/libcompcov/pmparser.h | 22 +-- qemu_mode/patches/afl-qemu-common.h | 4 +- src/afl-analyze.c | 2 +- src/afl-as.c | 6 +- src/afl-common.c | 2 +- src/afl-fuzz-init.c | 16 +- src/afl-fuzz-one.c | 6 +- src/afl-fuzz.c | 18 +- src/afl-gotcpu.c | 6 +- src/afl-sharedmem.c | 6 +- src/afl-showmap.c | 2 +- src/afl-tmin.c | 2 +- unicorn_mode/patches/afl-unicorn-common.h | 4 +- 28 files changed, 221 insertions(+), 232 deletions(-) diff --git a/.custom-format.py b/.custom-format.py index 32b8f7c9..81adbd85 100755 --- a/.custom-format.py +++ b/.custom-format.py @@ -45,31 +45,14 @@ def custom_format(filename): src, _ = p.communicate() src = str(src, "utf-8") - macro_indent = 0 in_define = False last_line = None out = "" for line in src.split("\n"): if line.startswith("#"): - i = macro_indent - if line.startswith("#end") and macro_indent > 0: - macro_indent -= 1 - i -= 1 - elif line.startswith("#el") and macro_indent > 0: - i -= 1 - elif line.startswith("#if") and not (line.startswith("#ifndef") and (line.endswith("_H") or line.endswith("H_"))): - macro_indent += 1 - elif line.startswith("#define"): + if line.startswith("#define"): in_define = True - r = "#" + (i * " ") + line[1:] - if i != 0 and line.endswith("\\"): - r = r[:-1] - while r[-1].isspace() and len(r) != (len(line)-1): - r = r[:-1] - r += "\\" - if len(r) <= COLUMN_LIMIT: - line = r elif "/*" in line and not line.strip().startswith("/*") and line.endswith("*/") and len(line) < (COLUMN_LIMIT-2): cmt_start = line.rfind("/*") diff --git a/include/afl-as.h b/include/afl-as.h index 4f8fb640..55d8694d 100644 --- a/include/afl-as.h +++ b/include/afl-as.h @@ -398,9 +398,9 @@ static const u8* main_payload_32 = recognize .string. 
*/ #ifdef __APPLE__ -# define CALL_L64(str) "call _" str "\n" +#define CALL_L64(str) "call _" str "\n" #else -# define CALL_L64(str) "call " str "@PLT\n" +#define CALL_L64(str) "call " str "@PLT\n" #endif /* ^__APPLE__ */ static const u8* main_payload_64 = @@ -737,9 +737,9 @@ static const u8* main_payload_64 = #ifdef __APPLE__ " .comm __afl_area_ptr, 8\n" -# ifndef COVERAGE_ONLY +#ifndef COVERAGE_ONLY " .comm __afl_prev_loc, 8\n" -# endif /* !COVERAGE_ONLY */ +#endif /* !COVERAGE_ONLY */ " .comm __afl_fork_pid, 4\n" " .comm __afl_temp, 4\n" " .comm __afl_setup_failure, 1\n" @@ -747,9 +747,9 @@ static const u8* main_payload_64 = #else " .lcomm __afl_area_ptr, 8\n" -# ifndef COVERAGE_ONLY +#ifndef COVERAGE_ONLY " .lcomm __afl_prev_loc, 8\n" -# endif /* !COVERAGE_ONLY */ +#endif /* !COVERAGE_ONLY */ " .lcomm __afl_fork_pid, 4\n" " .lcomm __afl_temp, 4\n" " .lcomm __afl_setup_failure, 1\n" diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index b5c5afaf..3206ee72 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -27,12 +27,12 @@ #define MESSAGES_TO_STDOUT #ifndef _GNU_SOURCE -# define _GNU_SOURCE +#define _GNU_SOURCE #endif #define _FILE_OFFSET_BITS 64 #ifdef __ANDROID__ -# include "android-ashmem.h" +#include "android-ashmem.h" #endif #include "config.h" @@ -69,21 +69,21 @@ #include #if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) -# include -# define HAVE_ARC4RANDOM 1 +#include +#define HAVE_ARC4RANDOM 1 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ /* For systems that have sched_setaffinity; right now just Linux, but one can hope... */ #ifdef __linux__ -# define HAVE_AFFINITY 1 +#define HAVE_AFFINITY 1 #endif /* __linux__ */ #ifndef SIMPLE_FILES -# define CASE_PREFIX "id:" +#define CASE_PREFIX "id:" #else -# define CASE_PREFIX "id_" +#define CASE_PREFIX "id_" #endif /* ^!SIMPLE_FILES */ struct queue_entry { @@ -432,7 +432,7 @@ extern s32 /* Python stuff */ #ifdef USE_PYTHON -# include +#include extern PyObject* py_module; diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 302d15b6..6e46ae19 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -105,18 +105,22 @@ #define CHECK_PTR(_p) do { \ \ \ + \ if (_p) { \ \ \ + \ if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\ \ \ + \ if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ ABORT("Use after free."); \ else ABORT("Corrupted head alloc canary."); \ \ } \ \ + \ if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \ ABORT("Corrupted tail alloc canary."); \ \ @@ -124,7 +128,9 @@ \ \ \ + \ } while (0) + */ #define CHECK_PTR_EXPR(_p) \ @@ -371,16 +377,16 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) { /* In non-debug mode, we just do straightforward aliasing of the above functions to user-visible names such as ck_alloc(). 
*/ -# define ck_alloc DFL_ck_alloc -# define ck_alloc_nozero DFL_ck_alloc_nozero -# define ck_realloc DFL_ck_realloc -# define ck_realloc_block DFL_ck_realloc_block -# define ck_strdup DFL_ck_strdup -# define ck_memdup DFL_ck_memdup -# define ck_memdup_str DFL_ck_memdup_str -# define ck_free DFL_ck_free +#define ck_alloc DFL_ck_alloc +#define ck_alloc_nozero DFL_ck_alloc_nozero +#define ck_realloc DFL_ck_realloc +#define ck_realloc_block DFL_ck_realloc_block +#define ck_strdup DFL_ck_strdup +#define ck_memdup DFL_ck_memdup +#define ck_memdup_str DFL_ck_memdup_str +#define ck_free DFL_ck_free -# define alloc_report() +#define alloc_report() #else @@ -389,7 +395,7 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) { /* Alloc tracking data structures: */ -# define ALLOC_BUCKETS 4096 +#define ALLOC_BUCKETS 4096 struct TRK_obj { @@ -399,25 +405,25 @@ struct TRK_obj { }; -# ifdef AFL_MAIN +#ifdef AFL_MAIN struct TRK_obj* TRK[ALLOC_BUCKETS]; u32 TRK_cnt[ALLOC_BUCKETS]; -# define alloc_report() TRK_report() +#define alloc_report() TRK_report() -# else +#else extern struct TRK_obj* TRK[ALLOC_BUCKETS]; extern u32 TRK_cnt[ALLOC_BUCKETS]; -# define alloc_report() +#define alloc_report() -# endif /* ^AFL_MAIN */ +#endif /* ^AFL_MAIN */ /* Bucket-assigning function for a given pointer: */ -# define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS) +#define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS) /* Add a new entry to the list of allocated objects. */ @@ -569,25 +575,25 @@ static inline void TRK_ck_free(void* ptr, const char* file, const char* func, /* Aliasing user-facing names to tracking functions: */ -# define ck_alloc(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) +#define ck_alloc(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) #define ck_alloc_nozero(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__) -# define ck_realloc(_p1, _p2)\ +#define ck_realloc(_p1, _p2) \ TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -# define ck_realloc_block(_p1, _p2)\ +#define ck_realloc_block(_p1, _p2) \ TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -# define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__) +#define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__) -# define ck_memdup(_p1, _p2)\ +#define ck_memdup(_p1, _p2) \ TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -# define ck_memdup_str(_p1, _p2)\ +#define ck_memdup_str(_p1, _p2) \ TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__) -# define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) +#define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) #endif /* ^!DEBUG_BUILD */ diff --git a/include/android-ashmem.h b/include/android-ashmem.h index a4b5bf30..02a84d11 100644 --- a/include/android-ashmem.h +++ b/include/android-ashmem.h @@ -8,10 +8,10 @@ #include #if __ANDROID_API__ >= 26 -# define shmat bionic_shmat -# define shmctl bionic_shmctl -# define shmdt bionic_shmdt -# define shmget bionic_shmget +#define shmat bionic_shmat +#define shmctl bionic_shmctl +#define shmdt bionic_shmdt +#define shmget bionic_shmget #endif #include #undef shmat diff --git a/include/config.h b/include/config.h index babba3bd..d85a503d 100644 --- a/include/config.h +++ b/include/config.h @@ -50,9 +50,9 @@ /* Default memory limit for child process (MB): */ #ifndef __x86_64__ -# define MEM_LIMIT 25 +#define MEM_LIMIT 25 #else -# define MEM_LIMIT 50 +#define MEM_LIMIT 50 #endif /* 
^!__x86_64__ */ /* Default memory limit when running in QEMU mode (MB): */ diff --git a/include/debug.h b/include/debug.h index 6a59ad7a..5ff2e23e 100644 --- a/include/debug.h +++ b/include/debug.h @@ -28,77 +28,77 @@ #ifdef USE_COLOR -# define cBLK "\x1b[0;30m" -# define cRED "\x1b[0;31m" -# define cGRN "\x1b[0;32m" -# define cBRN "\x1b[0;33m" -# define cBLU "\x1b[0;34m" -# define cMGN "\x1b[0;35m" -# define cCYA "\x1b[0;36m" -# define cLGR "\x1b[0;37m" -# define cGRA "\x1b[1;90m" -# define cLRD "\x1b[1;91m" -# define cLGN "\x1b[1;92m" -# define cYEL "\x1b[1;93m" -# define cLBL "\x1b[1;94m" -# define cPIN "\x1b[1;95m" -# define cLCY "\x1b[1;96m" -# define cBRI "\x1b[1;97m" -# define cRST "\x1b[0m" +#define cBLK "\x1b[0;30m" +#define cRED "\x1b[0;31m" +#define cGRN "\x1b[0;32m" +#define cBRN "\x1b[0;33m" +#define cBLU "\x1b[0;34m" +#define cMGN "\x1b[0;35m" +#define cCYA "\x1b[0;36m" +#define cLGR "\x1b[0;37m" +#define cGRA "\x1b[1;90m" +#define cLRD "\x1b[1;91m" +#define cLGN "\x1b[1;92m" +#define cYEL "\x1b[1;93m" +#define cLBL "\x1b[1;94m" +#define cPIN "\x1b[1;95m" +#define cLCY "\x1b[1;96m" +#define cBRI "\x1b[1;97m" +#define cRST "\x1b[0m" -# define bgBLK "\x1b[40m" -# define bgRED "\x1b[41m" -# define bgGRN "\x1b[42m" -# define bgBRN "\x1b[43m" -# define bgBLU "\x1b[44m" -# define bgMGN "\x1b[45m" -# define bgCYA "\x1b[46m" -# define bgLGR "\x1b[47m" -# define bgGRA "\x1b[100m" -# define bgLRD "\x1b[101m" -# define bgLGN "\x1b[102m" -# define bgYEL "\x1b[103m" -# define bgLBL "\x1b[104m" -# define bgPIN "\x1b[105m" -# define bgLCY "\x1b[106m" -# define bgBRI "\x1b[107m" +#define bgBLK "\x1b[40m" +#define bgRED "\x1b[41m" +#define bgGRN "\x1b[42m" +#define bgBRN "\x1b[43m" +#define bgBLU "\x1b[44m" +#define bgMGN "\x1b[45m" +#define bgCYA "\x1b[46m" +#define bgLGR "\x1b[47m" +#define bgGRA "\x1b[100m" +#define bgLRD "\x1b[101m" +#define bgLGN "\x1b[102m" +#define bgYEL "\x1b[103m" +#define bgLBL "\x1b[104m" +#define bgPIN "\x1b[105m" +#define bgLCY "\x1b[106m" +#define bgBRI "\x1b[107m" #else -# define cBLK "" -# define cRED "" -# define cGRN "" -# define cBRN "" -# define cBLU "" -# define cMGN "" -# define cCYA "" -# define cLGR "" -# define cGRA "" -# define cLRD "" -# define cLGN "" -# define cYEL "" -# define cLBL "" -# define cPIN "" -# define cLCY "" -# define cBRI "" -# define cRST "" +#define cBLK "" +#define cRED "" +#define cGRN "" +#define cBRN "" +#define cBLU "" +#define cMGN "" +#define cCYA "" +#define cLGR "" +#define cGRA "" +#define cLRD "" +#define cLGN "" +#define cYEL "" +#define cLBL "" +#define cPIN "" +#define cLCY "" +#define cBRI "" +#define cRST "" -# define bgBLK "" -# define bgRED "" -# define bgGRN "" -# define bgBRN "" -# define bgBLU "" -# define bgMGN "" -# define bgCYA "" -# define bgLGR "" -# define bgGRA "" -# define bgLRD "" -# define bgLGN "" -# define bgYEL "" -# define bgLBL "" -# define bgPIN "" -# define bgLCY "" -# define bgBRI "" +#define bgBLK "" +#define bgRED "" +#define bgGRN "" +#define bgBRN "" +#define bgBLU "" +#define bgMGN "" +#define bgCYA "" +#define bgLGR "" +#define bgGRA "" +#define bgLRD "" +#define bgLGN "" +#define bgYEL "" +#define bgLBL "" +#define bgPIN "" +#define bgLCY "" +#define bgBRI "" #endif /* ^USE_COLOR */ @@ -108,39 +108,39 @@ #ifdef FANCY_BOXES -# define SET_G1 "\x1b)0" /* Set G1 for box drawing */ -# define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ -# define bSTART "\x0e" /* Enter G1 drawing mode */ -# define bSTOP "\x0f" /* Leave G1 drawing mode */ -# define bH "q" /* Horizontal line */ -# define bV "x" /* 
Vertical line */ -# define bLT "l" /* Left top corner */ -# define bRT "k" /* Right top corner */ -# define bLB "m" /* Left bottom corner */ -# define bRB "j" /* Right bottom corner */ -# define bX "n" /* Cross */ -# define bVR "t" /* Vertical, branch right */ -# define bVL "u" /* Vertical, branch left */ -# define bHT "v" /* Horizontal, branch top */ -# define bHB "w" /* Horizontal, branch bottom */ +#define SET_G1 "\x1b)0" /* Set G1 for box drawing */ +#define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ +#define bSTART "\x0e" /* Enter G1 drawing mode */ +#define bSTOP "\x0f" /* Leave G1 drawing mode */ +#define bH "q" /* Horizontal line */ +#define bV "x" /* Vertical line */ +#define bLT "l" /* Left top corner */ +#define bRT "k" /* Right top corner */ +#define bLB "m" /* Left bottom corner */ +#define bRB "j" /* Right bottom corner */ +#define bX "n" /* Cross */ +#define bVR "t" /* Vertical, branch right */ +#define bVL "u" /* Vertical, branch left */ +#define bHT "v" /* Horizontal, branch top */ +#define bHB "w" /* Horizontal, branch bottom */ #else -# define SET_G1 "" -# define RESET_G1 "" -# define bSTART "" -# define bSTOP "" -# define bH "-" -# define bV "|" -# define bLT "+" -# define bRT "+" -# define bLB "+" -# define bRB "+" -# define bX "+" -# define bVR "+" -# define bVL "+" -# define bHT "+" -# define bHB "+" +#define SET_G1 "" +#define RESET_G1 "" +#define bSTART "" +#define bSTOP "" +#define bH "-" +#define bV "|" +#define bLT "+" +#define bRT "+" +#define bLB "+" +#define bRB "+" +#define bX "+" +#define bVR "+" +#define bVL "+" +#define bHT "+" +#define bHB "+" #endif /* ^FANCY_BOXES */ @@ -161,9 +161,9 @@ /* Just print stuff to the appropriate stream. */ #ifdef MESSAGES_TO_STDOUT -# define SAYF(x...) printf(x) +#define SAYF(x...) printf(x) #else -# define SAYF(x...) fprintf(stderr, x) +#define SAYF(x...) fprintf(stderr, x) #endif /* ^MESSAGES_TO_STDOUT */ /* Show a prefixed warning. */ diff --git a/include/forkserver.h b/include/forkserver.h index af5dab72..bfc35800 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -5,20 +5,20 @@ void handle_timeout(int sig); void init_forkserver(char **argv); #ifdef __APPLE__ -# define MSG_FORK_ON_APPLE \ +#define MSG_FORK_ON_APPLE \ " - On MacOS X, the semantics of fork() syscalls are non-standard and " \ "may\n" \ " break afl-fuzz performance optimizations when running " \ "platform-specific\n" \ " targets. 
To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" #else -# define MSG_FORK_ON_APPLE "" +#define MSG_FORK_ON_APPLE "" #endif #ifdef RLIMIT_AS -# define MSG_ULIMIT_USAGE " ( ulimit -Sv $[%llu << 10];" +#define MSG_ULIMIT_USAGE " ( ulimit -Sv $[%llu << 10];" #else -# define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];" +#define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];" #endif /* ^RLIMIT_AS */ #endif diff --git a/include/hash.h b/include/hash.h index 5d0512a6..f60839ea 100644 --- a/include/hash.h +++ b/include/hash.h @@ -31,7 +31,7 @@ #ifdef __x86_64__ -# define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r)))) +#define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r)))) static inline u32 hash32(const void* key, u32 len, u32 seed) { @@ -66,7 +66,7 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { #else -# define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r)))) +#define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r)))) static inline u32 hash32(const void* key, u32 len, u32 seed) { diff --git a/include/types.h b/include/types.h index 60ae64c2..dba666b6 100644 --- a/include/types.h +++ b/include/types.h @@ -52,8 +52,8 @@ typedef int32_t s32; typedef int64_t s64; #ifndef MIN -# define MIN(_a, _b) ((_a) > (_b) ? (_b) : (_a)) -# define MAX(_a, _b) ((_a) > (_b) ? (_a) : (_b)) +#define MIN(_a, _b) ((_a) > (_b) ? (_b) : (_a)) +#define MAX(_a, _b) ((_a) > (_b) ? (_a) : (_b)) #endif /* !MIN */ #define SWAP16(_x) \ @@ -74,9 +74,9 @@ typedef int64_t s64; }) #ifdef AFL_LLVM_PASS -# define AFL_R(x) (random() % (x)) +#define AFL_R(x) (random() % (x)) #else -# define R(x) (random() % (x)) +#define R(x) (random() % (x)) #endif /* ^AFL_LLVM_PASS */ #define STRINGIFY_INTERNAL(x) #x @@ -85,11 +85,11 @@ typedef int64_t s64; #define MEM_BARRIER() __asm__ volatile("" ::: "memory") #if __GNUC__ < 6 -# define likely(_x) (_x) -# define unlikely(_x) (_x) +#define likely(_x) (_x) +#define unlikely(_x) (_x) #else -# define likely(_x) __builtin_expect(!!(_x), 1) -# define unlikely(_x) __builtin_expect(!!(_x), 0) +#define likely(_x) __builtin_expect(!!(_x), 1) +#define unlikely(_x) __builtin_expect(!!(_x), 0) #endif #endif /* ! _HAVE_TYPES_H */ diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c index 5104fed4..256506be 100644 --- a/libdislocator/libdislocator.so.c +++ b/libdislocator/libdislocator.so.c @@ -29,11 +29,11 @@ #include "types.h" #ifndef PAGE_SIZE -# define PAGE_SIZE 4096 +#define PAGE_SIZE 4096 #endif /* !PAGE_SIZE */ #ifndef MAP_ANONYMOUS -# define MAP_ANONYMOUS MAP_ANON +#define MAP_ANONYMOUS MAP_ANON #endif /* !MAP_ANONYMOUS */ /* Error / message handling: */ diff --git a/libtokencap/libtokencap.so.c b/libtokencap/libtokencap.so.c index fa26447e..51c36c4d 100644 --- a/libtokencap/libtokencap.so.c +++ b/libtokencap/libtokencap.so.c @@ -27,7 +27,7 @@ #include "../config.h" #ifndef __linux__ -# error "Sorry, this library is Linux-specific for now!" +#error "Sorry, this library is Linux-specific for now!" 
#endif /* !__linux__ */ /* Mapping data and such */ diff --git a/llvm_mode/MarkNodes.h b/llvm_mode/MarkNodes.h index 23316652..8ddc978d 100644 --- a/llvm_mode/MarkNodes.h +++ b/llvm_mode/MarkNodes.h @@ -1,9 +1,9 @@ #ifndef __MARK_NODES__ -# define __MARK_NODES__ +#define __MARK_NODES__ -# include "llvm/IR/BasicBlock.h" -# include "llvm/IR/Function.h" -# include +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Function.h" +#include std::pair, std::vector> markNodes(llvm::Function *F); diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index bc38f1ec..8d435d21 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -20,7 +20,7 @@ */ #ifdef __ANDROID__ -# include "android-ashmem.h" +#include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -42,9 +42,9 @@ the LLVM-generated runtime initialization pass, not before. */ #ifdef USE_TRACE_PC -# define CONST_PRIO 5 +#define CONST_PRIO 5 #else -# define CONST_PRIO 0 +#define CONST_PRIO 0 #endif /* ^USE_TRACE_PC */ #include diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c index e758c034..9d61848e 100644 --- a/qemu_mode/libcompcov/libcompcov.so.c +++ b/qemu_mode/libcompcov/libcompcov.so.c @@ -33,7 +33,7 @@ #include "pmparser.h" #ifndef __linux__ -# error "Sorry, this library is Linux-specific for now!" +#error "Sorry, this library is Linux-specific for now!" #endif /* !__linux__ */ /* Change this value to tune the compare coverage */ diff --git a/qemu_mode/libcompcov/pmparser.h b/qemu_mode/libcompcov/pmparser.h index 91dfd032..e7fcf187 100644 --- a/qemu_mode/libcompcov/pmparser.h +++ b/qemu_mode/libcompcov/pmparser.h @@ -13,19 +13,19 @@ implied warranty. */ #ifndef H_PMPARSER -# define H_PMPARSER -# include -# include -# include -# include -# include -# include -# include -# include -# include +#define H_PMPARSER +#include +#include +#include +#include +#include +#include +#include +#include +#include // maximum line length in a procmaps file -# define PROCMAPS_LINE_MAX_LENGTH (PATH_MAX + 100) +#define PROCMAPS_LINE_MAX_LENGTH (PATH_MAX + 100) /** * procmaps_struct * @desc hold all the information about an area in the process's VM diff --git a/qemu_mode/patches/afl-qemu-common.h b/qemu_mode/patches/afl-qemu-common.h index c87bacb6..88c110b4 100644 --- a/qemu_mode/patches/afl-qemu-common.h +++ b/qemu_mode/patches/afl-qemu-common.h @@ -36,7 +36,7 @@ /* NeverZero */ #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) -# define INC_AFL_AREA(loc) \ +#define INC_AFL_AREA(loc) \ asm volatile( \ "incb (%0, %1, 1)\n" \ "adcb $0, (%0, %1, 1)\n" \ @@ -44,6 +44,6 @@ : "r"(afl_area_ptr), "r"(loc) \ : "memory", "eax") #else -# define INC_AFL_AREA(loc) afl_area_ptr[loc]++ +#define INC_AFL_AREA(loc) afl_area_ptr[loc]++ #endif diff --git a/src/afl-analyze.c b/src/afl-analyze.c index e3014256..4cfebe3b 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -22,7 +22,7 @@ #define AFL_MAIN #ifdef __ANDROID__ -# include "android-ashmem.h" +#include "android-ashmem.h" #endif #include "config.h" #include "types.h" diff --git a/src/afl-as.c b/src/afl-as.c index 57f4c4a3..96b00d37 100644 --- a/src/afl-as.c +++ b/src/afl-as.c @@ -74,9 +74,9 @@ static u8 use_64bit = 1; static u8 use_64bit = 0; -# ifdef __APPLE__ -# error "Sorry, 32-bit Apple platforms are not supported." -# endif /* __APPLE__ */ +#ifdef __APPLE__ +#error "Sorry, 32-bit Apple platforms are not supported." 
+#endif /* __APPLE__ */ #endif /* ^__x86_64__ */ diff --git a/src/afl-common.c b/src/afl-common.c index 9f1f45eb..69c1a77d 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -13,7 +13,7 @@ /* Detect @@ in args. */ #ifndef __glibc__ -# include +#include #endif void detect_file_args(char** argv, u8* prog_in) { diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 8a3ee6fa..8588dde7 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -1491,25 +1491,25 @@ void get_core_count(void) { /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */ -# ifdef __APPLE__ +#ifdef __APPLE__ if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) return; -# else +#else int s_name[2] = {CTL_HW, HW_NCPU}; if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; -# endif /* ^__APPLE__ */ +#endif /* ^__APPLE__ */ #else -# ifdef HAVE_AFFINITY +#ifdef HAVE_AFFINITY cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN); -# else +#else FILE* f = fopen("/proc/stat", "r"); u8 tmp[1024]; @@ -1521,7 +1521,7 @@ void get_core_count(void) { fclose(f); -# endif /* ^HAVE_AFFINITY */ +#endif /* ^HAVE_AFFINITY */ #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ @@ -1772,10 +1772,10 @@ void check_binary(u8* fname) { #else -# if !defined(__arm__) && !defined(__arm64__) +#if !defined(__arm__) && !defined(__arm64__) if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED) FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path); -# endif +#endif #endif /* ^!__APPLE__ */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 1b7abedd..bc63c226 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2242,11 +2242,11 @@ retry_splicing: out_buf = ck_alloc_nozero(len); memcpy(out_buf, in_buf, len); -# ifdef USE_PYTHON +#ifdef USE_PYTHON goto python_stage; -# else +#else goto havoc_stage; -# endif +#endif } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 685840c6..d04058ca 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -27,10 +27,10 @@ static void usage(u8* argv0) { #ifdef USE_PYTHON -# define PHYTON_SUPPORT\ +#define PHYTON_SUPPORT \ "Compiled with Python 2.7 module support, see docs/python_mutators.txt\n" #else -# define PHYTON_SUPPORT "" +#define PHYTON_SUPPORT "" #endif SAYF( @@ -616,9 +616,9 @@ int main(int argc, char** argv) { get_core_count(); -# ifdef HAVE_AFFINITY +#ifdef HAVE_AFFINITY bind_to_free_cpu(); -# endif /* HAVE_AFFINITY */ +#endif /* HAVE_AFFINITY */ check_crash_handling(); check_cpu_governor(); @@ -635,12 +635,12 @@ int main(int argc, char** argv) { setup_dirs_fds(); -# ifdef USE_PYTHON +#ifdef USE_PYTHON if (init_py()) FATAL("Failed to initialize Python module"); -# else +#else if (getenv("AFL_PYTHON_MODULE")) FATAL("Your AFL binary was built without Python support"); -# endif +#endif setup_cmdline_file(argv + optind); @@ -867,9 +867,9 @@ stop_fuzzing: alloc_report(); -# ifdef USE_PYTHON +#ifdef USE_PYTHON finalize_py(); -# endif +#endif OKF("We're done here. Have a nice day!\n"); diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c index 5aa9b35c..fd96d25d 100644 --- a/src/afl-gotcpu.c +++ b/src/afl-gotcpu.c @@ -27,11 +27,11 @@ #define AFL_MAIN #ifndef _GNU_SOURCE -# define _GNU_SOURCE +#define _GNU_SOURCE #endif #ifdef __ANDROID__ -# include "android-ashmem.h" +#include "android-ashmem.h" #endif #include #include @@ -48,7 +48,7 @@ #include "debug.h" #ifdef __linux__ -# define HAVE_AFFINITY 1 +#define HAVE_AFFINITY 1 #endif /* __linux__ */ /* Get unix time in microseconds. 
*/ diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c index 9c7ac7c3..60764779 100644 --- a/src/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -5,7 +5,7 @@ #define AFL_MAIN #ifdef __ANDROID__ -# include "android-ashmem.h" +#include "android-ashmem.h" #endif #include "config.h" #include "types.h" @@ -32,8 +32,8 @@ #include #ifndef USEMMAP -# include -# include +#include +#include #endif extern unsigned char *trace_bits; diff --git a/src/afl-showmap.c b/src/afl-showmap.c index ac3d687d..5c367339 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -24,7 +24,7 @@ #define AFL_MAIN #ifdef __ANDROID__ -# include "android-ashmem.h" +#include "android-ashmem.h" #endif #include "config.h" #include "types.h" diff --git a/src/afl-tmin.c b/src/afl-tmin.c index 9decdb4d..ce4a3d76 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -22,7 +22,7 @@ #define AFL_MAIN #ifdef __ANDROID__ -# include "android-ashmem.h" +#include "android-ashmem.h" #endif #include "config.h" diff --git a/unicorn_mode/patches/afl-unicorn-common.h b/unicorn_mode/patches/afl-unicorn-common.h index d5038d06..fd88e21b 100644 --- a/unicorn_mode/patches/afl-unicorn-common.h +++ b/unicorn_mode/patches/afl-unicorn-common.h @@ -35,7 +35,7 @@ /* NeverZero */ #if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) -# define INC_AFL_AREA(loc) \ +#define INC_AFL_AREA(loc) \ asm volatile( \ "incb (%0, %1, 1)\n" \ "adcb $0, (%0, %1, 1)\n" \ @@ -43,6 +43,6 @@ : "r"(afl_area_ptr), "r"(loc) \ : "memory", "eax") #else -# define INC_AFL_AREA(loc) afl_area_ptr[loc]++ +#define INC_AFL_AREA(loc) afl_area_ptr[loc]++ #endif From 9705ccee677eb3009c6d06d1bff4d2b6cf80c4a7 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 4 Sep 2019 09:43:09 +0200 Subject: [PATCH 77/83] credits and license header for src/* and include/* --- include/afl-as.h | 15 +++++++++------ include/afl-fuzz.h | 15 +++++++++------ include/alloc-inl.h | 13 +++++++++---- include/android-ashmem.h | 24 ++++++++++++++++++++++++ include/common.h | 25 +++++++++++++++++++++++++ include/config.h | 13 +++++++++---- include/debug.h | 13 +++++++++---- include/forkserver.h | 26 ++++++++++++++++++++++++++ include/hash.h | 4 ++-- include/sharedmem.h | 26 ++++++++++++++++++++++++++ include/types.h | 13 +++++++++---- src/afl-analyze.c | 11 ++++++++--- src/afl-as.c | 13 +++++++++---- src/afl-common.c | 23 +++++++++++++++++++++-- src/afl-forkserver.c | 26 ++++++++++++++++++++++++++ src/afl-fuzz-bitmap.c | 15 +++++++++------ src/afl-fuzz-extras.c | 15 +++++++++------ src/afl-fuzz-globals.c | 15 +++++++++------ src/afl-fuzz-init.c | 15 +++++++++------ src/afl-fuzz-misc.c | 15 +++++++++------ src/afl-fuzz-one.c | 15 +++++++++------ src/afl-fuzz-python.c | 15 +++++++++------ src/afl-fuzz-queue.c | 16 +++++++++------- src/afl-fuzz-run.c | 15 +++++++++------ src/afl-fuzz-stats.c | 15 +++++++++------ src/afl-fuzz.c | 11 +++++++---- src/afl-gcc.c | 13 +++++++++---- src/afl-gotcpu.c | 9 +++++++-- src/afl-sharedmem.c | 22 ++++++++++++++++++++++ src/afl-showmap.c | 15 +++++++++++---- src/afl-tmin.c | 15 +++++++++++---- 31 files changed, 378 insertions(+), 118 deletions(-) diff --git a/include/afl-as.h b/include/afl-as.h index 55d8694d..f7e5c4dd 100644 --- a/include/afl-as.h +++ b/include/afl-as.h @@ -1,12 +1,15 @@ /* - american fuzzy lop - injectable parts - ------------------------------------- + american fuzzy lop++ - injectable parts + --------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by 
by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 3206ee72..17bdf95d 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - fuzzer header + ------------------------------------ - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 6e46ae19..de9ac1fc 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -1,10 +1,15 @@ /* - american fuzzy lop - error-checking, memory-zeroing alloc routines - ------------------------------------------------------------------ + american fuzzy lop++ - error-checking, memory-zeroing alloc routines + -------------------------------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2013, 2014, 2015 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/android-ashmem.h b/include/android-ashmem.h index 02a84d11..bfe7166e 100644 --- a/include/android-ashmem.h +++ b/include/android-ashmem.h @@ -1,3 +1,27 @@ +/* + american fuzzy lop++ - android shared memory compatibility layer + ---------------------------------------------------------------- + + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This header re-defines the shared memory routines used by AFL++ + using the Andoid API. + + */ + #ifndef _ANDROID_ASHMEM_H #define _ANDROID_ASHMEM_H diff --git a/include/common.h b/include/common.h index 9845c2af..443d1418 100644 --- a/include/common.h +++ b/include/common.h @@ -1,3 +1,28 @@ +/* + american fuzzy lop++ - common routines header + --------------------------------------------- + + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Gather some functions common to multiple executables + + - detect_file_args + + */ + #ifndef __AFLCOMMON_H #define __AFLCOMMON_H #include "types.h" diff --git a/include/config.h b/include/config.h index d85a503d..3ea75bb5 100644 --- a/include/config.h +++ b/include/config.h @@ -1,10 +1,15 @@ /* - american fuzzy lop plus plus - vaguely configurable bits - ---------------------------------------------- + american fuzzy lop++ - vaguely configurable bits + ------------------------------------------------ - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/debug.h b/include/debug.h index 5ff2e23e..201ff943 100644 --- a/include/debug.h +++ b/include/debug.h @@ -1,10 +1,15 @@ /* - american fuzzy lop - debug / error handling macros - -------------------------------------------------- + american fuzzy lop++ - debug / error handling macros + ---------------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/forkserver.h b/include/forkserver.h index bfc35800..d40af87c 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -1,3 +1,29 @@ +/* + american fuzzy lop++ - forkserver header + ---------------------------------------- + + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Shared code that implements a forkserver. This is used by the fuzzer + as well the other components like afl-tmin. + + */ + #ifndef __AFL_FORKSERVER_H #define __AFL_FORKSERVER_H diff --git a/include/hash.h b/include/hash.h index f60839ea..0ffaa410 100644 --- a/include/hash.h +++ b/include/hash.h @@ -1,6 +1,6 @@ /* - american fuzzy lop - hashing function - ------------------------------------- + american fuzzy lop++ - hashing function + --------------------------------------- The hash32() function is a variant of MurmurHash3, a good non-cryptosafe hashing function developed by Austin Appleby. 
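
Since this patch only retouches the banner of hash.h, a word on what the
function is for: afl-fuzz uses hash32() to reduce a whole execution to one
32-bit path checksum. The sketch below is illustrative, not code from these
patches; it assumes the MAP_SIZE and HASH_CONST constants that afl's
config.h provides and the hash32() declared in the hunk above.

```c
/* Illustrative sketch, not part of this patch series: the typical call
   site for hash32().  MAP_SIZE and HASH_CONST are assumed to come from
   afl's config.h, as in the real tree.                                 */
#include <stdio.h>

#include "types.h"  /* u8, u32                */
#include "config.h" /* MAP_SIZE, HASH_CONST   */
#include "hash.h"   /* static inline hash32() */

int main(void) {

  static u8 trace_bits[MAP_SIZE]; /* stand-in for the SHM coverage map */
  trace_bits[123] = 1;            /* pretend one edge was hit          */

  /* afl-fuzz checksums the whole map after every run, so two executions
     with identical edge hit patterns compare equal in O(1).            */
  u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
  printf("path checksum: 0x%08x\n", cksum);

  return 0;

}
```
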
diff --git a/include/sharedmem.h b/include/sharedmem.h index 7e13b13b..3fbfe664 100644 --- a/include/sharedmem.h +++ b/include/sharedmem.h @@ -1,3 +1,29 @@ +/* + american fuzzy lop++ - shared memory related header + --------------------------------------------------- + + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Shared code to handle the shared memory. This is used by the fuzzer + as well the other components like afl-tmin, afl-showmap, etc... + + */ + #ifndef __AFL_SHAREDMEM_H #define __AFL_SHAREDMEM_H diff --git a/include/types.h b/include/types.h index dba666b6..baf68401 100644 --- a/include/types.h +++ b/include/types.h @@ -1,10 +1,15 @@ /* - american fuzzy lop - type definitions and minor macros - ------------------------------------------------------ + american fuzzy lop++ - type definitions and minor macros + -------------------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2013, 2014, 2015 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-analyze.c b/src/afl-analyze.c index 4cfebe3b..7b647595 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -1,10 +1,15 @@ /* - american fuzzy lop - file format analyzer - ----------------------------------------- + american fuzzy lop++ - file format analyzer + ------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-as.c b/src/afl-as.c index 96b00d37..fed1882b 100644 --- a/src/afl-as.c +++ b/src/afl-as.c @@ -1,10 +1,15 @@ /* - american fuzzy lop - wrapper for GNU as - --------------------------------------- + american fuzzy lop++ - wrapper for GNU as + ----------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2013, 2014, 2015 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/src/afl-common.c b/src/afl-common.c index 69c1a77d..7859a74f 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -1,7 +1,26 @@ /* - gather some functions common to multiple executables + american fuzzy lop++ - common routines + -------------------------------------- + + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Gather some functions common to multiple executables + + - detect_file_args - detect_file_args */ #include diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 152ae802..2a4e0819 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -1,3 +1,29 @@ +/* + american fuzzy lop++ - forkserver code + -------------------------------------- + + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Shared code that implements a forkserver. This is used by the fuzzer + as well the other components like afl-tmin. + + */ + #include "config.h" #include "types.h" #include "debug.h" diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index be187fff..34e1dacb 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - bitmap related routines + ---------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index f43c86f4..97f3ac5c 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - extras relates routines + ---------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c index 8fded173..0d073154 100644 --- a/src/afl-fuzz-globals.c +++ b/src/afl-fuzz-globals.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - globals declarations + ------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 8588dde7..b45d3783 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - initialization related routines + ------------------------------------------------------ - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-misc.c b/src/afl-fuzz-misc.c index eb0cc187..09b02345 100644 --- a/src/afl-fuzz-misc.c +++ b/src/afl-fuzz-misc.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - misc stuffs from Mordor + ---------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index bc63c226..6dc7b658 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - fuzze_one routines in different flavours + --------------------------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index e22291b5..9a1309e0 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - python extension routines + ------------------------------------------------ - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 22a9ccb0..a77b5799 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -1,13 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - queue relates routines + --------------------------------------------- - Written and maintained by Michal Zalewski - - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 4093d991..9f4fafe4 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - target execution related routines + -------------------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 3614599d..e68dc980 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -1,12 +1,15 @@ /* - american fuzzy lop - fuzzer code - -------------------------------- + american fuzzy lop++ - stats related routines + --------------------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index d04058ca..398d1b80 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -2,11 +2,14 @@ american fuzzy lop - fuzzer code -------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Forkserver design by Jann Horn - - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-gcc.c b/src/afl-gcc.c index 750f9b72..08705a36 100644 --- a/src/afl-gcc.c +++ b/src/afl-gcc.c @@ -1,10 +1,15 @@ /* - american fuzzy lop - wrapper for GCC and clang - ---------------------------------------------- + american fuzzy lop++ - wrapper for GCC and clang + ------------------------------------------------ - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2013, 2014, 2015 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c index fd96d25d..6ca129c2 100644 --- a/src/afl-gotcpu.c +++ b/src/afl-gotcpu.c @@ -2,9 +2,14 @@ american fuzzy lop - free CPU gizmo ----------------------------------- - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2015, 2016 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c index 60764779..9fd5fb01 100644 --- a/src/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -1,4 +1,26 @@ /* + american fuzzy lop++ - shared memory related code + ------------------------------------------------- + + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + Shared code to handle the shared memory. This is used by the fuzzer + as well the other components like afl-tmin, afl-showmap, etc... 
*/ diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 5c367339..649684d3 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -1,10 +1,17 @@ /* - american fuzzy lop - map display utility - ---------------------------------------- + american fuzzy lop++ - map display utility + ------------------------------------------ - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-tmin.c b/src/afl-tmin.c index ce4a3d76..a33966a0 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -1,10 +1,17 @@ /* - american fuzzy lop - test case minimizer - ---------------------------------------- + american fuzzy lop++ - test case minimizer + ------------------------------------------ - Written and maintained by Michal Zalewski + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by by Marc Heuse , + Heiko Eißfeldt and + Andrea Fioraldi - Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From e1f18f6212fdab581c3bf732a51dcc5a9cdaa8e7 Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 4 Sep 2019 10:03:51 +0200 Subject: [PATCH 78/83] fix typo in custom format --- include/afl-as.h | 30 ++++++++++----------- include/afl-fuzz.h | 10 +++---- include/alloc-inl.h | 27 +++++++++++-------- include/android-ashmem.h | 2 +- include/common.h | 2 +- include/config.h | 12 ++++----- include/debug.h | 40 ++++++++++++++-------------- include/forkserver.h | 6 ++--- include/hash.h | 4 +-- include/sharedmem.h | 4 +-- include/types.h | 10 +++---- libdislocator/libdislocator.so.c | 4 +-- libtokencap/libtokencap.so.c | 2 +- llvm_mode/afl-clang-fast.c | 10 +++---- llvm_mode/afl-llvm-rt.o.c | 2 +- qemu_mode/libcompcov/libcompcov.so.c | 2 +- qemu_mode/patches/afl-qemu-tcg-inl.h | 8 +++--- src/afl-analyze.c | 28 +++++++++---------- src/afl-as.c | 18 ++++++------- src/afl-common.c | 2 +- src/afl-forkserver.c | 6 ++--- src/afl-fuzz-bitmap.c | 22 +++++++-------- src/afl-fuzz-extras.c | 2 +- src/afl-fuzz-globals.c | 4 +-- src/afl-fuzz-init.c | 28 +++++++++---------- src/afl-fuzz-misc.c | 2 +- src/afl-fuzz-one.c | 16 +++++------ src/afl-fuzz-python.c | 4 +-- src/afl-fuzz-queue.c | 2 +- src/afl-fuzz-run.c | 6 ++--- src/afl-fuzz-stats.c | 6 ++--- src/afl-fuzz.c | 6 ++--- src/afl-gcc.c | 4 +-- src/afl-gotcpu.c | 6 ++--- src/afl-sharedmem.c | 4 +-- src/afl-showmap.c | 6 ++--- src/afl-tmin.c | 4 +-- 37 files changed, 178 insertions(+), 173 deletions(-) diff --git a/include/afl-as.h b/include/afl-as.h index f7e5c4dd..048866db 100644 --- a/include/afl-as.h +++ b/include/afl-as.h @@ -3,7 +3,7 @@ --------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -186,14 +186,14 @@ static const u8* main_payload_32 = " movl %ecx, __afl_prev_loc\n" #else " movl %ecx, %edi\n" -#endif /* 
^!COVERAGE_ONLY */ +#endif /* ^!COVERAGE_ONLY */ "\n" #ifdef SKIP_COUNTS " orb $1, (%edx, %edi, 1)\n" #else " incb (%edx, %edi, 1)\n" " adcb $0, (%edx, %edi, 1)\n" // never zero counter implementation. slightly better path discovery and little performance impact -#endif /* ^SKIP_COUNTS */ +#endif /* ^SKIP_COUNTS */ "\n" "__afl_return:\n" "\n" @@ -382,7 +382,7 @@ static const u8* main_payload_32 = " .comm __afl_setup_failure, 1, 32\n" #ifndef COVERAGE_ONLY " .comm __afl_prev_loc, 4, 32\n" -#endif /* !COVERAGE_ONLY */ +#endif /* !COVERAGE_ONLY */ " .comm __afl_fork_pid, 4, 32\n" " .comm __afl_temp, 4, 32\n" "\n" @@ -404,7 +404,7 @@ static const u8* main_payload_32 = #define CALL_L64(str) "call _" str "\n" #else #define CALL_L64(str) "call " str "@PLT\n" -#endif /* ^__APPLE__ */ +#endif /* ^__APPLE__ */ static const u8* main_payload_64 = @@ -422,7 +422,7 @@ static const u8* main_payload_64 = " .byte 0x9f /* lahf */\n" #else " lahf\n" -#endif /* ^__OpenBSD__, etc */ +#endif /* ^__OpenBSD__, etc */ " seto %al\n" "\n" " /* Check if SHM region is already mapped. */\n" @@ -439,14 +439,14 @@ static const u8* main_payload_64 = " xorq __afl_prev_loc(%rip), %rcx\n" " xorq %rcx, __afl_prev_loc(%rip)\n" " shrq $1, __afl_prev_loc(%rip)\n" -#endif /* ^!COVERAGE_ONLY */ +#endif /* ^!COVERAGE_ONLY */ "\n" #ifdef SKIP_COUNTS " orb $1, (%rdx, %rcx, 1)\n" #else " incb (%rdx, %rcx, 1)\n" " adcb $0, (%rdx, %rcx, 1)\n" // never zero counter implementation. slightly better path discovery and little performance impact -#endif /* ^SKIP_COUNTS */ +#endif /* ^SKIP_COUNTS */ "\n" "__afl_return:\n" "\n" @@ -455,7 +455,7 @@ static const u8* main_payload_64 = " .byte 0x9e /* sahf */\n" #else " sahf\n" -#endif /* ^__OpenBSD__, etc */ +#endif /* ^__OpenBSD__, etc */ " ret\n" "\n" ".align 8\n" @@ -474,7 +474,7 @@ static const u8* main_payload_64 = " movq (%rdx), %rdx\n" #else " movq __afl_global_area_ptr(%rip), %rdx\n" -#endif /* !^__APPLE__ */ +#endif /* !^__APPLE__ */ " testq %rdx, %rdx\n" " je __afl_setup_first\n" "\n" @@ -573,7 +573,7 @@ static const u8* main_payload_64 = #else " movq __afl_global_area_ptr@GOTPCREL(%rip), %rdx\n" " movq %rax, (%rdx)\n" -#endif /* ^__APPLE__ */ +#endif /* ^__APPLE__ */ " movq %rax, %rdx\n" "\n" "__afl_forkserver:\n" @@ -742,7 +742,7 @@ static const u8* main_payload_64 = " .comm __afl_area_ptr, 8\n" #ifndef COVERAGE_ONLY " .comm __afl_prev_loc, 8\n" -#endif /* !COVERAGE_ONLY */ +#endif /* !COVERAGE_ONLY */ " .comm __afl_fork_pid, 4\n" " .comm __afl_temp, 4\n" " .comm __afl_setup_failure, 1\n" @@ -752,12 +752,12 @@ static const u8* main_payload_64 = " .lcomm __afl_area_ptr, 8\n" #ifndef COVERAGE_ONLY " .lcomm __afl_prev_loc, 8\n" -#endif /* !COVERAGE_ONLY */ +#endif /* !COVERAGE_ONLY */ " .lcomm __afl_fork_pid, 4\n" " .lcomm __afl_temp, 4\n" " .lcomm __afl_setup_failure, 1\n" -#endif /* ^__APPLE__ */ +#endif /* ^__APPLE__ */ " .comm __afl_global_area_ptr, 8, 8\n" "\n" @@ -767,5 +767,5 @@ static const u8* main_payload_64 = "/* --- END --- */\n" "\n"; -#endif /* !_HAVE_AFL_AS_H */ +#endif /* !_HAVE_AFL_AS_H */ diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 17bdf95d..571ca879 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -3,7 +3,7 @@ ------------------------------------ Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -74,20 +74,20 @@ #if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) #include #define HAVE_ARC4RANDOM 1 -#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ +#endif /* 
__APPLE__ || __FreeBSD__ || __OpenBSD__ */ /* For systems that have sched_setaffinity; right now just Linux, but one can hope... */ #ifdef __linux__ #define HAVE_AFFINITY 1 -#endif /* __linux__ */ +#endif /* __linux__ */ #ifndef SIMPLE_FILES #define CASE_PREFIX "id:" #else #define CASE_PREFIX "id_" -#endif /* ^!SIMPLE_FILES */ +#endif /* ^!SIMPLE_FILES */ struct queue_entry { @@ -400,7 +400,7 @@ extern s32 cpu_core_count; /* CPU core count */ extern s32 cpu_aff; /* Selected CPU core */ -#endif /* HAVE_AFFINITY */ +#endif /* HAVE_AFFINITY */ extern FILE* plot_file; /* Gnuplot output file */ diff --git a/include/alloc-inl.h b/include/alloc-inl.h index de9ac1fc..d851fd61 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -3,7 +3,7 @@ -------------------------------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -69,9 +69,9 @@ /* Magic tokens used to mark used / freed chunks. */ -#define ALLOC_MAGIC_C1 0xFF00FF00 /* Used head (dword) */ -#define ALLOC_MAGIC_F 0xFE00FE00 /* Freed head (dword) */ -#define ALLOC_MAGIC_C2 0xF0 /* Used tail (byte) */ +#define ALLOC_MAGIC_C1 0xFF00FF00 /* Used head (dword) */ +#define ALLOC_MAGIC_F 0xFE00FE00 /* Freed head (dword) */ +#define ALLOC_MAGIC_C2 0xF0 /* Used tail (byte) */ /* Positions of guard tokens in relation to the user-visible pointer. */ @@ -111,14 +111,17 @@ \ \ \ + \ if (_p) { \ \ \ \ + \ if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\ \ \ \ + \ if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \ ABORT("Use after free."); \ else ABORT("Corrupted head alloc canary."); \ @@ -126,6 +129,7 @@ } \ \ \ + \ if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \ ABORT("Corrupted tail alloc canary."); \ \ @@ -134,6 +138,7 @@ \ \ \ + \ } while (0) */ @@ -197,7 +202,7 @@ static inline void DFL_ck_free(void* mem) { /* Catch pointer issues sooner. */ memset(mem, 0xFF, ALLOC_S(mem)); -#endif /* DEBUG_BUILD */ +#endif /* DEBUG_BUILD */ ALLOC_C1(mem) = ALLOC_MAGIC_F; @@ -228,7 +233,7 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { #ifndef DEBUG_BUILD ALLOC_C1(orig) = ALLOC_MAGIC_F; -#endif /* !DEBUG_BUILD */ +#endif /* !DEBUG_BUILD */ old_size = ALLOC_S(orig); u8* origu8 = orig; @@ -266,7 +271,7 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) { } -#endif /* ^!DEBUG_BUILD */ +#endif /* ^!DEBUG_BUILD */ ret += ALLOC_OFF_HEAD; @@ -297,7 +302,7 @@ static inline void* DFL_ck_realloc_block(void* orig, u32 size) { } -#endif /* !DEBUG_BUILD */ +#endif /* !DEBUG_BUILD */ return DFL_ck_realloc(orig, size); @@ -424,7 +429,7 @@ extern u32 TRK_cnt[ALLOC_BUCKETS]; #define alloc_report() -#endif /* ^AFL_MAIN */ +#endif /* ^AFL_MAIN */ /* Bucket-assigning function for a given pointer: */ @@ -600,7 +605,7 @@ static inline void TRK_ck_free(void* ptr, const char* file, const char* func, #define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__) -#endif /* ^!DEBUG_BUILD */ +#endif /* ^!DEBUG_BUILD */ -#endif /* ! _HAVE_ALLOC_INL_H */ +#endif /* ! 
_HAVE_ALLOC_INL_H */ diff --git a/include/android-ashmem.h b/include/android-ashmem.h index bfe7166e..6c7a98db 100644 --- a/include/android-ashmem.h +++ b/include/android-ashmem.h @@ -3,7 +3,7 @@ ---------------------------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi diff --git a/include/common.h b/include/common.h index 443d1418..905830b5 100644 --- a/include/common.h +++ b/include/common.h @@ -3,7 +3,7 @@ --------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi diff --git a/include/config.h b/include/config.h index 3ea75bb5..9a263c86 100644 --- a/include/config.h +++ b/include/config.h @@ -3,7 +3,7 @@ ------------------------------------------------ Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -58,7 +58,7 @@ #define MEM_LIMIT 25 #else #define MEM_LIMIT 50 -#endif /* ^!__x86_64__ */ +#endif /* ^!__x86_64__ */ /* Default memory limit when running in QEMU mode (MB): */ @@ -128,9 +128,9 @@ /* Probabilities of skipping non-favored entries in the queue, expressed as percentages: */ -#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */ -#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */ -#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */ +#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */ +#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */ +#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */ /* Splicing cycle count: */ @@ -371,5 +371,5 @@ // #define IGNORE_FINDS -#endif /* ! _HAVE_CONFIG_H */ +#endif /* ! 
_HAVE_CONFIG_H */ diff --git a/include/debug.h b/include/debug.h index 201ff943..cccfc284 100644 --- a/include/debug.h +++ b/include/debug.h @@ -3,7 +3,7 @@ ---------------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -105,7 +105,7 @@ #define bgLCY "" #define bgBRI "" -#endif /* ^USE_COLOR */ +#endif /* ^USE_COLOR */ /************************* * Box drawing sequences * @@ -113,21 +113,21 @@ #ifdef FANCY_BOXES -#define SET_G1 "\x1b)0" /* Set G1 for box drawing */ -#define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ -#define bSTART "\x0e" /* Enter G1 drawing mode */ -#define bSTOP "\x0f" /* Leave G1 drawing mode */ -#define bH "q" /* Horizontal line */ -#define bV "x" /* Vertical line */ -#define bLT "l" /* Left top corner */ -#define bRT "k" /* Right top corner */ -#define bLB "m" /* Left bottom corner */ -#define bRB "j" /* Right bottom corner */ -#define bX "n" /* Cross */ -#define bVR "t" /* Vertical, branch right */ -#define bVL "u" /* Vertical, branch left */ -#define bHT "v" /* Horizontal, branch top */ -#define bHB "w" /* Horizontal, branch bottom */ +#define SET_G1 "\x1b)0" /* Set G1 for box drawing */ +#define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ +#define bSTART "\x0e" /* Enter G1 drawing mode */ +#define bSTOP "\x0f" /* Leave G1 drawing mode */ +#define bH "q" /* Horizontal line */ +#define bV "x" /* Vertical line */ +#define bLT "l" /* Left top corner */ +#define bRT "k" /* Right top corner */ +#define bLB "m" /* Left bottom corner */ +#define bRB "j" /* Right bottom corner */ +#define bX "n" /* Cross */ +#define bVR "t" /* Vertical, branch right */ +#define bVL "u" /* Vertical, branch left */ +#define bHT "v" /* Horizontal, branch top */ +#define bHB "w" /* Horizontal, branch bottom */ #else @@ -147,7 +147,7 @@ #define bHT "+" #define bHB "+" -#endif /* ^FANCY_BOXES */ +#endif /* ^FANCY_BOXES */ /*********************** * Misc terminal codes * @@ -169,7 +169,7 @@ #define SAYF(x...) printf(x) #else #define SAYF(x...) fprintf(stderr, x) -#endif /* ^MESSAGES_TO_STDOUT */ +#endif /* ^MESSAGES_TO_STDOUT */ /* Show a prefixed warning. */ @@ -286,5 +286,5 @@ \ } while (0) -#endif /* ! _HAVE_DEBUG_H */ +#endif /* ! 
_HAVE_DEBUG_H */ diff --git a/include/forkserver.h b/include/forkserver.h index d40af87c..9a099888 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -3,9 +3,9 @@ ---------------------------------------- Originally written by Michal Zalewski - + Forkserver design by Jann Horn - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -45,7 +45,7 @@ void init_forkserver(char **argv); #define MSG_ULIMIT_USAGE " ( ulimit -Sv $[%llu << 10];" #else #define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];" -#endif /* ^RLIMIT_AS */ +#endif /* ^RLIMIT_AS */ #endif diff --git a/include/hash.h b/include/hash.h index 0ffaa410..7085df32 100644 --- a/include/hash.h +++ b/include/hash.h @@ -99,7 +99,7 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) { } -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ -#endif /* !_HAVE_HASH_H */ +#endif /* !_HAVE_HASH_H */ diff --git a/include/sharedmem.h b/include/sharedmem.h index 3fbfe664..18e4ee9f 100644 --- a/include/sharedmem.h +++ b/include/sharedmem.h @@ -3,9 +3,9 @@ --------------------------------------------------- Originally written by Michal Zalewski - + Forkserver design by Jann Horn - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi diff --git a/include/types.h b/include/types.h index baf68401..07fc7e91 100644 --- a/include/types.h +++ b/include/types.h @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -49,7 +49,7 @@ typedef uint32_t u32; typedef unsigned long long u64; #else typedef uint64_t u64; -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ typedef int8_t s8; typedef int16_t s16; @@ -59,7 +59,7 @@ typedef int64_t s64; #ifndef MIN #define MIN(_a, _b) ((_a) > (_b) ? (_b) : (_a)) #define MAX(_a, _b) ((_a) > (_b) ? (_a) : (_b)) -#endif /* !MIN */ +#endif /* !MIN */ #define SWAP16(_x) \ ({ \ @@ -82,7 +82,7 @@ typedef int64_t s64; #define AFL_R(x) (random() % (x)) #else #define R(x) (random() % (x)) -#endif /* ^AFL_LLVM_PASS */ +#endif /* ^AFL_LLVM_PASS */ #define STRINGIFY_INTERNAL(x) #x #define STRINGIFY(x) STRINGIFY_INTERNAL(x) @@ -97,5 +97,5 @@ typedef int64_t s64; #define unlikely(_x) __builtin_expect(!!(_x), 0) #endif -#endif /* ! _HAVE_TYPES_H */ +#endif /* ! _HAVE_TYPES_H */ diff --git a/libdislocator/libdislocator.so.c b/libdislocator/libdislocator.so.c index 256506be..8834a1fc 100644 --- a/libdislocator/libdislocator.so.c +++ b/libdislocator/libdislocator.so.c @@ -30,11 +30,11 @@ #ifndef PAGE_SIZE #define PAGE_SIZE 4096 -#endif /* !PAGE_SIZE */ +#endif /* !PAGE_SIZE */ #ifndef MAP_ANONYMOUS #define MAP_ANONYMOUS MAP_ANON -#endif /* !MAP_ANONYMOUS */ +#endif /* !MAP_ANONYMOUS */ /* Error / message handling: */ diff --git a/libtokencap/libtokencap.so.c b/libtokencap/libtokencap.so.c index 51c36c4d..17b6190c 100644 --- a/libtokencap/libtokencap.so.c +++ b/libtokencap/libtokencap.so.c @@ -28,7 +28,7 @@ #ifndef __linux__ #error "Sorry, this library is Linux-specific for now!" 
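
libtokencap, like libcompcov below, is Linux-only because it discovers memory regions by parsing /proc/<pid>/maps. A minimal sketch of that procfs dependency, assuming only the standard maps line format (this is illustrative, not the library's actual proc_maps_parser code):

  #include <stdio.h>

  /* Print the address range and permissions of every mapping of the
     current process - the same information these libraries need. */
  int main(void) {

    FILE* f = fopen("/proc/self/maps", "r");
    char line[512];
    unsigned long st, en;
    char perms[5];

    if (!f) return 1;

    while (fgets(line, sizeof(line), f))
      if (sscanf(line, "%lx-%lx %4s", &st, &en, perms) == 3)
        printf("0x%lx-0x%lx %s\n", st, en, perms);

    fclose(f);
    return 0;

  }
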
-#endif /* !__linux__ */ +#endif /* !__linux__ */ /* Mapping data and such */ diff --git a/llvm_mode/afl-clang-fast.c b/llvm_mode/afl-clang-fast.c index ed320716..2c25f4de 100644 --- a/llvm_mode/afl-clang-fast.c +++ b/llvm_mode/afl-clang-fast.c @@ -183,7 +183,7 @@ static void edit_params(u32 argc, char** argv) { cc_params[cc_par_cnt++] = alloc_printf("%s/libLLVMInsTrim.so", obj_path); else cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-pass.so", obj_path); -#endif /* ^USE_TRACE_PC */ +#endif /* ^USE_TRACE_PC */ cc_params[cc_par_cnt++] = "-Qunused-arguments"; @@ -256,7 +256,7 @@ static void edit_params(u32 argc, char** argv) { if (getenv("AFL_INST_RATIO")) FATAL("AFL_INST_RATIO not available at compile time with 'trace-pc'."); -#endif /* USE_TRACE_PC */ +#endif /* USE_TRACE_PC */ if (!getenv("AFL_DONT_OPTIMIZE")) { @@ -315,7 +315,7 @@ static void edit_params(u32 argc, char** argv) { #else "__attribute__((visibility(\"default\"))) " "int _L(unsigned int) __asm__(\"__afl_persistent_loop\"); " -#endif /* ^__APPLE__ */ +#endif /* ^__APPLE__ */ "_L(_A); })"; cc_params[cc_par_cnt++] = @@ -329,7 +329,7 @@ static void edit_params(u32 argc, char** argv) { #else "__attribute__((visibility(\"default\"))) " "void _I(void) __asm__(\"__afl_manual_init\"); " -#endif /* ^__APPLE__ */ +#endif /* ^__APPLE__ */ "_I(); } while (0)"; if (maybe_linking) { @@ -382,7 +382,7 @@ int main(int argc, char** argv) { " [tpcg] by \n"); #else SAYF(cCYA "afl-clang-fast" VERSION cRST " by \n"); -#endif /* ^USE_TRACE_PC */ +#endif /* ^USE_TRACE_PC */ } diff --git a/llvm_mode/afl-llvm-rt.o.c b/llvm_mode/afl-llvm-rt.o.c index 8d435d21..20b34336 100644 --- a/llvm_mode/afl-llvm-rt.o.c +++ b/llvm_mode/afl-llvm-rt.o.c @@ -45,7 +45,7 @@ #define CONST_PRIO 5 #else #define CONST_PRIO 0 -#endif /* ^USE_TRACE_PC */ +#endif /* ^USE_TRACE_PC */ #include #include diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c index 9d61848e..dd9e2773 100644 --- a/qemu_mode/libcompcov/libcompcov.so.c +++ b/qemu_mode/libcompcov/libcompcov.so.c @@ -34,7 +34,7 @@ #ifndef __linux__ #error "Sorry, this library is Linux-specific for now!" 
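
The afl-clang-fast.c hunks above wire the __AFL_LOOP() and __AFL_INIT() macros to __afl_persistent_loop() and __afl_manual_init(). A minimal usage sketch, assuming a target built with afl-clang-fast; process_input() is a hypothetical stand-in for the real target code:

  #include <unistd.h>

  extern void process_input(const char* buf, ssize_t len); /* hypothetical */

  int main(void) {

  #ifdef __AFL_HAVE_MANUAL_CONTROL
    __AFL_INIT();              /* defer forkserver startup to this point */
  #endif

    static char buf[4096];

    while (__AFL_LOOP(1000)) { /* persistent mode: 1000 inputs per fork */

      ssize_t len = read(0, buf, sizeof(buf));
      if (len > 0) process_input(buf, len);

    }

    return 0;

  }
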
-#endif /* !__linux__ */ +#endif /* !__linux__ */ /* Change this value to tune the compare coverage */ diff --git a/qemu_mode/patches/afl-qemu-tcg-inl.h b/qemu_mode/patches/afl-qemu-tcg-inl.h index d53a1ccf..d45ffac9 100644 --- a/qemu_mode/patches/afl-qemu-tcg-inl.h +++ b/qemu_mode/patches/afl-qemu-tcg-inl.h @@ -103,7 +103,7 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) { } -#endif /* TCG_TARGET_EXTEND_ARGS */ +#endif /* TCG_TARGET_EXTEND_ARGS */ op = tcg_emit_op(INDEX_op_call); @@ -187,7 +187,7 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) { #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 int is_64bit = sizemask & (1 << 2); if (!is_64bit) { tcg_temp_free_internal(arg); } -#endif /* TCG_TARGET_EXTEND_ARGS */ +#endif /* TCG_TARGET_EXTEND_ARGS */ } @@ -270,7 +270,7 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, } -#endif /* TCG_TARGET_EXTEND_ARGS */ +#endif /* TCG_TARGET_EXTEND_ARGS */ op = tcg_emit_op(INDEX_op_call); @@ -367,7 +367,7 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, } -#endif /* TCG_TARGET_EXTEND_ARGS */ +#endif /* TCG_TARGET_EXTEND_ARGS */ } diff --git a/src/afl-analyze.c b/src/afl-analyze.c index 7b647595..357672b1 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -3,7 +3,7 @@ ------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -85,14 +85,14 @@ static volatile u8 stop_soon, /* Ctrl-C pressed? */ /* Constants used for describing byte behavior. */ -#define RESP_NONE 0x00 /* Changing byte is a no-op. */ -#define RESP_MINOR 0x01 /* Some changes have no effect. */ -#define RESP_VARIABLE 0x02 /* Changes produce variable paths. */ -#define RESP_FIXED 0x03 /* Changes produce fixed patterns. */ +#define RESP_NONE 0x00 /* Changing byte is a no-op. */ +#define RESP_MINOR 0x01 /* Some changes have no effect. */ +#define RESP_VARIABLE 0x02 /* Changes produce variable paths. */ +#define RESP_FIXED 0x03 /* Changes produce fixed patterns. */ -#define RESP_LEN 0x04 /* Potential length field */ -#define RESP_CKSUM 0x05 /* Potential checksum */ -#define RESP_SUSPECT 0x06 /* Potential "suspect" blob */ +#define RESP_LEN 0x04 /* Potential length field */ +#define RESP_CKSUM 0x05 /* Potential checksum */ +#define RESP_SUSPECT 0x06 /* Potential "suspect" blob */ /* Classify tuple counts. This is a slow & naive version, but good enough here. */ @@ -260,7 +260,7 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) { setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ -#endif /* ^RLIMIT_AS */ +#endif /* ^RLIMIT_AS */ } @@ -371,7 +371,7 @@ static void show_legend(void) { } -#endif /* USE_COLOR */ +#endif /* USE_COLOR */ /* Interpret and report a pattern in the input file. 
*/ @@ -385,7 +385,7 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { u32 rlen = 1, off; #else u32 rlen = 1; -#endif /* ^USE_COLOR */ +#endif /* ^USE_COLOR */ u8 rtype = b_data[i] & 0x0f; @@ -527,7 +527,7 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { } -#endif /* ^USE_COLOR */ +#endif /* ^USE_COLOR */ i += rlen - 1; @@ -535,7 +535,7 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) { #ifdef USE_COLOR SAYF(cRST "\n"); -#endif /* USE_COLOR */ +#endif /* USE_COLOR */ } @@ -555,7 +555,7 @@ static void analyze(char** argv) { #ifdef USE_COLOR show_legend(); -#endif /* USE_COLOR */ +#endif /* USE_COLOR */ for (i = 0; i < in_len; i++) { diff --git a/src/afl-as.c b/src/afl-as.c index fed1882b..b5a5ed58 100644 --- a/src/afl-as.c +++ b/src/afl-as.c @@ -3,7 +3,7 @@ ----------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -81,9 +81,9 @@ static u8 use_64bit = 0; #ifdef __APPLE__ #error "Sorry, 32-bit Apple platforms are not supported." -#endif /* __APPLE__ */ +#endif /* __APPLE__ */ -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ /* Examine and modify parameters to pass to 'as'. Note that the file name is always the last parameter passed by GCC, so we exploit this property @@ -120,7 +120,7 @@ static void edit_params(int argc, char** argv) { } -#endif /* __APPLE__ */ +#endif /* __APPLE__ */ /* Although this is not documented, GCC also uses TEMP and TMP when TMPDIR is not set. We need to check these non-standard variables to properly @@ -162,7 +162,7 @@ static void edit_params(int argc, char** argv) { if (clang_mode && (!strcmp(argv[i], "-q") || !strcmp(argv[i], "-Q"))) continue; -#endif /* __APPLE__ */ +#endif /* __APPLE__ */ as_params[as_par_cnt++] = argv[i]; @@ -181,7 +181,7 @@ static void edit_params(int argc, char** argv) { } -#endif /* __APPLE__ */ +#endif /* __APPLE__ */ input_file = argv[argc - 1]; @@ -242,7 +242,7 @@ static void add_instrumentation(void) { u8* colon_pos; -#endif /* __APPLE__ */ +#endif /* __APPLE__ */ if (input_file) { @@ -413,7 +413,7 @@ static void add_instrumentation(void) { if (line[0] == '.') { -#endif /* __APPLE__ */ +#endif /* __APPLE__ */ /* .L0: or LBB0_0: style jump destination */ @@ -432,7 +432,7 @@ static void add_instrumentation(void) { (clang_mode && !strncmp(line + 1, "LBB", 3))) && R(100) < inst_ratio) { -#endif /* __APPLE__ */ +#endif /* __APPLE__ */ /* An optimization is possible here by adding the code only if the label is mentioned in the code in contexts other than call / jmp. diff --git a/src/afl-common.c b/src/afl-common.c index 7859a74f..62722cb9 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -3,7 +3,7 @@ -------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 2a4e0819..f2f3c0f0 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -3,9 +3,9 @@ -------------------------------------- Originally written by Michal Zalewski - + Forkserver design by Jann Horn - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -187,7 +187,7 @@ void init_forkserver(char **argv) { maps - so we should be getting good protection against OOM bugs. 
*/ setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ -#endif /* ^RLIMIT_AS */ +#endif /* ^RLIMIT_AS */ } diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 34e1dacb..d867a318 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -3,7 +3,7 @@ ---------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -87,7 +87,7 @@ u8 has_new_bits(u8* virgin_map) { u32 i = (MAP_SIZE >> 2); -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ u8 ret = 0; @@ -125,7 +125,7 @@ u8 has_new_bits(u8* virgin_map) { else ret = 1; -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ } @@ -306,7 +306,7 @@ void simplify_trace(u32* mem) { } -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ /* Destructively classify execution counts in a trace. This is used as a preprocessing step for any newly acquired traces. Called on every exec, @@ -391,7 +391,7 @@ void classify_counts(u32* mem) { } -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ /* Compact trace bytes into a smaller bitmap. We effectively just drop the count information here. This is called only sporadically, for some @@ -453,7 +453,7 @@ u8* describe_op(u8 hnb) { } -#endif /* !SIMPLE_FILES */ +#endif /* !SIMPLE_FILES */ /* Write a message accompanying the crash directory :-) */ @@ -551,7 +551,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths); -#endif /* ^!SIMPLE_FILES */ +#endif /* ^!SIMPLE_FILES */ add_to_queue(fn, len, 0); @@ -599,7 +599,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { simplify_trace((u64*)trace_bits); #else simplify_trace((u32*)trace_bits); -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ if (!has_new_bits(virgin_tmout)) return keeping; @@ -636,7 +636,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { fn = alloc_printf("%s/hangs/id_%06llu", out_dir, unique_hangs); -#endif /* ^!SIMPLE_FILES */ +#endif /* ^!SIMPLE_FILES */ ++unique_hangs; @@ -662,7 +662,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { simplify_trace((u64*)trace_bits); #else simplify_trace((u32*)trace_bits); -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ if (!has_new_bits(virgin_crash)) return keeping; @@ -680,7 +680,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes, kill_signal); -#endif /* ^!SIMPLE_FILES */ +#endif /* ^!SIMPLE_FILES */ ++unique_crashes; diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 97f3ac5c..1a0e2eff 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -3,7 +3,7 @@ ---------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi diff --git a/src/afl-fuzz-globals.c b/src/afl-fuzz-globals.c index 0d073154..9aaa03cc 100644 --- a/src/afl-fuzz-globals.c +++ b/src/afl-fuzz-globals.c @@ -3,7 +3,7 @@ ------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -220,7 +220,7 @@ s32 cpu_core_count; /* CPU core count */ s32 cpu_aff = -1; /* Selected CPU core */ -#endif /* HAVE_AFFINITY */ +#endif /* HAVE_AFFINITY */ FILE *plot_file; /* Gnuplot output file */ diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index b45d3783..55464a36 100644 --- a/src/afl-fuzz-init.c +++ 
b/src/afl-fuzz-init.c @@ -3,7 +3,7 @@ ------------------------------------------------------ Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -142,7 +142,7 @@ void bind_to_free_cpu(void) { } -#endif /* HAVE_AFFINITY */ +#endif /* HAVE_AFFINITY */ /* Load postprocessor, if available. */ @@ -651,7 +651,7 @@ void pivot_inputs(void) { nfn = alloc_printf("%s/queue/id_%06u", out_dir, id); -#endif /* ^!SIMPLE_FILES */ +#endif /* ^!SIMPLE_FILES */ } @@ -827,7 +827,7 @@ double get_runnable_processes(void) { } -#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ +#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ return res; @@ -901,7 +901,7 @@ void maybe_delete_out_dir(void) { } -#endif /* !__sun */ +#endif /* !__sun */ f = fopen(fn, "r"); @@ -1043,7 +1043,7 @@ void maybe_delete_out_dir(void) { t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); -#endif /* ^!SIMPLE_FILES */ +#endif /* ^!SIMPLE_FILES */ rename(fn, nfn); /* Ignore errors. */ ck_free(nfn); @@ -1074,7 +1074,7 @@ void maybe_delete_out_dir(void) { t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); -#endif /* ^!SIMPLE_FILES */ +#endif /* ^!SIMPLE_FILES */ rename(fn, nfn); /* Ignore errors. */ ck_free(nfn); @@ -1174,7 +1174,7 @@ void setup_dirs_fds(void) { if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB)) PFATAL("Unable to flock() output directory."); -#endif /* !__sun */ +#endif /* !__sun */ } @@ -1390,7 +1390,7 @@ void check_crash_handling(void) { close(fd); -#endif /* ^__APPLE__ */ +#endif /* ^__APPLE__ */ } @@ -1504,7 +1504,7 @@ void get_core_count(void) { if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; -#endif /* ^__APPLE__ */ +#endif /* ^__APPLE__ */ #else @@ -1524,9 +1524,9 @@ void get_core_count(void) { fclose(f); -#endif /* ^HAVE_AFFINITY */ +#endif /* ^HAVE_AFFINITY */ -#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ +#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ if (cpu_core_count > 0) { @@ -1540,7 +1540,7 @@ void get_core_count(void) { ++cur_runnable; -#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ +#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ OKF("You have %d CPU core%s and %u runnable tasks (utilization: %0.0f%%).", cpu_core_count, cpu_core_count > 1 ? "s" : "", cur_runnable, @@ -1780,7 +1780,7 @@ void check_binary(u8* fname) { FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path); #endif -#endif /* ^!__APPLE__ */ +#endif /* ^!__APPLE__ */ if (!qemu_mode && !unicorn_mode && !dumb_mode && !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { diff --git a/src/afl-fuzz-misc.c b/src/afl-fuzz-misc.c index 09b02345..a7372b7d 100644 --- a/src/afl-fuzz-misc.c +++ b/src/afl-fuzz-misc.c @@ -3,7 +3,7 @@ ---------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 6dc7b658..9a7a5938 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -3,7 +3,7 @@ --------------------------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -328,7 +328,7 @@ static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { } -#endif /* !IGNORE_FINDS */ +#endif /* !IGNORE_FINDS */ /* Take the current entry from the queue, fuzz it for a while. This function is a tad too long... 
returns 0 if fuzzed successfully, 1 if @@ -385,7 +385,7 @@ u8 fuzz_one_original(char** argv) { } -#endif /* ^IGNORE_FINDS */ +#endif /* ^IGNORE_FINDS */ if (not_on_tty) { @@ -2253,7 +2253,7 @@ retry_splicing: } -#endif /* !IGNORE_FINDS */ +#endif /* !IGNORE_FINDS */ ret_val = 0; @@ -2337,7 +2337,7 @@ u8 pilot_fuzzing(char** argv) { } -#endif /* ^IGNORE_FINDS */ +#endif /* ^IGNORE_FINDS */ if (not_on_tty) { @@ -4015,7 +4015,7 @@ pacemaker_fuzzing: } -#endif /* !IGNORE_FINDS */ +#endif /* !IGNORE_FINDS */ ret_val = 0; @@ -4198,7 +4198,7 @@ u8 core_fuzzing(char** argv) { } -#endif /* ^IGNORE_FINDS */ +#endif /* ^IGNORE_FINDS */ if (not_on_tty) { @@ -5860,7 +5860,7 @@ pacemaker_fuzzing: } -#endif /* !IGNORE_FINDS */ +#endif /* !IGNORE_FINDS */ ret_val = 0; abandon_entry: diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index 9a1309e0..1a28f603 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -3,7 +3,7 @@ ------------------------------------------------ Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -401,5 +401,5 @@ abort_trimming: } -#endif /* USE_PYTHON */ +#endif /* USE_PYTHON */ diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index a77b5799..905fd931 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -3,7 +3,7 @@ --------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 9f4fafe4..3c3a1d37 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -73,7 +73,7 @@ u8 run_target(char** argv, u32 timeout) { setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ -#endif /* ^RLIMIT_AS */ +#endif /* ^RLIMIT_AS */ } @@ -210,7 +210,7 @@ u8 run_target(char** argv, u32 timeout) { classify_counts((u64*)trace_bits); #else classify_counts((u32*)trace_bits); -#endif /* ^__x86_64__ */ +#endif /* ^__x86_64__ */ prev_timed_out = child_timed_out; diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index e68dc980..7f171279 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -3,7 +3,7 @@ --------------------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -310,7 +310,7 @@ void show_stats(void) { sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]", crash_mode ? 
cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop", use_banner, power_name); -#endif /* HAVE_AFFINITY */ +#endif /* HAVE_AFFINITY */ SAYF("\n%s\n", tmp); @@ -674,7 +674,7 @@ void show_stats(void) { SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color, MIN(cur_utilization, 999)); -#endif /* ^HAVE_AFFINITY */ +#endif /* ^HAVE_AFFINITY */ } else diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 398d1b80..e94116f5 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -3,7 +3,7 @@ -------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -621,7 +621,7 @@ int main(int argc, char** argv) { #ifdef HAVE_AFFINITY bind_to_free_cpu(); -#endif /* HAVE_AFFINITY */ +#endif /* HAVE_AFFINITY */ check_crash_handling(); check_cpu_governor(); @@ -880,5 +880,5 @@ stop_fuzzing: } -#endif /* !AFL_LIB */ +#endif /* !AFL_LIB */ diff --git a/src/afl-gcc.c b/src/afl-gcc.c index 08705a36..2dc17baf 100644 --- a/src/afl-gcc.c +++ b/src/afl-gcc.c @@ -3,7 +3,7 @@ ------------------------------------------------ Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -201,7 +201,7 @@ static void edit_params(u32 argc, char** argv) { } -#endif /* __APPLE__ */ +#endif /* __APPLE__ */ } diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c index 6ca129c2..a39659bb 100644 --- a/src/afl-gotcpu.c +++ b/src/afl-gotcpu.c @@ -3,7 +3,7 @@ ----------------------------------- Originally written by Michal Zalewski - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -54,7 +54,7 @@ #ifdef __linux__ #define HAVE_AFFINITY 1 -#endif /* __linux__ */ +#endif /* __linux__ */ /* Get unix time in microseconds. */ @@ -255,7 +255,7 @@ int main(int argc, char** argv) { return (util_perc > 105) + (util_perc > 130); -#endif /* ^HAVE_AFFINITY */ +#endif /* ^HAVE_AFFINITY */ } diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c index 9fd5fb01..0bd1ff2f 100644 --- a/src/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -3,9 +3,9 @@ ------------------------------------------------- Originally written by Michal Zalewski - + Forkserver design by Jann Horn - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 649684d3..f3b6c561 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -3,9 +3,9 @@ ------------------------------------------ Originally written by Michal Zalewski - + Forkserver design by Jann Horn - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi @@ -261,7 +261,7 @@ static void run_target(char** argv) { setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ -#endif /* ^RLIMIT_AS */ +#endif /* ^RLIMIT_AS */ } diff --git a/src/afl-tmin.c b/src/afl-tmin.c index a33966a0..a501b068 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -3,9 +3,9 @@ ------------------------------------------ Originally written by Michal Zalewski - + Forkserver design by Jann Horn - + Now maintained by by Marc Heuse , Heiko Eißfeldt and Andrea Fioraldi From f7a400878a4b979513de50e50ec599a3376216af Mon Sep 17 00:00:00 2001 From: Andrea Fioraldi Date: Wed, 4 Sep 2019 10:04:35 +0200 Subject: [PATCH 79/83] fix typo in custom format --- .custom-format.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.custom-format.py b/.custom-format.py index 81adbd85..b7416843 100755 --- a/.custom-format.py +++ b/.custom-format.py @@ -54,7 +54,7 @@ def custom_format(filename): if line.startswith("#define"): 
in_define = True - elif "/*" in line and not line.strip().startswith("/*") and line.endswith("*/") and len(line) < (COLUMN_LIMIT-2): + if "/*" in line and not line.strip().startswith("/*") and line.endswith("*/") and len(line) < (COLUMN_LIMIT-2): cmt_start = line.rfind("/*") line = line[:cmt_start] + " " * (COLUMN_LIMIT-2 - len(line)) + line[cmt_start:] From a8d96967c426f031d61dd91287906a28975334fb Mon Sep 17 00:00:00 2001 From: van Hauser Date: Wed, 4 Sep 2019 10:32:32 +0200 Subject: [PATCH 80/83] fixed maxrss stat --- src/afl-fuzz-stats.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 7f171279..803faced 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -30,7 +30,7 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) { static double last_bcvg, last_stab, last_eps; - static struct rusage usage; + static struct rusage rus; u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); s32 fd; @@ -63,6 +63,8 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) { } + if (getrusage(RUSAGE_CHILDREN, &rus)) rus.ru_maxrss = 0; + fprintf(f, "start_time : %llu\n" "last_update : %llu\n" @@ -102,8 +104,12 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) { stability, bitmap_cvg, unique_crashes, unique_hangs, last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000, total_execs - last_crash_execs, exec_tmout, slowest_exec_ms, - (unsigned long int)usage.ru_maxrss, use_banner, - unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", +#ifdef __APPLE__ + (unsigned long int)(rus.ru_maxrss >> 20), +#else + (unsigned long int)(rus.ru_maxrss >> 10), +#endif + use_banner, unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "", no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "", persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "", From 52bfd1fc3d6c1e6610469dbddad19aacb4e7f848 Mon Sep 17 00:00:00 2001 From: van Hauser Date: Wed, 4 Sep 2019 12:14:35 +0200 Subject: [PATCH 81/83] added man pages --- Makefile | 32 +++++++++++++++++++++++++++++--- TODO | 3 +++ afl-cmin | 5 ++++- afl-plot | 4 ++-- afl-system-config | 13 +++++++++++++ afl-whatsup | 7 +++++++ docs/ChangeLog | 5 +++-- qemu_mode/build_qemu_support.sh | 10 +++++++++- qemu_mode/libcompcov/Makefile | 6 +++--- src/README.src | 22 ++++++++++++++++++++++ src/afl-analyze.c | 7 ++++++- src/afl-fuzz.c | 7 ++++++- src/afl-gcc.c | 8 ++++++++ src/afl-gotcpu.c | 8 ++++++++ src/afl-showmap.c | 7 ++++++- src/afl-tmin.c | 7 ++++++- 16 files changed, 135 insertions(+), 16 deletions(-) create mode 100644 src/README.src diff --git a/Makefile b/Makefile index edf3d99b..455facf2 100644 --- a/Makefile +++ b/Makefile @@ -24,11 +24,13 @@ BIN_PATH = $(PREFIX)/bin HELPER_PATH = $(PREFIX)/lib/afl DOC_PATH = $(PREFIX)/share/doc/afl MISC_PATH = $(PREFIX)/share/afl +MAN_PATH = $(PREFIX)/man/man8 # PROGS intentionally omit afl-as, which gets installed elsewhere. 
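
The maxrss fix in PATCH 80 above works around a getrusage() portability wart: Linux reports ru_maxrss in kilobytes while macOS reports it in bytes, hence the differing shifts to get megabytes. A self-contained sketch of the same computation:

  #include <stdio.h>
  #include <sys/resource.h>

  int main(void) {

    struct rusage rus;

    /* Peak RSS of waited-for children; zeroed on failure, as in the patch. */
    if (getrusage(RUSAGE_CHILDREN, &rus)) rus.ru_maxrss = 0;

  #ifdef __APPLE__
    printf("peak_rss_mb: %lu\n", (unsigned long)(rus.ru_maxrss >> 20));
  #else
    printf("peak_rss_mb: %lu\n", (unsigned long)(rus.ru_maxrss >> 10));
  #endif

    return 0;

  }
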
PROGS = afl-gcc afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze SH_PROGS = afl-plot afl-cmin afl-whatsup afl-system-config +MANPAGES=$(foreach p, $(PROGS) $(SH_PROGS), $(p).8) CFLAGS ?= -O3 -funroll-loops CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I include/ \ @@ -203,13 +205,33 @@ all_done: test_build .NOTPARALLEL: clean clean: - rm -f $(PROGS) afl-as as afl-g++ afl-clang afl-clang++ *.o *~ a.out core core.[1-9][0-9]* *.stackdump test .test .test1 .test2 test-instr .test-instr0 .test-instr1 qemu_mode/qemu-3.1.0.tar.xz afl-qemu-trace afl-gcc-fast afl-gcc-pass.so afl-gcc-rt.o afl-g++-fast - rm -rf out_dir qemu_mode/qemu-3.1.0 + rm -f $(PROGS) afl-as as afl-g++ afl-clang afl-clang++ *.o *~ a.out core core.[1-9][0-9]* *.stackdump test .test .test1 .test2 test-instr .test-instr0 .test-instr1 qemu_mode/qemu-3.1.0.tar.xz afl-qemu-trace afl-gcc-fast afl-gcc-pass.so afl-gcc-rt.o afl-g++-fast *.so unicorn_mode/24f55a7973278f20f0de21b904851d99d4716263.tar.gz *.8 + rm -rf out_dir qemu_mode/qemu-3.1.0 unicorn_mode/unicorn $(MAKE) -C llvm_mode clean $(MAKE) -C libdislocator clean $(MAKE) -C libtokencap clean + $(MAKE) -C qemu_mode/libcompcov clean -install: all +%.8: % + @echo .TH $* 8 `date --iso-8601` "afl++" > $@ + @echo .SH NAME >> $@ + @echo .B $* >> $@ + @echo >> $@ + @echo .SH SYNOPSIS >> $@ + @./$* -h 2>&1 | head -n 3 | tail -n 1 | sed 's/^\.\///' >> $@ + @echo >> $@ + @echo .SH OPTIONS >> $@ + @echo .nf >> $@ + @./$* -h 2>&1 | tail -n +4 >> $@ + @echo >> $@ + @echo .SH AUTHOR >> $@ + @echo "afl++ was written by Michal \"lcamtuf\" Zalewski and is maintained by Marc \"van Hauser\" Heuse , Heiko \"hexc0der\" Eissfeldt and Andrea Fioraldi " >> $@ + @echo The homepage of afl++ is: https://github.com/vanhauser-thc/AFLplusplus >> $@ + @echo >> $@ + @echo .SH LICENSE >> $@ + @echo Apache License Version 2.0, January 2004 >> $@ + +install: all $(MANPAGES) mkdir -p -m 755 $${DESTDIR}$(BIN_PATH) $${DESTDIR}$(HELPER_PATH) $${DESTDIR}$(DOC_PATH) $${DESTDIR}$(MISC_PATH) rm -f $${DESTDIR}$(BIN_PATH)/afl-plot.sh install -m 755 $(PROGS) $(SH_PROGS) $${DESTDIR}$(BIN_PATH) @@ -226,10 +248,14 @@ endif if [ -f compare-transform-pass.so ]; then set -e; install -m 755 compare-transform-pass.so $${DESTDIR}$(HELPER_PATH); fi if [ -f split-compares-pass.so ]; then set -e; install -m 755 split-compares-pass.so $${DESTDIR}$(HELPER_PATH); fi if [ -f split-switches-pass.so ]; then set -e; install -m 755 split-switches-pass.so $${DESTDIR}$(HELPER_PATH); fi + if [ -f libcompcov.so ]; then set -e; install -m 755 libcompcov.so $${DESTDIR}$(HELPER_PATH); fi set -e; ln -sf afl-gcc $${DESTDIR}$(BIN_PATH)/afl-g++ set -e; if [ -f afl-clang-fast ] ; then ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang ; ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang++ ; else ln -sf afl-gcc $${DESTDIR}$(BIN_PATH)/afl-clang ; ln -sf afl-gcc $${DESTDIR}$(BIN_PATH)/afl-clang++; fi + mkdir -m 0755 -p $(MAN_PATH) + install -m0644 -D *.8 $(MAN_PATH) + install -m 755 afl-as $${DESTDIR}$(HELPER_PATH) ln -sf afl-as $${DESTDIR}$(HELPER_PATH)/as install -m 644 docs/README.md docs/ChangeLog docs/*.txt $${DESTDIR}$(DOC_PATH) diff --git a/TODO b/TODO index df32db84..26311713 100644 --- a/TODO +++ b/TODO @@ -4,6 +4,9 @@ Roadmap 2.53d: afl-fuzz: - custom mutator lib: example and readme +man: + - man page for afl-clang-fast + Roadmap 2.54d: ============== diff --git a/afl-cmin b/afl-cmin index a9ec4082..88635550 100755 --- a/afl-cmin +++ b/afl-cmin @@ -51,10 +51,13 @@ TIMEOUT=none unset IN_DIR OUT_DIR STDIN_FILE EXTRA_PAR 
MEM_LIMIT_GIVEN \ AFL_CMIN_CRASHES_ONLY AFL_CMIN_ALLOW_ANY QEMU_MODE UNICORN_MODE -while getopts "+i:o:f:m:t:eQUC" opt; do +while getopts "+i:o:f:m:t:eQUCh" opt; do case "$opt" in + "h") + ;; + "i") IN_DIR="$OPTARG" ;; diff --git a/afl-plot b/afl-plot index 25ffde64..bc86fb85 100755 --- a/afl-plot +++ b/afl-plot @@ -21,10 +21,10 @@ echo if [ ! "$#" = "2" ]; then cat 1>&2 <<_EOF_ -This program generates gnuplot images from afl-fuzz output data. Usage: - $0 afl_state_dir graph_output_dir +This program generates gnuplot images from afl-fuzz output data. Usage: + The afl_state_dir parameter should point to an existing state directory for any active or stopped instance of afl-fuzz; while graph_output_dir should point to an empty directory where this tool can write the resulting plots to. diff --git a/afl-system-config b/afl-system-config index 28793c5b..6a495f0a 100755 --- a/afl-system-config +++ b/afl-system-config @@ -1,4 +1,17 @@ #!/bin/sh +test "$1" = "-h" && { + echo afl-system-config by Marc Heuse + echo + echo $0 + echo + echo afl-system-config has no command line options + echo + echo afl-system reconfigures the system to a high performance fuzzing state + echo WARNING: this reduces the security of the system + echo + exit 1 +} + PLATFORM=`uname -s` echo This reconfigures the system to have a better fuzzing performance if [ '!' "$EUID" = 0 ] && [ '!' `id -u` = 0 ] ; then diff --git a/afl-whatsup b/afl-whatsup index c1e41529..505f7eba 100755 --- a/afl-whatsup +++ b/afl-whatsup @@ -19,6 +19,13 @@ echo "status check tool for afl-fuzz by " echo +test "$1" = "-h" && { + echo $0 + echo + echo afl-whatsup has no command line options + echo + exit 1 +} if [ "$1" = "-s" ]; then diff --git a/docs/ChangeLog b/docs/ChangeLog index 2fc4efbc..66f71a42 100644 --- a/docs/ChangeLog +++ b/docs/ChangeLog @@ -19,11 +19,12 @@ Version ++2.53d (dev): - big code refactoring: * all includes are now in include/ - * all afl sources are now in src/ - see src/README + * all afl sources are now in src/ - see src/README.src * afl-fuzz was splitted up in various individual files for including functionality in other programs (e.g. forkserver, memory map, etc.) - or better readability. + for better readability. * new code indention everywhere + - auto-generating man pages for all (main) tools - added AFL_FORCE_UI to show the UI even if the terminal is not detected - llvm 9 is now supported (still needs testing) - Android is now supported (thank to JoeyJiao!) - still need to modify the Makefile though diff --git a/qemu_mode/build_qemu_support.sh b/qemu_mode/build_qemu_support.sh index 35f5b8ca..88726be4 100755 --- a/qemu_mode/build_qemu_support.sh +++ b/qemu_mode/build_qemu_support.sh @@ -112,7 +112,8 @@ if [ "$CKSUM" = "$QEMU_SHA384" ]; then else - echo "[-] Error: signature mismatch on $ARCHIVE (perhaps download error?)." + echo "[-] Error: signature mismatch on $ARCHIVE (perhaps download error?), removing archive ..." + rm -f "$ARCHIVE" exit 1 fi @@ -200,6 +201,8 @@ if [ "$ORIG_CPU_TARGET" = "" ]; then echo "[+] Instrumentation tests passed. " echo "[+] All set, you can now use the -Q mode in afl-fuzz!" + cd qemu_mode || exit 1 + else echo "[!] Note: can't test instrumentation when CPU_TARGET set." @@ -207,4 +210,9 @@ else fi +echo "[+] Building libcompcov ..." +make -C libcompcov +echo "[+] libcompcov ready" +echo "[+] All done for qemu_mode, enjoy!" 
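
The man page rule added to the Makefile above generates each tool's .8 page from its -h output, which is why the C tools in the hunks that follow all gain an explicit 'h' in their getopt() strings. The recurring pattern, sketched with an illustrative option string rather than any one tool's real set:

  #include <unistd.h>

  extern void usage(const char* argv0);  /* each tool's existing help printer */

  static int parse_opts(int argc, char** argv) {

    int opt;

    while ((opt = getopt(argc, argv, "+i:o:h")) > 0)
      switch (opt) {

        case 'i': /* ... handle -i ... */ break;
        case 'o': /* ... handle -o ... */ break;

        case 'h': /* print help and bail out instead of falling
                     through as an unknown option */
        default: usage(argv[0]); return -1;

      }

    return 0;

  }
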
+ exit 0 diff --git a/qemu_mode/libcompcov/Makefile b/qemu_mode/libcompcov/Makefile index a1f4e31f..d078ae06 100644 --- a/qemu_mode/libcompcov/Makefile +++ b/qemu_mode/libcompcov/Makefile @@ -25,18 +25,18 @@ LDFLAGS += -ldl all: libcompcov.so compcovtest libcompcov.so: libcompcov.so.c ../../config.h - $(CC) $(CFLAGS) -shared -fPIC $< -o $@ $(LDFLAGS) + $(CC) $(CFLAGS) -shared -fPIC $< -o ../../$@ $(LDFLAGS) .NOTPARALLEL: clean clean: rm -f *.o *.so *~ a.out core core.[1-9][0-9]* - rm -f libcompcov.so compcovtest + rm -f ../../libcompcov.so compcovtest compcovtest: compcovtest.cc $(CXX) $< -o $@ install: all - install -m 755 libcompcov.so $${DESTDIR}$(HELPER_PATH) + install -m 755 ../../libcompcov.so $${DESTDIR}$(HELPER_PATH) install -m 644 README.compcov $${DESTDIR}$(HELPER_PATH) diff --git a/src/README.src b/src/README.src new file mode 100644 index 00000000..244f5ddd --- /dev/null +++ b/src/README.src @@ -0,0 +1,22 @@ +Quick explanation about the files here: + +afl-analyze.c - afl-analyze binary tool +afl-as.c - afl-as binary tool +afl-gotcpu.c - afl-gotcpu binary tool +afl-showmap.c - afl-showmap binary tool +afl-tmin.c - afl-tmin binary tool +afl-fuzz.c - afl-fuzz binary tool (just main() and usage()) +afl-fuzz-bitmap.c - afl-fuzz bitmap handling +afl-fuzz-extras.c - afl-fuzz the *extra* function calls +afl-fuzz-globals.c - afl-fuzz global variables +afl-fuzz-init.c - afl-fuzz initialization +afl-fuzz-misc.c - afl-fuzz misc functions +afl-fuzz-one.c - afl-fuzz fuzzer_one big loop, this is where the mutation is happening +afl-fuzz-python.c - afl-fuzz the python mutator extension +afl-fuzz-queue.c - afl-fuzz handling the queue +afl-fuzz-run.c - afl-fuzz running the target +afl-fuzz-stats.c - afl-fuzz writing the statistics file +afl-gcc.c - afl-gcc binary tool (deprecated) +afl-common.c - common functions, used by afl-analyze, afl-fuzz, afl-showmap and afl-tmin +afl-forkserver.c - forkserver implementation, used by afl-fuzz and afl-tmin +afl-sharedmem.c - sharedmem implementation, used by afl-fuzz and afl-tmin diff --git a/src/afl-analyze.c b/src/afl-analyze.c index 357672b1..e30f53b8 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -900,7 +900,7 @@ int main(int argc, char** argv) { SAYF(cCYA "afl-analyze" VERSION cRST " by \n"); - while ((opt = getopt(argc, argv, "+i:f:m:t:eQU")) > 0) + while ((opt = getopt(argc, argv, "+i:f:m:t:eQUh")) > 0) switch (opt) { @@ -988,6 +988,11 @@ int main(int argc, char** argv) { unicorn_mode = 1; break; + + case 'h': + usage(argv[0]); + return -1; + break; default: usage(argv[0]); diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index e94116f5..eb0060a4 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -131,7 +131,7 @@ int main(int argc, char** argv) { gettimeofday(&tv, &tz); init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid(); - while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) > + while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:h")) > 0) switch (opt) { @@ -490,6 +490,11 @@ int main(int argc, char** argv) { } break; + case 'h': + usage(argv[0]); + return -1; + break; // not needed + default: usage(argv[0]); } diff --git a/src/afl-gcc.c b/src/afl-gcc.c index 2dc17baf..2f72ef34 100644 --- a/src/afl-gcc.c +++ b/src/afl-gcc.c @@ -333,6 +333,14 @@ static void edit_params(u32 argc, char** argv) { int main(int argc, char** argv) { + if (argc == 2 && strcmp(argv[1], "-h") == 0) { + printf("afl-cc" VERSION" by \n\n"); + printf("%s \n\n", argv[0]); + printf("afl-gcc has no command line options\n"); + printf("NOTE: 
afl-gcc is deprecated, llvm_mode is much faster and has more options\n"); + return -1; + } + if (isatty(2) && !getenv("AFL_QUIET")) { SAYF(cCYA "afl-cc" VERSION cRST " by \n"); diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c index a39659bb..85864c6f 100644 --- a/src/afl-gotcpu.c +++ b/src/afl-gotcpu.c @@ -127,6 +127,14 @@ repeat_loop: int main(int argc, char** argv) { + if (argc > 1) { + printf("afl-gotcpu" VERSION " by \n"); + printf("\n%s \n\n", argv[0]); + printf("afl-gotcpu does not have command line options\n"); + printf("afl-gotcpu prints out which CPUs are available\n"); + return -1; + } + #ifdef HAVE_AFFINITY u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), idle_cpus = 0, maybe_cpus = 0, i; diff --git a/src/afl-showmap.c b/src/afl-showmap.c index f3b6c561..6aa72746 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -563,7 +563,7 @@ int main(int argc, char** argv) { doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH; - while ((opt = getopt(argc, argv, "+o:m:t:A:eqZQUbcr")) > 0) + while ((opt = getopt(argc, argv, "+o:m:t:A:eqZQUbcrh")) > 0) switch (opt) { @@ -691,6 +691,11 @@ int main(int argc, char** argv) { if (edges_only) FATAL("-e and -r are mutually exclusive"); raw_instr_output = 1; break; + + case 'h': + usage(argv[0]); + return -1; + break; default: usage(argv[0]); diff --git a/src/afl-tmin.c b/src/afl-tmin.c index a501b068..baf22557 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -1091,7 +1091,7 @@ int main(int argc, char** argv) { SAYF(cCYA "afl-tmin" VERSION cRST " by \n"); - while ((opt = getopt(argc, argv, "+i:o:f:m:t:B:xeQU")) > 0) + while ((opt = getopt(argc, argv, "+i:o:f:m:t:B:xeQUh")) > 0) switch (opt) { @@ -1211,6 +1211,11 @@ int main(int argc, char** argv) { mask_bitmap = ck_alloc(MAP_SIZE); read_bitmap(optarg); break; + + case 'h': + usage(argv[0]); + return -1; + break; default: usage(argv[0]); From 71bf2d88268240cd93b211ad8ae9324356fcd46c Mon Sep 17 00:00:00 2001 From: van Hauser Date: Wed, 4 Sep 2019 13:15:44 +0200 Subject: [PATCH 82/83] README update --- README.md | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 14e1ae59..4b3909e2 100644 --- a/README.md +++ b/README.md @@ -9,8 +9,9 @@ Repository: [https://github.com/vanhauser-thc/AFLplusplus](https://github.com/vanhauser-thc/AFLplusplus) - afl++ is maintained by Marc Heuse , Heiko Eißfeldt - and Andrea Fioraldi . + afl++ is maintained by Marc "van Hauser" Heuse , + Heiko "hexc0der" Eißfeldt and + Andrea Fioraldi . Note that although afl now has a Google afl repository [https://github.com/Google/afl](https://github.com/Google/afl), it is unlikely to receive any noteable enhancements: [https://twitter.com/Dor3s/status/1154737061787660288](https://twitter.com/Dor3s/status/1154737061787660288) @@ -21,25 +22,28 @@ Many improvements were made over the official afl release - which did not get any improvements since November 2017. - Among others afl++ has, e.g. more performant llvm_mode, supporting + Among others afl++ has a more performant llvm_mode, supporting llvm up to version 9, Qemu 3.1, more speed and crashfixes for Qemu, - laf-intel feature for Qemu (with libcompcov), better *BSD and Android - support and more. + better *BSD and Android support and much, much more. Additionally the following patches have been integrated: * AFLfast's power schedules by Marcel Böhme: [https://github.com/mboehme/aflfast](https://github.com/mboehme/aflfast) - * C. 
Hollers afl-fuzz Python mutator module and llvm_mode whitelist support: [https://github.com/choller/afl](https://github.com/choller/afl)
- * the new excellent MOpt mutator: [https://github.com/puppet-meteor/MOpt-AFL](https://github.com/puppet-meteor/MOpt-AFL)
 * instrim, a very effective CFG llvm_mode instrumentation implementation for large targets: [https://github.com/csienslab/instrim](https://github.com/csienslab/instrim)
- * unicorn_mode which allows fuzzing of binaries from completely different platforms (integration provided by domenukk)
+ * C. Holler's afl-fuzz Python mutator module and llvm_mode whitelist support: [https://github.com/choller/afl](https://github.com/choller/afl)
 * Custom mutator support via a library (instead of Python) by kyakdan
+ * unicorn_mode which allows fuzzing of binaries from completely different platforms (integration provided by domenukk)
+
+ * laf-intel (compcov) support for llvm_mode, qemu_mode and unicorn_mode
+
+ * neverZero patch for afl-gcc, llvm_mode, qemu_mode and unicorn_mode which prevents a map value from wrapping to zero and increases coverage (by Andrea Fioraldi)
+
 A more thorough list is available in the PATCHES file.

 So all in all this is the best-of AFL that is currently out there :-)

From abf61ecc8f1b4ea3de59f818d859139637b29f32 Mon Sep 17 00:00:00 2001
From: van Hauser
Date: Wed, 4 Sep 2019 16:15:42 +0200
Subject: [PATCH 83/83] add to docs

---
 docs/status_screen.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/status_screen.txt b/docs/status_screen.txt
index 37a39a99..c6f9f791 100644
--- a/docs/status_screen.txt
+++ b/docs/status_screen.txt
@@ -407,6 +407,9 @@ directory. This includes:
 - variable_paths - number of test cases showing variable behavior
 - unique_crashes - number of unique crashes recorded
 - unique_hangs - number of unique hangs encountered
+ - command_line - full command line used for the fuzzing session
+ - slowest_exec_ms - real time of the slowest execution in milliseconds
+ - peak_rss_mb - max RSS usage reached during fuzzing, in MB

Most of these map directly to the UI elements discussed earlier on.
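
The fuzzer_stats fields documented in PATCH 83 can be consumed with nothing more than line matching, since the file is plain "key : value" text. A hedged sketch of such a reader; the output path is hypothetical:

  #include <stdio.h>
  #include <string.h>

  int main(void) {

    FILE* f = fopen("output/fuzzer_stats", "r"); /* assumed -o directory */
    char line[1024];

    if (!f) return 1;

    /* Echo only the three fields added in this patch series. */
    while (fgets(line, sizeof(line), f))
      if (!strncmp(line, "command_line", 12) ||
          !strncmp(line, "slowest_exec_ms", 15) ||
          !strncmp(line, "peak_rss_mb", 11))
        fputs(line, stdout);

    fclose(f);
    return 0;

  }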