Mirror of https://github.com/AFLplusplus/AFLplusplus.git, synced 2025-06-08 00:01:33 +00:00
* Output afl-clang-fast stuff only if necessary (#1912)
  * afl-cc header
  * afl-cc common declarations
    - Add afl-cc-state.c
    - Strip includes, find_object, debug/be_quiet/have_*/callname setting from afl-cc.c
    - Use debugf_args in main
    - Modify execvp stuff to fit the new aflcc struct
  * afl-cc show usage
  * afl-cc mode selecting
    1. compiler_mode by callname in argv[0]
    2. compiler_mode by env "AFL_CC_COMPILER"
    3. compiler_mode/instrument_mode by command line options "--afl-..."
    4. instrument_mode/compiler_mode by various env vars including "AFL_LLVM_INSTRUMENT"
    5. final checking steps
    6. print "... - mode: %s-%s\n"
    7. determine real argv[0] according to compiler_mode
  * afl-cc macro defs
  * afl-cc linking behaviors
  * afl-cc fsanitize behaviors
  * afl-cc misc
  * afl-cc body update
  * afl-cc all-in-one formatted with custom-format.py
  * nits
  Co-authored-by: vanhauser-thc <vh@thc.org>
* changelog
* update grammar mutator
* lto llvm 12+
* docs(custom_mutators): fix missing ':' (#1953)
* Fix broken LTO mode and response file support (#1948)
* Strip `-Wl,-no-undefined` during compilation (#1952)
  Make the compiler wrapper strip `-Wl,-no-undefined` in addition to `-Wl,--no-undefined`. Both versions of the flag are accepted by clang and, therefore, used by build systems in the wild (e.g., samba will not build without this fix).
* Remove dead code in write_to_testcase (#1955)
  The custom_mutators_count check in the if case duplicates the if condition. The else case is custom_mutators_count == 0, so neither the custom_mutator_list iteration nor the sent check is needed.
  Signed-off-by: Xeonacid <h.dwwwwww@gmail.com>
* update qemuafl
* WIP: Add ability to generate drcov trace using QEMU backend (#1956)
  * Document new drcov QEMU plugin
  * Add link to lightkeeper for QEMU drcov file loading
  Co-authored-by: Jean-Romain Garnier <jean-romain.garnier@airbus.com>
* code format
* changelog
* sleep on uid != 0 afl-system-config
* fix segv about skip_next, warn on unsupported cases of linking options (#1958)
* todos
* ensure afl-cc only allows available compiler modes
* update grammar mutator
* disable aslr on apple
* fix for arm64
* help selective instrumentation
* typos
* macos
* add compiler test script
* apple fixes
* bump nyx submodules (#1963)
* fix docs
* update changelog
* update grammar mutator
* improve compiler test script
* gcc asan workaround (#1966)
* fix github merge fuckup
* fix
* Fix afl-cc (#1968)
  - Check if there are too many cmdline params, each time before inserting a new param.
  - Check that it is "-fsanitize=..." before we do anything.
  - Remove improper param_st transfer.
* Avoid adding llvmnative instrumentation when linking rust sanitizer runtime (#1969)
* Dynamic instrumentation filtering for LLVM native (#1971)
  * Add two dynamic instrumentation filter methods to runtime
  * Always use pc-table with native pcguard
  * Add make_symbol_list.py and README
* changelog
* todos
* new forkserver check
* fix
* nyx test for CI
* improve nyx docs
* Fixes to afl-cc and documentation (#1974)
  * Always compile with -ldl when building for CODE_COVERAGE
    When building with CODE_COVERAGE, the afl runtime contains code that calls `dladdr`, which requires -ldl. Under most circumstances, clang already adds this (e.g. when building with pc-table), but there are some circumstances where it isn't added automatically.
  * Add visibility declaration to __afl_connected
    When building with hidden visibility, the use of __AFL_LOOP inside such code can cause linker errors due to __afl_connected being declared "hidden".
* Update docs to clarify that CODE_COVERAGE=1 is required for dynamic_covfilter
* nits
* nyx build script updates
* test error output
* debug ci
* debug ci
* Improve afl-cc (#1975)
  * update response file support
    - full support of rsp file
    - fix some segv issues
  * Improve afl-cc
    - remove dead code about allow/denylist options of sancov
    - missing `if (!aflcc->have_msan)`
    - add docs for each function
    - typo
* enable nyx
* debug ci
* debug ci
* debug ci
* debug ci
* debug ci
* debug ci
* debug ci
* debug ci
* fix ci
* clean test script
* NO_NYX
* NO_NYX
* fix ci
* debug ci
* fix ci
* finalize ci fix
* Enhancement on Deterministic stage (#1972)
  * fuzzer: init commit based on aflpp 60dc37a8cf09f8e9048e4b6a2204d6c90b27655a
  * fuzzers: adding the skip variables and initialize
  * log: profile the det/havoc finding
  * log: add profile log output
  * fuzzers: separate log/skipdet module
  * fuzzers: add quick eff_map calc
  * fuzzers: add skip_eff_map in fuzz_one
  * fuzzers: mark whole input space in eff_map
  * fuzzers: add undet bit threshold to skip some seeds
  * fuzzers: fix one byte overflow
  * fuzzers: fix overflow
  * fix code format
  * add havoc only again
  * code format
  * remove log to INTROSPECTION, rename skipdet module
  * rename skipdet module
  * remove log to stats
  * clean redundant code
  * code format
  * remove redundant code format check
  * remove redundant doc
  * remove redundant objects
  * clean files
  * change -d to default skipdet
  * disable deterministic when using CUSTOM_MUTATOR
  * revert fix
  * final touches for skipdet
* remove unused var
* remove redundant eff struct (#1977)
* update QEMU-Nyx submodule (#1978)
* update QEMU-Nyx submodule (#1980)
* Fix typo in AFL_NOOPT env variable in afl-cc help message (#1982)
* nits
* 2024 v4.10c release
* fixes

Signed-off-by: Xeonacid <h.dwwwwww@gmail.com>
Co-authored-by: Sonic <50692172+SonicStark@users.noreply.github.com>
Co-authored-by: Xeonacid <h.dwwwwww@gmail.com>
Co-authored-by: Nils Bars <nils.bars@rub.de>
Co-authored-by: Jean-Romain Garnier <7504819+JRomainG@users.noreply.github.com>
Co-authored-by: Jean-Romain Garnier <jean-romain.garnier@airbus.com>
Co-authored-by: Sergej Schumilo <sergej@schumilo.de>
Co-authored-by: Christian Holler (:decoder) <choller@mozilla.com>
Co-authored-by: Han Zheng <35988108+kdsjZh@users.noreply.github.com>
Co-authored-by: Khaled Yakdan <yakdan@code-intelligence.com>
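The "Enhancement on Deterministic stage" series (#1972) above gives each queue entry a per-byte effective-byte map (skip_eff_map) that later mutation stages consult, so bytes shown not to matter are not mutated deterministically again. Below is a minimal sketch of the consumer side, assuming the afl-fuzz.h declarations used in the file that follows; det_stage_with_skipdet is a hypothetical stand-in for the real logic in src/afl-fuzz-one.c.

#include "afl-fuzz.h"

/* Hypothetical consumer of the skipdet effective-byte map. Only the
   queue_cur->skipdet_e->skip_eff_map field is taken from the file
   below; the surrounding stage loop is illustrative. */

static void det_stage_with_skipdet(afl_state_t *afl, u8 *buf, u32 len) {

  u8 *skip_eff_map = afl->queue_cur->skipdet_e->skip_eff_map;

  for (u32 i = 0; i < len; i++) {

    /* skip_eff_map[i] == 1 means mutating byte i produced a new queue
       entry during the quick-eff stage, so only those bytes are worth
       deterministic mutation */
    if (skip_eff_map && !skip_eff_map[i]) continue;

    buf[i] ^= 0xFF; /* placeholder mutation */
    /* ... common_fuzz_stuff(afl, buf, len) would go here ... */
    buf[i] ^= 0xFF; /* restore the byte */

  }

}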
404 lines | 9.0 KiB | C
#include "afl-fuzz.h"

/* XOR-flip a contiguous range of bytes in the input buffer. */

void flip_range(u8 *input, u32 pos, u32 size) {

  for (u32 i = 0; i < size; i++)
    input[pos + i] ^= 0xFF;

  return;

}

/* Timeouts (in milliseconds) for the effective-byte search and for the
   deterministic stage as a whole. */

#define MAX_EFF_TIMEOUT (10 * 60 * 1000)
#define MAX_DET_TIMEOUT (15 * 60 * 1000)

u8 is_det_timeout(u64 cur_ms, u8 is_flip) {

  if (is_flip) {

    if (unlikely(get_cur_time() - cur_ms > MAX_EFF_TIMEOUT)) return 1;

  } else {

    if (unlikely(get_cur_time() - cur_ms > MAX_DET_TIMEOUT)) return 1;

  }

  return 0;

}

/* decide if the seed should be deterministically fuzzed */

u8 should_det_fuzz(afl_state_t *afl, struct queue_entry *q) {

  if (!afl->skipdet_g->virgin_det_bits) {

    afl->skipdet_g->virgin_det_bits =
        (u8 *)ck_alloc(sizeof(u8) * afl->fsrv.map_size);

  }

  if (!q->favored || q->passed_det) return 0;
  if (!q->trace_mini) return 0;

  if (!afl->skipdet_g->last_cov_undet)
    afl->skipdet_g->last_cov_undet = get_cur_time();

  /* if no seed has cleared the threshold for a while, decay it */

  if (get_cur_time() - afl->skipdet_g->last_cov_undet >= THRESHOLD_DEC_TIME) {

    if (afl->skipdet_g->undet_bits_threshold >= 2) {

      afl->skipdet_g->undet_bits_threshold *= 0.75;
      afl->skipdet_g->last_cov_undet = get_cur_time();

    }

  }

  /* count the trace bits of this seed that no deterministically fuzzed
     seed has covered before */

  u32 new_det_bits = 0;

  for (u32 i = 0; i < afl->fsrv.map_size; i++) {

    if (unlikely(q->trace_mini[i >> 3] & (1 << (i & 7)))) {

      if (!afl->skipdet_g->virgin_det_bits[i]) { new_det_bits++; }

    }

  }

  if (!afl->skipdet_g->undet_bits_threshold)
    afl->skipdet_g->undet_bits_threshold = new_det_bits * 0.05;

  if (new_det_bits >= afl->skipdet_g->undet_bits_threshold) {

    afl->skipdet_g->last_cov_undet = get_cur_time();
    q->skipdet_e->undet_bits = new_det_bits;

    for (u32 i = 0; i < afl->fsrv.map_size; i++) {

      if (unlikely(q->trace_mini[i >> 3] & (1 << (i & 7)))) {

        if (!afl->skipdet_g->virgin_det_bits[i])
          afl->skipdet_g->virgin_det_bits[i] = 1;

      }

    }

    return 1;

  }

  return 0;

}
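/* Worked example of the thresholding above (illustrative numbers): a
   favored seed whose trace_mini sets 40 map bits that are not yet in
   virgin_det_bits yields new_det_bits == 40. If the threshold is still
   unset it becomes 40 * 0.05 == 2; since 40 >= 2 the seed qualifies and
   its bits are recorded in virgin_det_bits. If no seed qualifies for
   THRESHOLD_DEC_TIME, the threshold decays by a quarter at a time until
   it drops below 2. */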
/*
   The deterministic stage replacement. It consists of two stages: skip
   inference, which flips growing blocks of bytes to find ranges whose
   mutation never changes the execution path, and a quick effective-map
   calculation over the remaining bytes. Returns 0 if an exec failed,
   1 otherwise.
*/

u8 skip_deterministic_stage(afl_state_t *afl, u8 *orig_buf, u8 *out_buf,
                            u32 len, u64 before_det_time) {

  u64 orig_hit_cnt, new_hit_cnt;

  if (afl->queue_cur->skipdet_e->done_eff) return 1;

  if (!should_det_fuzz(afl, afl->queue_cur)) return 1;

  /* the check above makes sure that seeds without enough undet bits
     are ignored */

  /******************
   * SKIP INFERENCE *
   ******************/

  afl->stage_short = "inf";
  afl->stage_name = "inference";
  afl->stage_cur = 0;
  orig_hit_cnt = afl->queued_items + afl->saved_crashes;

  u8 *inf_eff_map = (u8 *)ck_alloc(sizeof(u8) * len);
  memset(inf_eff_map, 1, sizeof(u8) * len);

  /* baseline run on the unmodified input to obtain the reference trace
     checksum */

  if (common_fuzz_stuff(afl, orig_buf, len)) { return 0; }

  u64 prev_cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
  u64 _prev_cksum = prev_cksum;

  if (MINIMAL_BLOCK_SIZE * 8 < len) {

    // u64 size_skiped = 0, quick_skip_exec = total_execs, quick_skip_time =
    // get_cur_time();
    u64 pre_inf_exec = afl->fsrv.total_execs, pre_inf_time = get_cur_time();

    /* if the deterministic stage time / input size is too small, just go
       ahead */

    u32 pos = 0, cur_block_size = MINIMAL_BLOCK_SIZE, max_block_size = len / 8;

    while (pos < len - 1) {

      cur_block_size = MINIMAL_BLOCK_SIZE;

      while (cur_block_size < max_block_size) {

        u32 flip_block_size =
            (cur_block_size + pos < len) ? cur_block_size : len - 1 - pos;

        afl->stage_cur += 1;

        flip_range(out_buf, pos, flip_block_size);

        if (common_fuzz_stuff(afl, out_buf, len)) return 0;

        flip_range(out_buf, pos, flip_block_size);

        u64 cksum =
            hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

        // printf("Now trying range %d with %d, %s.\n", pos, cur_block_size,
        // (cksum == prev_cksum) ? (u8*)"Yes" : (u8*) "Not");

        /* continue until we fail or exceed length */
        if (cksum == _prev_cksum) {

          cur_block_size *= 2;

          if (cur_block_size >= len - 1 - pos) break;

        } else {

          break;

        }

      }

      if (cur_block_size == MINIMAL_BLOCK_SIZE) {

        /* we failed early on */

        pos += cur_block_size;

      } else {

        u32 cur_skip_len = (cur_block_size / 2 + pos < len)
                               ? (cur_block_size / 2)
                               : (len - pos - 1);

        memset(inf_eff_map + pos, 0, cur_skip_len);
        afl->skipdet_g->inf_prof->inf_skipped_bytes += cur_skip_len;

        pos += cur_skip_len;

      }

    }
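    /* Example of the probe above, assuming MINIMAL_BLOCK_SIZE is e.g. 64:
       at pos == 0 the loop flips bytes [0, 64), re-executes, and compares
       the trace checksum against the baseline. While the checksum is
       unchanged the block doubles (128, 256, ...); once a flip changes
       the trace or the block would overrun the input, the first half of
       the largest "quiet" block is marked non-effective in inf_eff_map
       and skipped by the quick-eff stage. */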
    afl->skipdet_g->inf_prof->inf_execs_cost +=
        (afl->fsrv.total_execs - pre_inf_exec);
    afl->skipdet_g->inf_prof->inf_time_cost += (get_cur_time() - pre_inf_time);
    // PFATAL("Done, now have %d bytes skipped, with exec %lld, time %lld.\n",
    // afl->inf_skipped_bytes, afl->inf_execs_cost, afl->inf_time_cost);

  } else

    memset(inf_eff_map, 1, len);

  new_hit_cnt = afl->queued_items + afl->saved_crashes;

  afl->stage_finds[STAGE_INF] += new_hit_cnt - orig_hit_cnt;
  afl->stage_cycles[STAGE_INF] += afl->stage_cur;

  /****************************
   * Quick Skip Effective Map *
   ****************************/

  /* Quick Effective Map Calculation */

  afl->stage_short = "quick";
  afl->stage_name = "quick eff";
  afl->stage_cur = 0;
  afl->stage_max = 32 * 1024;

  orig_hit_cnt = afl->queued_items + afl->saved_crashes;

  u32 before_skip_inf = afl->queued_items;

  /* clean all the eff bytes, since previous eff bytes are already fuzzed */
  u8 *skip_eff_map = afl->queue_cur->skipdet_e->skip_eff_map,
     *done_inf_map = afl->queue_cur->skipdet_e->done_inf_map;

  if (!skip_eff_map) {

    skip_eff_map = (u8 *)ck_alloc(sizeof(u8) * len);
    afl->queue_cur->skipdet_e->skip_eff_map = skip_eff_map;

  } else {

    memset(skip_eff_map, 0, sizeof(u8) * len);

  }

  /* restore the starting point */
  if (!done_inf_map) {

    done_inf_map = (u8 *)ck_alloc(sizeof(u8) * len);
    afl->queue_cur->skipdet_e->done_inf_map = done_inf_map;

  } else {

    for (afl->stage_cur = 0; afl->stage_cur < len; afl->stage_cur++) {

      if (done_inf_map[afl->stage_cur] == 0) break;

    }

  }

  /* depending on the seed's performance, we could search eff bytes
     for multiple rounds */

  u8 eff_round_continue = 1, eff_round_done = 0, done_eff = 0, repeat_eff = 0,
     fuzz_nearby = 0, *non_eff_bytes = 0;

  u64 before_eff_execs = afl->fsrv.total_execs;

  if (getenv("REPEAT_EFF")) repeat_eff = 1;
  if (getenv("FUZZ_NEARBY")) fuzz_nearby = 1;

  if (fuzz_nearby) {

    non_eff_bytes = (u8 *)ck_alloc(sizeof(u8) * len);

    // clean exec cksum
    if (common_fuzz_stuff(afl, out_buf, len)) { return 0; }
    prev_cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

  }

  do {

    eff_round_continue = 0;
    afl->stage_max = 32 * 1024;

    for (; afl->stage_cur < afl->stage_max && afl->stage_cur < len;
         ++afl->stage_cur) {

      afl->stage_cur_byte = afl->stage_cur;

      /* skip bytes the inference stage proved non-effective, and bytes
         already marked effective */

      if (!inf_eff_map[afl->stage_cur_byte] ||
          skip_eff_map[afl->stage_cur_byte])
        continue;

      if (is_det_timeout(before_det_time, 1)) { goto cleanup_skipdet; }

      u8 orig = out_buf[afl->stage_cur_byte], replace = rand_below(afl, 256);

      while (replace == orig) {

        replace = rand_below(afl, 256);

      }

      out_buf[afl->stage_cur_byte] = replace;

      before_skip_inf = afl->queued_items;

      if (common_fuzz_stuff(afl, out_buf, len)) { return 0; }

      out_buf[afl->stage_cur_byte] = orig;

      if (fuzz_nearby) {

        if (prev_cksum ==
            hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST)) {

          non_eff_bytes[afl->stage_cur_byte] = 1;

        }

      }

      /* a new queue entry means this byte is effective */

      if (afl->queued_items != before_skip_inf) {

        skip_eff_map[afl->stage_cur_byte] = 1;
        afl->queue_cur->skipdet_e->quick_eff_bytes += 1;

        if (afl->stage_max < MAXIMUM_QUICK_EFF_EXECS) { afl->stage_max *= 2; }

        if (afl->stage_max == MAXIMUM_QUICK_EFF_EXECS && repeat_eff)
          eff_round_continue = 1;

      }

      done_inf_map[afl->stage_cur_byte] = 1;

    }

    afl->stage_cur = 0;
    done_eff = 1;

    if (++eff_round_done >= 8) break;

  } while (eff_round_continue);
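  /* The exec budget adapts to how productive the seed is: every
     effective byte doubles stage_max (capped at MAXIMUM_QUICK_EFF_EXECS),
     and with REPEAT_EFF set a capped round schedules another pass,
     bounded at eight rounds overall. */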
  new_hit_cnt = afl->queued_items + afl->saved_crashes;

  afl->stage_finds[STAGE_QUICK] += new_hit_cnt - orig_hit_cnt;
  afl->stage_cycles[STAGE_QUICK] += (afl->fsrv.total_execs - before_eff_execs);

cleanup_skipdet:

  if (fuzz_nearby) {

    u8 *nearby_bytes = (u8 *)ck_alloc(sizeof(u8) * len);

    u32 i = 3;
    while (i < len) {

      // assume DWORD size, from i - 3 -> i + 3
      if (skip_eff_map[i]) {

        u32 fill_length = (i + 3 < len) ? 7 : len - i + 2;
        memset(nearby_bytes + i - 3, 1, fill_length);
        i += 3;

      } else

        i += 1;

    }

    for (i = 0; i < len; i++) {

      if (nearby_bytes[i] && !non_eff_bytes[i]) skip_eff_map[i] = 1;

    }

    ck_free(nearby_bytes);
    ck_free(non_eff_bytes);

  }
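  /* FUZZ_NEARBY example: if byte 10 proved effective, bytes 7..13 (a
     DWORD-sized window around it) are marked effective as well, except
     for bytes whose mutation was shown to leave the trace checksum
     unchanged. */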
  if (done_eff) {

    afl->queue_cur->skipdet_e->continue_inf = 0;
    afl->queue_cur->skipdet_e->done_eff = 1;

  } else {

    afl->queue_cur->skipdet_e->continue_inf = 1;

  }

  return 1;

}