Enhancement on Deterministic stage (#1972)

* fuzzer: init commit based on aflpp 60dc37a8cf

* fuzzers: adding the skip variables and initialize

* log: profile the det/havoc findings

* log: add profile log output

* fuzzers: separate log/skipdet module

* fuzzers: add quick eff_map calc

* fuzzers: add skip_eff_map in fuzz_one

* fuzzers: mark whole input space in eff_map

* fuzzers: add undet bit threshold to skip some seeds

* fuzzers: fix one byte overflow

* fuzzers: fix overflow

* fix code format

* add havoc only again

* code format

* remove log to INTROSPECTION, rename skipdet module

* rename skipdet module

* remove log to stats

* clean redundant code

* code format

* remove redundant code format check

* remove redundant doc

* remove redundant objects

* clean files

* change -d to default skipdet

* disable deterministic when using CUSTOM_MUTATOR

* revert fix
Author:       Han Zheng
Date:         2024-02-01 15:13:21 +01:00
Committed by: GitHub
Parent:       37d2039211
Commit:       06f0982f0f

10 changed files with 682 additions and 51 deletions
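Every hunk below is in fuzz_one_original(), and nearly all of them add the same two-line gate at the top of a deterministic substage loop: consult the per-seed skip_eff_map, and bail out to the custom-mutator/havoc path once the deterministic time budget is spent. A minimal sketch of that pattern, assuming the afl-fuzz headers are in scope (the helper names come from the diff; the loop body is simplified for illustration, not the actual implementation):

    /* Sketch only: the gate this commit adds to each deterministic
       substage. Returns 0 on completion, nonzero when the stage should
       be cut short. */
    static u8 det_substage_sketch(afl_state_t *afl, u8 *out_buf, u32 len,
                                  u8 *skip_eff_map, u64 before_det_time) {

      for (u32 i = 0; i < len; ++i) {

        /* Byte judged uninteresting by the quick eff_map calculation:
           skip every deterministic mutation that would touch it. */
        if (!skip_eff_map[i]) { continue; }

        /* Deterministic time budget exhausted: the real code jumps to
           custom_mutator_stage so havoc still runs on this seed. */
        if (is_det_timeout(before_det_time, 0)) { return 1; }

        out_buf[i] ^= 0xFF;                           /* example mutation */
        if (common_fuzz_stuff(afl, out_buf, len)) { return 2; }
        out_buf[i] ^= 0xFF;                           /* restore the byte */
      }

      return 0;
    }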


@@ -545,12 +545,37 @@ u8 fuzz_one_original(afl_state_t *afl) {
}
u64 before_det_time = get_cur_time();
#ifdef INTROSPECTION
u64 before_havoc_time;
u32 before_det_findings = afl->queued_items,
before_det_edges = count_non_255_bytes(afl, afl->virgin_bits),
before_havoc_findings, before_havoc_edges;
u8 is_logged = 0;
#endif
if (!afl->skip_deterministic) {
if (!skip_deterministic_stage(afl, in_buf, out_buf, len, before_det_time)) {
goto abandon_entry;
}
}
u8 *skip_eff_map = afl->queue_cur->skipdet_e->skip_eff_map;
/* Skip right away if -d is given, if it has not been chosen sufficiently
often to warrant the expensive deterministic stage (fuzz_level), or
if it has gone through deterministic testing in earlier, resumed runs
(passed_det). */
/* If skipdet decides to skip the seed, or no interesting bytes were
   found, we skip the whole deterministic stage as well */
if (likely(afl->skip_deterministic) || likely(afl->queue_cur->passed_det) ||
likely(!afl->queue_cur->skipdet_e->quick_eff_bytes) ||
likely(perf_score <
(afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
? afl->queue_cur->depth * 30
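The perf_score clause is cut off by the hunk boundary, but the shape of the decision is clear from the first three tests; a hedged restatement (assumption, not the literal code):

    /* Equivalent shape of the skip decision above; the final perf_score
       threshold clause is truncated in the hunk and omitted here. */
    u8 skip_det = afl->skip_deterministic               /* -d / new default   */
               || afl->queue_cur->passed_det            /* done in a past run */
               || !afl->queue_cur->skipdet_e->quick_eff_bytes; /* no bytes    */

Note the new test: if the quick eff_map calculation found no interesting bytes at all (quick_eff_bytes == 0), the whole deterministic stage is bypassed for this seed.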
@@ -609,6 +634,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
afl->stage_cur_byte = afl->stage_cur >> 3;
if (!skip_eff_map[afl->stage_cur_byte]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
FLIP_BIT(out_buf, afl->stage_cur);
#ifdef INTROSPECTION
@@ -725,6 +754,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
afl->stage_cur_byte = afl->stage_cur >> 3;
if (!skip_eff_map[afl->stage_cur_byte]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
FLIP_BIT(out_buf, afl->stage_cur);
FLIP_BIT(out_buf, afl->stage_cur + 1);
@@ -760,6 +793,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
afl->stage_cur_byte = afl->stage_cur >> 3;
if (!skip_eff_map[afl->stage_cur_byte]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
FLIP_BIT(out_buf, afl->stage_cur);
FLIP_BIT(out_buf, afl->stage_cur + 1);
FLIP_BIT(out_buf, afl->stage_cur + 2);
@@ -828,6 +865,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
afl->stage_cur_byte = afl->stage_cur;
if (!skip_eff_map[afl->stage_cur_byte]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
out_buf[afl->stage_cur] ^= 0xFF;
#ifdef INTROSPECTION
@@ -837,37 +878,6 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; }
/* We also use this stage to pull off a simple trick: we identify
bytes that seem to have no effect on the current execution path
even when fully flipped - and we skip them during more expensive
deterministic stages, such as arithmetics or known ints. */
if (!eff_map[EFF_APOS(afl->stage_cur)]) {
u64 cksum;
/* If in non-instrumented mode or if the file is very short, just flag
everything without wasting time on checksums. */
if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {
cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
} else {
cksum = ~prev_cksum;
}
if (cksum != prev_cksum) {
eff_map[EFF_APOS(afl->stage_cur)] = 1;
++eff_cnt;
}
}
out_buf[afl->stage_cur] ^= 0xFF;
}
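The block above is deleted by this commit: it is the classic AFL effector-map trick, which piggybacked on the walking byte flips to mark a byte "ineffective" when fully flipping it left the path checksum unchanged, so later stages could skip it. For reference, the EFF_* helpers it uses, as defined upstream (quoted from config.h / afl-fuzz-one.c, worth verifying against the tree; EFF_MAP_SCALE2 is 3, i.e. one map entry per 8-byte block):

    #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)         /* byte -> entry */
    #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
    #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l))   /* map entries   */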
@@ -876,18 +886,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
whole thing as worth fuzzing, since we wouldn't be saving much time
anyway. */
if (eff_cnt != (u32)EFF_ALEN(len) &&
eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
memset(eff_map, 1, EFF_ALEN(len));
afl->blocks_eff_select += EFF_ALEN(len);
} else {
afl->blocks_eff_select += eff_cnt;
}
memset(eff_map, 1, EFF_ALEN(len));
afl->blocks_eff_select += EFF_ALEN(len);
afl->blocks_eff_total += EFF_ALEN(len);
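The deleted branch above was the old density heuristic: once more than EFF_MAX_PERC percent of blocks tested effective, scanning a sparse map would save little time, so the whole map was flagged. This commit flags the whole map unconditionally and delegates all skipping to skip_eff_map instead. A worked example of the old arithmetic, assuming upstream's EFF_MAX_PERC of 90:

    len     = 1000 bytes  ->  EFF_ALEN(1000) = 1000 >> 3 = 125 blocks
    eff_cnt = 120         ->  120 * 100 / 125 = 96 > 90 (EFF_MAX_PERC)
    =>  memset(eff_map, 1, 125)  /* treat every block as effective */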
@@ -921,6 +921,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
*(u16 *)(out_buf + i) ^= 0xFFFF;
@@ -967,6 +971,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
*(u32 *)(out_buf + i) ^= 0xFFFFFFFF;
@@ -1023,6 +1031,10 @@ skip_bitflip:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 1; j <= ARITH_MAX; ++j) {
@@ -1110,6 +1122,10 @@ skip_bitflip:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 1; j <= ARITH_MAX; ++j) {
@@ -1244,6 +1260,10 @@ skip_bitflip:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 1; j <= ARITH_MAX; ++j) {
@@ -1381,6 +1401,10 @@ skip_arith:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < (u32)sizeof(interesting_8); ++j) {
@@ -1444,6 +1468,10 @@ skip_arith:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
@@ -1536,6 +1564,10 @@ skip_arith:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
@@ -1626,6 +1658,10 @@ skip_interest:
u32 last_len = 0;
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
/* Extras are sorted by size, from smallest to largest. This means
@@ -1693,6 +1729,10 @@ skip_interest:
for (i = 0; i <= (u32)len; ++i) {
if (!skip_eff_map[i % len]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < afl->extras_cnt; ++j) {
@@ -1755,6 +1795,10 @@ skip_user_extras:
u32 last_len = 0;
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
u32 min_extra_len = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS);
@@ -1813,6 +1857,10 @@ skip_user_extras:
for (i = 0; i <= (u32)len; ++i) {
if (!skip_eff_map[i % len]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < afl->a_extras_cnt; ++j) {
@@ -2020,6 +2068,19 @@ custom_mutator_stage:
havoc_stage:
#ifdef INTROSPECTION
if (!is_logged) {
is_logged = 1;
before_havoc_findings = afl->queued_items;
before_havoc_edges = count_non_255_bytes(afl, afl->virgin_bits);
before_havoc_time = get_cur_time();
}
#endif
if (unlikely(afl->custom_only)) {
/* Force UI update */
@@ -3430,6 +3491,25 @@ retry_splicing:
ret_val = 0;
#ifdef INTROSPECTION
afl->havoc_prof->queued_det_stage =
before_havoc_findings - before_det_findings;
afl->havoc_prof->queued_havoc_stage =
afl->queued_items - before_havoc_findings;
afl->havoc_prof->total_queued_det += afl->havoc_prof->queued_det_stage;
afl->havoc_prof->edge_det_stage = before_havoc_edges - before_det_edges;
afl->havoc_prof->edge_havoc_stage =
count_non_255_bytes(afl, afl->virgin_bits) - before_havoc_edges;
afl->havoc_prof->total_det_edge += afl->havoc_prof->edge_det_stage;
afl->havoc_prof->det_stage_time = before_havoc_time - before_det_time;
afl->havoc_prof->havoc_stage_time = get_cur_time() - before_havoc_time;
afl->havoc_prof->total_det_time += afl->havoc_prof->det_stage_time;
plot_profile_data(afl, afl->queue_cur);
#endif
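The profiling block splits new queue entries, newly covered edges, and wall-clock time between the deterministic and havoc stages by differencing three snapshots: before_det_time (stage start), before_havoc_time (det finished), and now. A quick sanity check of the bookkeeping, with invented numbers:

    queued_items: 100 at det start, 104 at havoc start, 110 at the end
      queued_det_stage   = 104 - 100 = 4
      queued_havoc_stage = 110 - 104 = 6
    edges via count_non_255_bytes() difference the same way, and the two
    stage times are (before_havoc_time - before_det_time) and
    (get_cur_time() - before_havoc_time).

plot_profile_data() then records these per-seed deltas (its body lives in another file of this commit).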
/* we are through with this queue entry - for this iteration */
abandon_entry: