Enhancement on Deterministic stage (#1972)

* fuzzer: init commit based on aflpp 60dc37a8cf09f8e9048e4b6a2204d6c90b27655a

* fuzzers: add the skip variables and their initialization

* log: profile the det/havoc findings

* log: add profile log output

* fuzzers: separate log/skipdet module

* fuzzers: add quick eff_map calc

* fuzzers: add skip_eff_map in fuzz_one

* fuzzers: mark whole input space in eff_map

* fuzzers: add undet bit threshold to skip some seeds

* fuzzers: fix one byte overflow

* fuzzers: fix overflow

* fix code format

* add havoc only again

* code format

* remove log to INTROSPECTION, rename skipdet module

* rename skipdet module

* remove log to stats

* clean redundant code

* code format

* remove redundant code format check

* remove redundant doc

* remove redundant objects

* clean files

* change -d to default skipdet

* disable deterministic when using CUSTOM_MUTATOR

* revert fix
Authored by Han Zheng on 2024-02-01 15:13:21 +01:00, committed by GitHub
parent 37d2039211
commit 06f0982f0f
10 changed files with 682 additions and 51 deletions

include/afl-fuzz.h

@ -149,6 +149,48 @@ struct tainted {
};
struct inf_profile {
u32 inf_skipped_bytes; /* Inference Stage Profiling */
u64 inf_execs_cost, inf_time_cost;
};
/* TODO: add cmplog profiling as well */
struct havoc_profile {
u32 queued_det_stage, /* Det/Havoc Stage Profiling */
queued_havoc_stage, total_queued_det, edge_det_stage, edge_havoc_stage,
total_det_edge;
u64 det_stage_time, havoc_stage_time, total_det_time;
};
struct skipdet_entry {
u8 continue_inf, done_eff;
u32 undet_bits, quick_eff_bytes;
u8 *skip_eff_map, /* the finished eff_map: bytes worth det fuzzing */
*done_inf_map; /* tracks bytes already probed, since some may not be done yet */
};
struct skipdet_global {
u8 use_skip_havoc;
u32 undet_bits_threshold;
u64 last_cov_undet;
u8 *virgin_det_bits; /* global fuzzed bits */
struct inf_profile *inf_prof;
};
struct queue_entry {
u8 *fname; /* File name for the test case */
@ -203,6 +245,8 @@ struct queue_entry {
struct queue_entry *mother; /* queue entry this based on */
struct skipdet_entry *skipdet_e;
};
struct extra_data {
@ -247,6 +291,8 @@ enum {
/* 19 */ STAGE_CUSTOM_MUTATOR,
/* 20 */ STAGE_COLORIZATION,
/* 21 */ STAGE_ITS,
/* 22 */ STAGE_INF,
/* 23 */ STAGE_QUICK,
STAGE_NUM_MAX
@ -782,6 +828,11 @@ typedef struct afl_state {
* is too large) */
struct queue_entry **q_testcase_cache;
/* Global Profile Data for deterministic/havoc-splice stage */
struct havoc_profile *havoc_prof;
struct skipdet_global *skipdet_g;
#ifdef INTROSPECTION
char mutation[8072];
char m_tmp[4096];
@ -1232,6 +1283,13 @@ AFL_RAND_RETURN rand_next(afl_state_t *afl);
/* probability between 0.0 and 1.0 */
double rand_next_percent(afl_state_t *afl);
/* SkipDet Functions */
u8 skip_deterministic_stage(afl_state_t *, u8 *, u8 *, u32, u64);
u8 is_det_timeout(u64, u8);
void plot_profile_data(afl_state_t *, struct queue_entry *);
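For orientation, a condensed sketch of how these three entry points cooperate, pieced together from the call sites this commit adds in src/afl-fuzz-one.c (simplified, not verbatim):
u64 before_det_time = get_cur_time();
/* once per seed, ahead of the deterministic stages: skip inference plus
   quick eff-map probing; 0 means an execution inside SkipDet failed */
if (!afl->skip_deterministic) {
  if (!skip_deterministic_stage(afl, in_buf, out_buf, len, before_det_time)) {
    goto abandon_entry;
  }
}
u8 *skip_eff_map = afl->queue_cur->skipdet_e->skip_eff_map;
/* then, inside every deterministic stage loop: */
if (!skip_eff_map[afl->stage_cur_byte]) continue;  /* byte judged ineffective */
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
/* and, under INTROSPECTION, plot_profile_data(afl, afl->queue_cur) logs
   per-seed deterministic/havoc efficiency after the havoc stage */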
/**** Inline routines ****/
/* Generate a random number (from 0 to limit - 1). This may

include/config.h

@ -52,6 +52,18 @@
/* Default file permission umode when creating files (default: 0600) */
#define DEFAULT_PERMISSION 0600
/* SkipDet's global configuration */
#define MINIMAL_BLOCK_SIZE 64
#define SMALL_DET_TIME (60 * 1000 * 1000U)
#define MAXIMUM_INF_EXECS (16 * 1024U)
#define MAXIMUM_QUICK_EFF_EXECS (64 * 1024U)
#define THRESHOLD_DEC_TIME (20 * 60 * 1000U)
/* Set the probability of selecting eff_bytes to 3 times the original rate;
   now disabled */
#define EFF_HAVOC_RATE 3
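To make these constants concrete, here is a self-contained sketch of the inference walk they drive (run_and_hash() is a hypothetical stand-in for executing the target and hashing trace_bits; the real loop lives in src/afl-fuzz-skipdet.c below):
#include <stdint.h>
#include <string.h>
#define MINIMAL_BLOCK_SIZE 64
static void flip_range(uint8_t *input, uint32_t pos, uint32_t size) {
  for (uint32_t i = 0; i < size; i++) input[pos + i] ^= 0xFF;
}
/* hypothetical stand-in for running the target and hashing the coverage map */
extern uint64_t run_and_hash(uint8_t *buf, uint32_t len);
/* clear inf_eff_map entries for blocks whose full flip leaves the
   coverage checksum unchanged (the core of the inference stage) */
static void infer_skippable(uint8_t *buf, uint32_t len, uint8_t *inf_eff_map) {
  uint64_t base = run_and_hash(buf, len);
  uint32_t pos = 0, max_block_size = len / 8;
  while (pos < len - 1) {
    uint32_t cur = MINIMAL_BLOCK_SIZE;
    while (cur < max_block_size) {
      uint32_t flip = (cur + pos < len) ? cur : len - 1 - pos;
      flip_range(buf, pos, flip);              /* XOR 0xFF over the block */
      uint64_t cksum = run_and_hash(buf, len);
      flip_range(buf, pos, flip);              /* restore the input       */
      if (cksum != base) break;                /* block matters: stop     */
      cur *= 2;                                /* inert: double the block */
      if (cur >= len - 1 - pos) break;
    }
    if (cur == MINIMAL_BLOCK_SIZE) {
      pos += cur;                              /* failed early: step past */
    } else {
      uint32_t skip = (cur / 2 + pos < len) ? cur / 2 : len - pos - 1;
      memset(inf_eff_map + pos, 0, skip);      /* proven inert: skippable */
      pos += skip;
    }
  }
}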
/* CMPLOG/REDQUEEN TUNING
*
* Here you can modify tuning and solving options for CMPLOG.

include/forkserver.h

@ -126,7 +126,8 @@ typedef struct afl_forkserver {
u8 *out_file, /* File to fuzz, if any */
*target_path; /* Path of the target */
FILE *plot_file; /* Gnuplot output file */
FILE *plot_file, /* Gnuplot output file */
*det_plot_file;
/* Note: last_run_timed_out is u32 to send it to the child as 4 byte array */
u32 last_run_timed_out; /* Traced process timed out? */

src/afl-fuzz-init.c

@ -2236,6 +2236,21 @@ void setup_dirs_fds(afl_state_t *afl) {
fflush(afl->fsrv.plot_file);
#ifdef INTROSPECTION
tmp = alloc_printf("%s/plot_det_data", afl->out_dir);
int fd = open(tmp, O_WRONLY | O_CREAT, DEFAULT_PERMISSION);
if (fd < 0) { PFATAL("Unable to create '%s'", tmp); }
ck_free(tmp);
afl->fsrv.det_plot_file = fdopen(fd, "w");
if (!afl->fsrv.det_plot_file) { PFATAL("fdopen() failed"); }
if (afl->in_place_resume) { fseek(afl->fsrv.det_plot_file, 0, SEEK_END); }
#endif
/* ignore errors */
}

src/afl-fuzz-one.c

@ -545,12 +545,37 @@ u8 fuzz_one_original(afl_state_t *afl) {
}
u64 before_det_time = get_cur_time();
#ifdef INTROSPECTION
u64 before_havoc_time;
u32 before_det_findings = afl->queued_items,
before_det_edges = count_non_255_bytes(afl, afl->virgin_bits),
before_havoc_findings, before_havoc_edges;
u8 is_logged = 0;
#endif
if (!afl->skip_deterministic) {
if (!skip_deterministic_stage(afl, in_buf, out_buf, len, before_det_time)) {
goto abandon_entry;
}
}
u8 *skip_eff_map = afl->queue_cur->skipdet_e->skip_eff_map;
/* Skip right away if -d is given, if it has not been chosen sufficiently
often to warrant the expensive deterministic stage (fuzz_level), or
if it has gone through deterministic testing in earlier, resumed runs
(passed_det). */
/* if SkipDet decides to skip the seed, or no interesting bytes were found,
   we skip the whole deterministic stage as well */
if (likely(afl->skip_deterministic) || likely(afl->queue_cur->passed_det) ||
likely(!afl->queue_cur->skipdet_e->quick_eff_bytes) ||
likely(perf_score <
(afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
? afl->queue_cur->depth * 30
@ -609,6 +634,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
afl->stage_cur_byte = afl->stage_cur >> 3;
if (!skip_eff_map[afl->stage_cur_byte]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
FLIP_BIT(out_buf, afl->stage_cur);
#ifdef INTROSPECTION
@ -725,6 +754,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
afl->stage_cur_byte = afl->stage_cur >> 3;
if (!skip_eff_map[afl->stage_cur_byte]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
FLIP_BIT(out_buf, afl->stage_cur);
FLIP_BIT(out_buf, afl->stage_cur + 1);
@ -760,6 +793,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
afl->stage_cur_byte = afl->stage_cur >> 3;
if (!skip_eff_map[afl->stage_cur_byte]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
FLIP_BIT(out_buf, afl->stage_cur);
FLIP_BIT(out_buf, afl->stage_cur + 1);
FLIP_BIT(out_buf, afl->stage_cur + 2);
@ -828,6 +865,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
afl->stage_cur_byte = afl->stage_cur;
if (!skip_eff_map[afl->stage_cur_byte]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
out_buf[afl->stage_cur] ^= 0xFF;
#ifdef INTROSPECTION
@ -837,37 +878,6 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (common_fuzz_stuff(afl, out_buf, len)) { goto abandon_entry; }
/* We also use this stage to pull off a simple trick: we identify
bytes that seem to have no effect on the current execution path
even when fully flipped - and we skip them during more expensive
deterministic stages, such as arithmetics or known ints. */
if (!eff_map[EFF_APOS(afl->stage_cur)]) {
u64 cksum;
/* If in non-instrumented mode or if the file is very short, just flag
everything without wasting time on checksums. */
if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {
cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
} else {
cksum = ~prev_cksum;
}
if (cksum != prev_cksum) {
eff_map[EFF_APOS(afl->stage_cur)] = 1;
++eff_cnt;
}
}
out_buf[afl->stage_cur] ^= 0xFF;
}
@ -876,18 +886,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
whole thing as worth fuzzing, since we wouldn't be saving much time
anyway. */
if (eff_cnt != (u32)EFF_ALEN(len) &&
eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) {
memset(eff_map, 1, EFF_ALEN(len));
afl->blocks_eff_select += EFF_ALEN(len);
} else {
afl->blocks_eff_select += eff_cnt;
}
memset(eff_map, 1, EFF_ALEN(len));
afl->blocks_eff_select += EFF_ALEN(len);
afl->blocks_eff_total += EFF_ALEN(len);
@ -921,6 +921,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
*(u16 *)(out_buf + i) ^= 0xFFFF;
@ -967,6 +971,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
*(u32 *)(out_buf + i) ^= 0xFFFFFFFF;
@ -1023,6 +1031,10 @@ skip_bitflip:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 1; j <= ARITH_MAX; ++j) {
@ -1110,6 +1122,10 @@ skip_bitflip:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 1; j <= ARITH_MAX; ++j) {
@ -1244,6 +1260,10 @@ skip_bitflip:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 1; j <= ARITH_MAX; ++j) {
@ -1381,6 +1401,10 @@ skip_arith:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < (u32)sizeof(interesting_8); ++j) {
@ -1444,6 +1468,10 @@ skip_arith:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < sizeof(interesting_16) / 2; ++j) {
@ -1536,6 +1564,10 @@ skip_arith:
}
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < sizeof(interesting_32) / 4; ++j) {
@ -1626,6 +1658,10 @@ skip_interest:
u32 last_len = 0;
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
/* Extras are sorted by size, from smallest to largest. This means
@ -1693,6 +1729,10 @@ skip_interest:
for (i = 0; i <= (u32)len; ++i) {
if (!skip_eff_map[i % len]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < afl->extras_cnt; ++j) {
@ -1755,6 +1795,10 @@ skip_user_extras:
u32 last_len = 0;
if (!skip_eff_map[i]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
u32 min_extra_len = MIN(afl->a_extras_cnt, (u32)USE_AUTO_EXTRAS);
@ -1813,6 +1857,10 @@ skip_user_extras:
for (i = 0; i <= (u32)len; ++i) {
if (!skip_eff_map[i % len]) continue;
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }
afl->stage_cur_byte = i;
for (j = 0; j < afl->a_extras_cnt; ++j) {
@ -2020,6 +2068,19 @@ custom_mutator_stage:
havoc_stage:
#ifdef INTROSPECTION
if (!is_logged) {
is_logged = 1;
before_havoc_findings = afl->queued_items;
before_havoc_edges = count_non_255_bytes(afl, afl->virgin_bits);
before_havoc_time = get_cur_time();
}
#endif
if (unlikely(afl->custom_only)) {
/* Force UI update */
@ -3430,6 +3491,25 @@ retry_splicing:
ret_val = 0;
#ifdef INTROSPECTION
afl->havoc_prof->queued_det_stage =
before_havoc_findings - before_det_findings;
afl->havoc_prof->queued_havoc_stage =
afl->queued_items - before_havoc_findings;
afl->havoc_prof->total_queued_det += afl->havoc_prof->queued_det_stage;
afl->havoc_prof->edge_det_stage = before_havoc_edges - before_det_edges;
afl->havoc_prof->edge_havoc_stage =
count_non_255_bytes(afl, afl->virgin_bits) - before_havoc_edges;
afl->havoc_prof->total_det_edge += afl->havoc_prof->edge_det_stage;
afl->havoc_prof->det_stage_time = before_havoc_time - before_det_time;
afl->havoc_prof->havoc_stage_time = get_cur_time() - before_havoc_time;
afl->havoc_prof->total_det_time += afl->havoc_prof->det_stage_time;
plot_profile_data(afl, afl->queue_cur);
#endif
/* we are through with this queue entry - for this iteration */
abandon_entry:

src/afl-fuzz-queue.c

@ -664,6 +664,8 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
}
q->skipdet_e = (struct skipdet_entry *)ck_alloc(sizeof(struct skipdet_entry));
}
/* Destroy the entire queue. */
@ -679,6 +681,15 @@ void destroy_queue(afl_state_t *afl) {
q = afl->queue_buf[i];
ck_free(q->fname);
ck_free(q->trace_mini);
if (q->skipdet_e) {
if (q->skipdet_e->done_inf_map) ck_free(q->skipdet_e->done_inf_map);
if (q->skipdet_e->skip_eff_map) ck_free(q->skipdet_e->skip_eff_map);
ck_free(q->skipdet_e);
}
ck_free(q);
}

src/afl-fuzz-skipdet.c (new file, 403 lines)

@ -0,0 +1,403 @@
#include "afl-fuzz.h"
void flip_range(u8 *input, u32 pos, u32 size) {
for (u32 i = 0; i < size; i++)
input[pos + i] ^= 0xFF;
return;
}
#define MAX_EFF_TIMEOUT (10 * 60 * 1000)
#define MAX_DET_TIMEOUT (15 * 60 * 1000)
u8 is_det_timeout(u64 cur_ms, u8 is_flip) {
if (is_flip) {
if (unlikely(get_cur_time() - cur_ms > MAX_EFF_TIMEOUT)) return 1;
} else {
if (unlikely(get_cur_time() - cur_ms > MAX_DET_TIMEOUT)) return 1;
}
return 0;
}
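The two budgets give the eff-map construction (is_flip == 1) at most 10 minutes and the deterministic stages as a whole 15 minutes per seed; the corresponding checks, as used later in this commit:
/* inside quick-eff probing (flip path, MAX_EFF_TIMEOUT) */
if (is_det_timeout(before_det_time, 1)) { goto cleanup_skipdet; }
/* inside the regular deterministic stages (MAX_DET_TIMEOUT) */
if (is_det_timeout(before_det_time, 0)) { goto custom_mutator_stage; }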
/* decide if the seed should be deterministically fuzzed */
u8 should_det_fuzz(afl_state_t *afl, struct queue_entry *q) {
if (!afl->skipdet_g->virgin_det_bits) {
afl->skipdet_g->virgin_det_bits =
(u8 *)ck_alloc(sizeof(u8) * afl->fsrv.map_size);
}
if (!q->favored || q->passed_det) return 0;
if (!q->trace_mini) return 0;
if (!afl->skipdet_g->last_cov_undet)
afl->skipdet_g->last_cov_undet = get_cur_time();
if (get_cur_time() - afl->skipdet_g->last_cov_undet >= THRESHOLD_DEC_TIME) {
if (afl->skipdet_g->undet_bits_threshold >= 2) {
afl->skipdet_g->undet_bits_threshold *= 0.75;
afl->skipdet_g->last_cov_undet = get_cur_time();
}
}
u32 new_det_bits = 0;
for (u32 i = 0; i < afl->fsrv.map_size; i++) {
if (unlikely(q->trace_mini[i >> 3] & (1 << (i & 7)))) {
if (!afl->skipdet_g->virgin_det_bits[i]) { new_det_bits++; }
}
}
if (!afl->skipdet_g->undet_bits_threshold)
afl->skipdet_g->undet_bits_threshold = new_det_bits * 0.05;
if (new_det_bits >= afl->skipdet_g->undet_bits_threshold) {
afl->skipdet_g->last_cov_undet = get_cur_time();
q->skipdet_e->undet_bits = new_det_bits;
for (u32 i = 0; i < afl->fsrv.map_size; i++) {
if (unlikely(q->trace_mini[i >> 3] & (1 << (i & 7)))) {
if (!afl->skipdet_g->virgin_det_bits[i])
afl->skipdet_g->virgin_det_bits[i] = 1;
}
}
return 1;
}
return 0;
}
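A worked example of the threshold dynamics above (numbers hypothetical):
/* Hypothetical walk-through of should_det_fuzz():
   - the first qualifying seed brings 800 trace_mini bits that are not
     yet in virgin_det_bits -> undet_bits_threshold starts at
     800 * 0.05 = 40
   - any later favored, not-yet-det-fuzzed seed contributing >= 40 new
     bits gets the deterministic stage, and its bits are merged into
     virgin_det_bits
   - if 20 minutes (THRESHOLD_DEC_TIME) pass without such a seed, the
     threshold decays to 40 * 0.75 = 30, making qualification easier */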
/*
  The SkipDet entry point: it runs the two stages below (skip inference,
  then quick eff-map construction) and returns 0 if an execution failed.
*/
u8 skip_deterministic_stage(afl_state_t *afl, u8 *orig_buf, u8 *out_buf,
u32 len, u64 before_det_time) {
u64 orig_hit_cnt, new_hit_cnt;
if (afl->queue_cur->skipdet_e->done_eff) return 1;
if (!should_det_fuzz(afl, afl->queue_cur)) return 1;
/* the check above makes sure that seeds without enough undetermined bits
   are ignored */
/******************
* SKIP INFERENCE *
******************/
afl->stage_short = "inf";
afl->stage_name = "inference";
afl->stage_cur = 0;
orig_hit_cnt = afl->queued_items + afl->saved_crashes;
u8 *inf_eff_map = (u8 *)ck_alloc(sizeof(u8) * len);
memset(inf_eff_map, 1, sizeof(u8) * len);
if (common_fuzz_stuff(afl, orig_buf, len)) { return 0; }
u64 prev_cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
u64 _prev_cksum = prev_cksum;
if (MINIMAL_BLOCK_SIZE * 8 < len) {
// u64 size_skiped = 0, quick_skip_exec = total_execs, quick_skip_time =
// get_cur_time();
u64 pre_inf_exec = afl->fsrv.total_execs, pre_inf_time = get_cur_time();
/* if the deterministic stage time / input size is too small, just go ahead */
u32 pos = 0, cur_block_size = MINIMAL_BLOCK_SIZE, max_block_size = len / 8;
while (pos < len - 1) {
cur_block_size = MINIMAL_BLOCK_SIZE;
while (cur_block_size < max_block_size) {
u32 flip_block_size =
(cur_block_size + pos < len) ? cur_block_size : len - 1 - pos;
afl->stage_cur += 1;
flip_range(out_buf, pos, flip_block_size);
if (common_fuzz_stuff(afl, out_buf, len)) return 0;
flip_range(out_buf, pos, flip_block_size);
u64 cksum =
hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
// printf("Now trying range %d with %d, %s.\n", pos, cur_block_size,
// (cksum == prev_cksum) ? (u8*)"Yes" : (u8*) "Not");
/* continue until we fail or exceed length */
if (cksum == _prev_cksum) {
cur_block_size *= 2;
if (cur_block_size >= len - 1 - pos) break;
} else {
break;
}
}
if (cur_block_size == MINIMAL_BLOCK_SIZE) {
/* we failed early on */
pos += cur_block_size;
} else {
u32 cur_skip_len = (cur_block_size / 2 + pos < len)
? (cur_block_size / 2)
: (len - pos - 1);
memset(inf_eff_map + pos, 0, cur_skip_len);
afl->skipdet_g->inf_prof->inf_skipped_bytes += cur_skip_len;
pos += cur_skip_len;
}
}
afl->skipdet_g->inf_prof->inf_execs_cost +=
(afl->fsrv.total_execs - pre_inf_exec);
afl->skipdet_g->inf_prof->inf_time_cost += (get_cur_time() - pre_inf_time);
// PFATAL("Done, now have %d bytes skipped, with exec %lld, time %lld.\n",
// afl->inf_skipped_bytes, afl->inf_execs_cost, afl->inf_time_cost);
} else
memset(inf_eff_map, 1, len);
new_hit_cnt = afl->queued_items + afl->saved_crashes;
afl->stage_finds[STAGE_INF] += new_hit_cnt - orig_hit_cnt;
afl->stage_cycles[STAGE_INF] += afl->stage_cur;
/****************************
* Quick Skip Effective Map *
****************************/
/* Quick Effective Map Calculation */
afl->stage_short = "quick";
afl->stage_name = "quick eff";
afl->stage_cur = 0;
afl->stage_max = 32 * 1024;
orig_hit_cnt = afl->queued_items + afl->saved_crashes;
u32 before_skip_inf = afl->queued_items;
/* clean all the eff bytes, since previous eff bytes are already fuzzed */
u8 *skip_eff_map = afl->queue_cur->skipdet_e->skip_eff_map,
*done_inf_map = afl->queue_cur->skipdet_e->done_inf_map;
if (!skip_eff_map) {
skip_eff_map = (u8 *)ck_alloc(sizeof(u8) * len);
afl->queue_cur->skipdet_e->skip_eff_map = skip_eff_map;
} else {
memset(skip_eff_map, 0, sizeof(u8) * len);
}
/* restore the starting point */
if (!done_inf_map) {
done_inf_map = (u8 *)ck_alloc(sizeof(u8) * len);
afl->queue_cur->skipdet_e->done_inf_map = done_inf_map;
} else {
for (afl->stage_cur = 0; afl->stage_cur < len; afl->stage_cur++) {
if (done_inf_map[afl->stage_cur] == 0) break;
}
}
/* depending on the seed's performance, we could search eff bytes
for multiple rounds */
u8 eff_round_continue = 1, eff_round_done = 0, done_eff = 0, repeat_eff = 0,
fuzz_nearby = 0, *non_eff_bytes = 0;
u64 before_eff_execs = afl->fsrv.total_execs;
if (getenv("REPEAT_EFF")) repeat_eff = 1;
if (getenv("FUZZ_NEARBY")) fuzz_nearby = 1;
if (fuzz_nearby) {
non_eff_bytes = (u8 *)ck_alloc(sizeof(u8) * len);
// clean exec cksum
if (common_fuzz_stuff(afl, out_buf, len)) { return 0; }
prev_cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
}
do {
eff_round_continue = 0;
afl->stage_max = 32 * 1024;
for (; afl->stage_cur < afl->stage_max && afl->stage_cur < len;
++afl->stage_cur) {
afl->stage_cur_byte = afl->stage_cur;
if (!inf_eff_map[afl->stage_cur_byte] ||
skip_eff_map[afl->stage_cur_byte])
continue;
if (is_det_timeout(before_det_time, 1)) { goto cleanup_skipdet; }
u8 orig = out_buf[afl->stage_cur_byte], replace = rand_below(afl, 256);
while (replace == orig) {
replace = rand_below(afl, 256);
}
out_buf[afl->stage_cur_byte] = replace;
before_skip_inf = afl->queued_items;
if (common_fuzz_stuff(afl, out_buf, len)) { return 0; }
out_buf[afl->stage_cur_byte] = orig;
if (fuzz_nearby) {
if (prev_cksum ==
hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST)) {
non_eff_bytes[afl->stage_cur_byte] = 1;
}
}
if (afl->queued_items != before_skip_inf) {
skip_eff_map[afl->stage_cur_byte] = 1;
afl->queue_cur->skipdet_e->quick_eff_bytes += 1;
if (afl->stage_max < MAXIMUM_QUICK_EFF_EXECS) { afl->stage_max *= 2; }
if (afl->stage_max == MAXIMUM_QUICK_EFF_EXECS && repeat_eff)
eff_round_continue = 1;
}
done_inf_map[afl->stage_cur_byte] = 1;
}
afl->stage_cur = 0;
done_eff = 1;
if (++eff_round_done >= 8) break;
} while (eff_round_continue);
new_hit_cnt = afl->queued_items + afl->saved_crashes;
afl->stage_finds[STAGE_QUICK] += new_hit_cnt - orig_hit_cnt;
afl->stage_cycles[STAGE_QUICK] += (afl->fsrv.total_execs - before_eff_execs);
cleanup_skipdet:
if (fuzz_nearby) {
u8 *nearby_bytes = (u8 *)ck_alloc(sizeof(u8) * len);
u32 i = 3;
while (i < len) {
// assume DWORD size, from i - 3 -> i + 3
if (skip_eff_map[i]) {
u32 fill_length = (i + 3 < len) ? 7 : len - i + 2;
memset(nearby_bytes + i - 3, 1, fill_length);
i += 3;
} else
i += 1;
}
for (i = 0; i < len; i++) {
if (nearby_bytes[i] && !non_eff_bytes[i]) skip_eff_map[i] = 1;
}
ck_free(nearby_bytes);
ck_free(non_eff_bytes);
}
if (done_eff) {
afl->queue_cur->skipdet_e->continue_inf = 0;
afl->queue_cur->skipdet_e->done_eff = 1;
} else {
afl->queue_cur->skipdet_e->continue_inf = 1;
}
return 1;
}
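Note that REPEAT_EFF and FUZZ_NEARBY above are experimental toggles whose presence is checked via getenv(); a hypothetical invocation enabling both:
/* e.g. (hypothetical):
     REPEAT_EFF=1 FUZZ_NEARBY=1 ./afl-fuzz -i seeds -o out -- ./target @@ */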

src/afl-fuzz-state.c

@ -102,7 +102,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
afl->stats_update_freq = 1;
afl->stats_file_update_freq_msecs = STATS_UPDATE_SEC * 1000;
afl->stats_avg_exec = 0;
afl->skip_deterministic = 1;
afl->skip_deterministic = 0;
afl->sync_time = SYNC_TIME;
afl->cmplog_lvl = 2;
afl->min_length = 1;
@ -140,6 +140,14 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
afl->fsrv.child_pid = -1;
afl->fsrv.out_dir_fd = -1;
/* Init SkipDet */
afl->skipdet_g =
(struct skipdet_global *)ck_alloc(sizeof(struct skipdet_global));
afl->skipdet_g->inf_prof =
(struct inf_profile *)ck_alloc(sizeof(struct inf_profile));
afl->havoc_prof =
(struct havoc_profile *)ck_alloc(sizeof(struct havoc_profile));
init_mopt_globals(afl);
list_append(&afl_states, afl);

src/afl-fuzz-stats.c

@ -502,6 +502,44 @@ void maybe_update_plot_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
}
/* Log deterministic stage efficiency */
void plot_profile_data(afl_state_t *afl, struct queue_entry *q) {
u64 current_ms = get_cur_time() - afl->start_time;
u32 current_edges = count_non_255_bytes(afl, afl->virgin_bits);
double det_finding_rate = (double)afl->havoc_prof->total_det_edge * 100.0 /
(double)current_edges,
det_time_rate = (double)afl->havoc_prof->total_det_time * 100.0 /
(double)current_ms;
u32 ndet_bits = 0;
for (u32 i = 0; i < afl->fsrv.map_size; i++) {
if (afl->skipdet_g->virgin_det_bits[i]) ndet_bits += 1;
}
double det_fuzzed_rate = (double)ndet_bits * 100.0 / (double)current_edges;
fprintf(afl->fsrv.det_plot_file,
"[%02lld:%02lld:%02lld] fuzz %d (%d), find %d/%d among %d(%02.2f) "
"and spend %lld/%lld(%02.2f), cover %02.2f yet, %d/%d undet bits, "
"continue %d.\n",
current_ms / 1000 / 3600, (current_ms / 1000 / 60) % 60,
(current_ms / 1000) % 60, afl->current_entry, q->fuzz_level,
afl->havoc_prof->edge_det_stage, afl->havoc_prof->edge_havoc_stage,
current_edges, det_finding_rate,
afl->havoc_prof->det_stage_time / 1000,
afl->havoc_prof->havoc_stage_time / 1000, det_time_rate,
det_fuzzed_rate, q->skipdet_e->undet_bits,
afl->skipdet_g->undet_bits_threshold, q->skipdet_e->continue_inf);
fflush(afl->fsrv.det_plot_file);
}
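With the format string above, a line in plot_det_data comes out roughly as (values hypothetical):
[00:42:07] fuzz 123 (4), find 5/9 among 3127(0.16) and spend 12/95(11.20), cover 2.40 yet, 87/50 undet bits, continue 0.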
/* Check terminal dimensions after resize. */
static void check_term_size(afl_state_t *afl) {

src/afl-fuzz.c

@ -955,16 +955,16 @@ int main(int argc, char **argv_orig, char **envp) {
break;
case 'D': /* enforce deterministic */
afl->skip_deterministic = 0;
break;
case 'd': /* skip deterministic */
case 'D': /* no deterministic */
afl->skip_deterministic = 1;
break;
case 'd': /* partial deterministic */
afl->skip_deterministic = 0;
break;
case 'B': /* load bitmap */
/* This is a secret undocumented option! It is useful if you find
@ -3031,6 +3031,11 @@ stop_fuzzing:
if (frida_afl_preload) { ck_free(frida_afl_preload); }
fclose(afl->fsrv.plot_file);
#ifdef INTROSPECTION
fclose(afl->fsrv.det_plot_file);
#endif
destroy_queue(afl);
destroy_extras(afl);
destroy_custom_mutators(afl);