mirror of https://github.com/AFLplusplus/AFLplusplus.git (synced 2025-06-10 17:21:33 +00:00)

commit 51a346bcbe
parent fd9587d26e

    50% less globals
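The diff below moves function-scope static variables and buffers into the per-instance structs (afl_state_t, afl_forkserver_t). As a minimal standalone sketch of that pattern -- simplified names, assumed for illustration, not code from this commit -- the change looks like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Before: per-call state hides in a function-scope static, so the helper is
       not reentrant and is tied to a single, implicit fuzzer instance. */
    const char *stage_name_before(uint32_t cycle) {

      static char buf[64];                            /* shared by every caller */
      snprintf(buf, sizeof(buf), "splice %u", cycle);
      return buf;

    }

    /* After: the buffer lives in a state struct that the caller passes in, so
       several instances can coexist in one process. */
    typedef struct my_state {

      char stage_name_buf[64];

    } my_state_t;

    void stage_name_after(my_state_t *st, uint32_t cycle) {

      snprintf(st->stage_name_buf, sizeof(st->stage_name_buf), "splice %u", cycle);

    }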
@@ -152,7 +152,7 @@ static const u8 *trampoline_fmt_64 =
   "/* --- END --- */\n"
   "\n";

-static const u8*main_payload_32 =
+static const u8 *main_payload_32 =

   "\n"
   "/* --- AFL MAIN PAYLOAD (32-BIT) --- */\n"
@@ -409,7 +409,7 @@ static const u8*main_payload_32 =
 #define CALL_L64(str) "call " str "@PLT\n"
 #endif /* ^__APPLE__ */

-static const u8* main_payload_64 =
+static const u8 *main_payload_64 =

   "\n"
   "/* --- AFL MAIN PAYLOAD (64-BIT) --- */\n"
@@ -109,6 +109,8 @@
 #define CASE_PREFIX "id_"
 #endif /* ^!SIMPLE_FILES */

+#define STAGE_BUF_SIZE (64) /* usable size of the stage name buf in afl_state */
+
 extern s8 interesting_8[INTERESTING_8_LEN];
 extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
 extern s32
@@ -479,7 +481,7 @@ typedef struct afl_state {
       *stage_short, /* Short stage name */
       *syncing_party; /* Currently syncing with... */

-  u8 stage_name_buf64[64]; /* A name buf with len 64 if needed */
+  u8 stage_name_buf[STAGE_BUF_SIZE]; /* reused stagename buf with len 64 */

   s32 stage_cur, stage_max; /* Stage progression */
   s32 splicing_with; /* Splicing with which test case? */
@@ -540,6 +542,7 @@ typedef struct afl_state {

   /* cmplog forkserver ids */
   s32 cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd;
+  u32 cmplog_prev_timed_out;

   u8 describe_op_buf_256[256]; /* describe_op will use this to return a string
                                   up to 256 */
@@ -555,6 +558,20 @@ typedef struct afl_state {
   u32 document_counter;
 #endif

+  /* statis file */
+  double last_bitmap_cvg, last_stability, last_eps;
+
+  /* plot file saves from last run */
+  u32 plot_prev_qp, plot_prev_pf, plot_prev_pnf, plot_prev_ce, plot_prev_md;
+  u64 plot_prev_qc, plot_prev_uc, plot_prev_uh;
+
+  u64 stats_last_stats_ms, stats_last_plot_ms, stats_last_ms, stats_last_execs;
+  double stats_avg_exec;
+
+  u8 clean_trace[MAP_SIZE];
+  u8 clean_trace_custom[MAP_SIZE];
+  u8 first_trace[MAP_SIZE];
+
 } afl_state_t;

 /* A global pointer to all instances is needed (for now) for signals to arrive
@@ -786,7 +803,7 @@ u8 has_new_bits(afl_state_t *, u8 *);
 u8 *DI(u64);
 u8 *DF(double);
 u8 *DMS(u64);
-u8 *DTD(u64, u64);
+void DTD(u8 *, size_t, u64, u64);

 /* Extras */

@@ -62,6 +62,8 @@ typedef struct afl_forkserver {

   u8 use_fauxsrv; /* Fauxsrv for non-forking targets? */

+  u32 prev_timed_out; /* if prev forkserver run timed out */
+
 } afl_forkserver_t;

 void handle_timeout(int sig);
@@ -156,6 +156,7 @@ void afl_fsrv_init(afl_forkserver_t *fsrv) {
   fsrv->out_dir_fd = -1;

   fsrv->use_fauxsrv = 0;
+  fsrv->prev_timed_out = 0;

   list_append(&fsrv_list, fsrv);

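With prev_timed_out now carried in the forkserver struct, the per-round control-pipe handshake needs no function-local static. A simplified, hypothetical sketch of that handshake (error handling trimmed, header name assumed, not code from the commit):

    #include <unistd.h>

    #include "forkserver.h" /* assumed header providing afl_forkserver_t */

    /* One round: report whether the previous child timed out, then read back
       the pid of the freshly forked child from the status pipe fd. */
    static int request_child(afl_forkserver_t *fsrv, int st_fd) {

      int child_pid;

      if (write(fsrv->fsrv_ctl_fd, &fsrv->prev_timed_out, 4) != 4) return -1;
      if (read(st_fd, &child_pid, 4) != 4) return -1;

      return child_pid;

    }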
@@ -166,7 +167,7 @@ void afl_fsrv_init(afl_forkserver_t *fsrv) {

 static void afl_fauxsrv_execv(afl_forkserver_t *fsrv, char **argv) {

-  static unsigned char tmp[4] = {0};
+  unsigned char tmp[4] = {0};
   pid_t child_pid = -1;

   /* Phone home and tell the parent that we're OK. If parent isn't there,
@@ -413,7 +413,7 @@ void minimize_bits(u8 *dst, u8 *src) {
 #ifndef SIMPLE_FILES

 /* Construct a file name for a new test case, capturing the operation
-   that led to its discovery. Uses a static buffer. */
+   that led to its discovery. Returns a ptr to afl->describe_op_buf_256. */

 u8 *describe_op(afl_state_t *afl, u8 hnb) {

@@ -31,7 +31,7 @@

 void init_cmplog_forkserver(afl_state_t *afl) {

-  static struct timeval timeout;
+  struct timeval timeout;
   int st_pipe[2], ctl_pipe[2];
   int status;
   s32 rlen;
@@ -372,12 +372,10 @@ void init_cmplog_forkserver(afl_state_t *afl) {

 u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {

-  static struct timeval it;
-  static u32 prev_timed_out = 0;
-  static u64 exec_ms = 0;
-
+  struct timeval it;
   int status = 0;
   int sret;
+  u64 exec_ms;

   u32 tb4;
   s32 res;
@@ -396,7 +394,7 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
   /* Since we always have a forkserver (or a fauxserver) running, we can simply
      tell them to have at it and read back the pid from it.*/

-  if ((res = write(afl->cmplog_fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+  if ((res = write(afl->cmplog_fsrv_ctl_fd, &afl->cmplog_prev_timed_out, 4)) != 4) {

     if (afl->stop_soon) return 0;
     RPFATAL(res,
@@ -483,7 +481,7 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
   classify_counts((u32 *)afl->fsrv.trace_bits);
 #endif /* ^WORD_SIZE_64 */

-  prev_timed_out = afl->fsrv.child_timed_out;
+  afl->cmplog_prev_timed_out = afl->fsrv.child_timed_out;

   /* Report outcome to caller. */

@@ -78,6 +78,8 @@ list_t afl_states = {.element_prealloc_count = 0};

 void afl_state_init(afl_state_t *afl) {

+  memset(afl, 0, sizeof(afl_state_t));
+
   afl->w_init = 0.9;
   afl->w_end = 0.3;
   afl->g_max = 5000;
@@ -114,6 +116,29 @@ void afl_state_init(afl_state_t *afl) {
   afl->fsrv.child_pid = -1;
   afl->fsrv.out_dir_fd = -1;

+  afl->cmplog_prev_timed_out = 0;
+
+  /* statis file */
+  afl->last_bitmap_cvg = 0;
+  afl->last_stability = 0;
+  afl->last_eps = 0;
+
+  /* plot file saves from last run */
+  afl->plot_prev_qp = 0;
+  afl->plot_prev_pf = 0;
+  afl->plot_prev_pnf = 0;
+  afl->plot_prev_ce = 0;
+  afl->plot_prev_md = 0;
+  afl->plot_prev_qc = 0;
+  afl->plot_prev_uc = 0;
+  afl->plot_prev_uh = 0;
+
+  afl->stats_last_stats_ms = 0;
+  afl->stats_last_plot_ms = 0;
+  afl->stats_last_ms = 0;
+  afl->stats_last_execs = 0;
+  afl->stats_avg_exec = -1;
+
   init_mopt_globals(afl);

   list_append(&afl_states, afl);

@@ -797,7 +797,7 @@ void pivot_inputs(afl_state_t *afl) {

 u32 find_start_position(afl_state_t *afl) {

-  static u8 tmp[4096]; /* Ought to be enough for anybody. */
+  u8 tmp[4096] = {0}; /* Ought to be enough for anybody. */

   u8 *fn, *off;
   s32 fd, i;
@@ -834,7 +834,7 @@ u32 find_start_position(afl_state_t *afl) {

 void find_timeout(afl_state_t *afl) {

-  static u8 tmp[4096]; /* Ought to be enough for anybody. */
+  u8 tmp[4096] = {0}; /* Ought to be enough for anybody. */

   u8 *fn, *off;
   s32 fd, i;
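A side note on the replacement above: a function-scope static array is implicitly zero-filled, and the "= {0}" initializer preserves that guarantee for the automatic array (all elements without an explicit initializer become zero). A standalone check:

    #include <assert.h>

    int main(void) {

      char tmp[4096] = {0}; /* all 4096 bytes are zero, like the old static */

      for (int i = 0; i < 4096; i++)
        assert(tmp[i] == 0);

      return 0;

    }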
@@ -902,7 +902,7 @@ static u8 delete_files(u8 *path, u8 *prefix) {

 double get_runnable_processes(void) {

-  static double res;
+  double res = 0;

 #if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
     defined(__NetBSD__) || defined(__DragonFly__)
@@ -162,15 +162,14 @@ u8 *DMS(u64 val) {

 }

-/* Describe time delta. Returns one static buffer, 34 chars of less. */
+/* Describe time delta as string. */

-u8 *DTD(u64 cur_ms, u64 event_ms) {
+void DTD(u8 *buf, size_t len, u64 cur_ms, u64 event_ms) {

-  static u8 tmp[64];
   u64 delta;
   s32 t_d, t_h, t_m, t_s;

-  if (!event_ms) return "none seen yet";
+  if (!event_ms) snprintf(buf, len, "none seen yet");

   delta = cur_ms - event_ms;

@@ -179,8 +178,7 @@ u8 *DTD(u64 cur_ms, u64 event_ms) {
   t_m = (delta / 1000 / 60) % 60;
   t_s = (delta / 1000) % 60;

-  sprintf(tmp, "%s days, %d hrs, %d min, %d sec", DI(t_d), t_h, t_m, t_s);
-  return tmp;
+  snprintf(buf, len, "%s days, %d hrs, %d min, %d sec", DI(t_d), t_h, t_m, t_s);

 }

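DTD() thus changes from "returns a pointer to one shared static buffer" to "fills a buffer the caller owns". The miniature below is a compilable illustration of the new contract; it is a simplified stand-in with invented names, not the real function:

    #include <inttypes.h>
    #include <stdio.h>

    /* Same shape as the new DTD(): the caller supplies buf/len, so the helper
       needs no static storage and works with several fuzzer states at once. */
    static void describe_delta(char *buf, size_t len, uint64_t cur_ms, uint64_t event_ms) {

      if (!event_ms) { snprintf(buf, len, "none seen yet"); return; }

      snprintf(buf, len, "%" PRIu64 " sec ago", (cur_ms - event_ms) / 1000);

    }

    int main(void) {

      char time_tmp[64];

      describe_delta(time_tmp, sizeof(time_tmp), 125000, 5000);
      printf("last new path : %s\n", time_tmp); /* -> "last new path : 120 sec ago" */
      return 0;

    }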
@@ -192,14 +192,11 @@ void load_custom_mutator(afl_state_t *afl, const char *fn) {

 u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {

-  static u8 tmp[64];
-  static u8 clean_trace[MAP_SIZE];
-
   u8 needs_write = 0, fault = 0;
   u32 trim_exec = 0;
   u32 orig_len = q->len;

-  afl->stage_name = tmp;
+  if (afl->stage_name != afl->stage_name_buf) afl->stage_name = afl->stage_name_buf;
   afl->bytes_trim_in += q->len;

   /* Initialize trimming in the custom mutator */
@@ -212,7 +209,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {

   while (afl->stage_cur < afl->stage_max) {

-    sprintf(tmp, "ptrim %s", DI(trim_exec));
+    snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "ptrim %s", DI(trim_exec));

     u32 cksum;

@@ -251,7 +248,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
     if (!needs_write) {

       needs_write = 1;
-      memcpy(clean_trace, afl->fsrv.trace_bits, MAP_SIZE);
+      memcpy(afl->clean_trace_custom, afl->fsrv.trace_bits, MAP_SIZE);

     }

@@ -299,7 +296,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
     ck_write(fd, in_buf, q->len, q->fname);
     close(fd);

-    memcpy(afl->fsrv.trace_bits, clean_trace, MAP_SIZE);
+    memcpy(afl->fsrv.trace_bits, afl->clean_trace_custom, MAP_SIZE);
     update_bitmap_score(afl, q);

   }
@@ -1679,8 +1679,8 @@ havoc_stage:

   perf_score = orig_perf;

-  snprintf(afl->stage_name_buf64, 64, "splice %u", splice_cycle);
-  afl->stage_name = afl->stage_name_buf64;
+  snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "splice %u", splice_cycle);
+  if (afl->stage_name != afl->stage_name_buf) afl->stage_name = afl->stage_name_buf;
   afl->stage_short = "splice";
   afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;

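The stage-name writes above all go through snprintf() with an explicit STAGE_BUF_SIZE, so an oversized stage string is clamped to the buffer in afl_state_t instead of overrunning it. A standalone demonstration of that clamping (deliberately tiny buffer, not AFL code):

    #include <stdio.h>

    int main(void) {

      char stage_name_buf[8]; /* deliberately too small */

      int n = snprintf(stage_name_buf, sizeof(stage_name_buf),
                       "splice %u", 123456789u);

      /* n is the length snprintf wanted to write; n >= sizeof() means truncation */
      printf("'%s' (wanted %d bytes)\n", stage_name_buf, n); /* -> 'splice ' (wanted 16 bytes) */
      return 0;

    }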
@@ -3573,9 +3573,9 @@ pacemaker_fuzzing:

   perf_score = orig_perf;

-  snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat,
+  snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, MOpt_globals.splice_stageformat,
            splice_cycle);
-  afl->stage_name = afl->stage_name_buf64;
+  if (afl->stage_name != afl->stage_name_buf) afl->stage_name = afl->stage_name_buf;
   afl->stage_short = MOpt_globals.splice_stagenameshort;
   afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;

@@ -3623,9 +3623,8 @@ pacemaker_fuzzing:
   } else {

     perf_score = orig_perf;
-    snprintf(afl->stage_name_buf64, 64, MOpt_globals.splice_stageformat,
-             splice_cycle);
-    afl->stage_name = afl->stage_name_buf64;
+    snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, MOpt_globals.splice_stageformat, splice_cycle);
+    afl->stage_name = afl->stage_name_buf;
     afl->stage_short = MOpt_globals.splice_stagenameshort;
     afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100;

@@ -254,7 +254,7 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
 void cull_queue(afl_state_t *afl) {

   struct queue_entry *q;
-  static u8 temp_v[MAP_SIZE >> 3];
+  u8 temp_v[MAP_SIZE >> 3];
   u32 i;

   if (afl->dumb_mode || !afl->score_changed) return;
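temp_v is a one-bit-per-map-byte mask, so moving it from static storage onto the stack costs MAP_SIZE >> 3 bytes per call -- 8 KiB with the default 64 KiB coverage map. A quick standalone check (MAP_SIZE_POW2 assumed to be the usual default of 16 from config.h):

    #include <stdio.h>

    #define MAP_SIZE_POW2 16 /* assumed default; the real value lives in config.h */
    #define MAP_SIZE (1 << MAP_SIZE_POW2)

    int main(void) {

      unsigned char temp_v[MAP_SIZE >> 3];

      printf("map: %d bytes, cull mask on the stack: %zu bytes\n",
             MAP_SIZE, sizeof(temp_v)); /* -> 65536 and 8192 */
      return 0;

    }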
@@ -28,14 +28,7 @@
 #include <signal.h>

 /* Execute target application, monitoring for timeouts. Return status
-   information. The called program will update afl->fsrv.trace_bits[]. */
-
-void timeout_handle(union sigval timer_data) {
-
-  pid_t child_pid = timer_data.sival_int;
-  if (child_pid > 0) kill(child_pid, SIGKILL);
-
-}
+   information. The called program will update afl->fsrv.trace_bits. */

 u8 run_target(afl_state_t *afl, u32 timeout) {

@@ -44,9 +37,7 @@ u8 run_target(afl_state_t *afl, u32 timeout) {

   fd_set readfds;

-  static struct timeval it;
-  static u32 prev_timed_out = 0;
-
+  struct timeval it;
   int status = 0;
   u32 tb4;

@@ -63,7 +54,7 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
   /* we have the fork server (or faux server) up and running, so simply
      tell it to have at it, and then read back PID. */

-  if ((res = write(afl->fsrv.fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+  if ((res = write(afl->fsrv.fsrv_ctl_fd, &afl->fsrv.prev_timed_out, 4)) != 4) {

     if (afl->stop_soon) return 0;
     RPFATAL(res, "Unable to request new process from fork server (OOM?)");
@@ -144,7 +135,7 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
   classify_counts((u32 *)afl->fsrv.trace_bits);
 #endif /* ^WORD_SIZE_64 */

-  prev_timed_out = afl->fsrv.child_timed_out;
+  afl->fsrv.prev_timed_out = afl->fsrv.child_timed_out;

   /* Report outcome to caller. */

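run_target() keeps its fd_set/struct timeval pair, i.e. the timeout is enforced by waiting on the forkserver status pipe rather than by the removed timeout_handle() timer callback. A simplified, hypothetical sketch of such a select()-based wait (error paths and child cleanup omitted; not the actual implementation):

    #include <sys/select.h>
    #include <unistd.h>

    /* Wait at most timeout_ms for the forkserver to report a child status on
       st_fd; a select() timeout is what later gets classified as a hang. */
    static int wait_for_status(int st_fd, unsigned timeout_ms, int *status) {

      fd_set readfds;
      struct timeval it;

      FD_ZERO(&readfds);
      FD_SET(st_fd, &readfds);
      it.tv_sec = timeout_ms / 1000;
      it.tv_usec = (timeout_ms % 1000) * 1000;

      int sret = select(st_fd + 1, &readfds, NULL, NULL, &it);

      if (sret == 0) return -1;                   /* timed out */
      if (read(st_fd, status, 4) != 4) return -1; /* broken pipe / error */

      return 0;

    }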
@@ -299,8 +290,6 @@ static void write_with_gap(afl_state_t *afl, void *mem, u32 len, u32 skip_at,
 u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
                   u32 handicap, u8 from_queue) {

-  static u8 first_trace[MAP_SIZE];
-
   u8 fault = 0, new_bits = 0, var_detected = 0,
      first_run = (q->exec_cksum == 0);

@@ -331,7 +320,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
       afl->shm.cmplog_mode)
     init_cmplog_forkserver(afl);

-  if (q->exec_cksum) memcpy(first_trace, afl->fsrv.trace_bits, MAP_SIZE);
+  if (q->exec_cksum) memcpy(afl->first_trace, afl->fsrv.trace_bits, MAP_SIZE);

   start_us = get_cur_time_us();

@@ -372,7 +361,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,

   for (i = 0; i < MAP_SIZE; ++i) {

-    if (!afl->var_bytes[i] && first_trace[i] != afl->fsrv.trace_bits[i]) {
+    if (!afl->var_bytes[i] && afl->first_trace[i] != afl->fsrv.trace_bits[i]) {

       afl->var_bytes[i] = 1;
       afl->stage_max = CAL_CYCLES_LONG;
@@ -386,7 +375,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
   } else {

     q->exec_cksum = cksum;
-    memcpy(first_trace, afl->fsrv.trace_bits, MAP_SIZE);
+    memcpy(afl->first_trace, afl->fsrv.trace_bits, MAP_SIZE);

   }

@@ -471,8 +460,6 @@ void sync_fuzzers(afl_state_t *afl) {

   while ((sd_ent = readdir(sd))) {

-    static u8 stage_tmp[128];
-
     DIR * qd;
     struct dirent *qd_ent;
     u8 * qd_path, *qd_synced_path;
@@ -511,8 +498,9 @@ void sync_fuzzers(afl_state_t *afl) {

     /* Show stats */

-    sprintf(stage_tmp, "sync %u", ++sync_cnt);
-    afl->stage_name = stage_tmp;
+    snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "sync %u", ++sync_cnt);
+
+    if (afl->stage_name != afl->stage_name_buf) afl->stage_name = afl->stage_name_buf;
     afl->stage_cur = 0;
     afl->stage_max = 0;

@@ -608,9 +596,6 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
   if (afl->mutator && afl->mutator->afl_custom_trim)
     return trim_case_custom(afl, q, in_buf);

-  static u8 tmp[64];
-  static u8 clean_trace[MAP_SIZE];
-
   u8 needs_write = 0, fault = 0;
   u32 trim_exec = 0;
   u32 remove_len;
@@ -622,7 +607,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {

   if (q->len < 5) return 0;

-  afl->stage_name = tmp;
+  if (afl->stage_name != afl->stage_name_buf) afl->stage_name = afl->stage_name_buf;
   afl->bytes_trim_in += q->len;

   /* Select initial chunk len, starting with large steps. */
@@ -638,7 +623,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {

     u32 remove_pos = remove_len;

-    sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));
+    snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "trim %s/%s", DI(remove_len), DI(remove_len));

     afl->stage_cur = 0;
     afl->stage_max = q->len / remove_len;
@@ -680,7 +665,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
     if (!needs_write) {

       needs_write = 1;
-      memcpy(clean_trace, afl->fsrv.trace_bits, MAP_SIZE);
+      memcpy(afl->clean_trace, afl->fsrv.trace_bits, MAP_SIZE);

     }

@@ -722,7 +707,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
     ck_write(fd, in_buf, q->len, q->fname);
     close(fd);

-    memcpy(afl->fsrv.trace_bits, clean_trace, MAP_SIZE);
+    memcpy(afl->fsrv.trace_bits, afl->clean_trace, MAP_SIZE);
     update_bitmap_score(afl, q);

   }
@@ -30,8 +30,7 @@
 void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
                       double eps) {

-  static double last_bcvg, last_stab, last_eps;
-  static struct rusage rus;
+  struct rusage rus;

   u8 * fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
   s32 fd;
@@ -52,15 +51,15 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,

   if (!bitmap_cvg && !stability && !eps) {

-    bitmap_cvg = last_bcvg;
-    stability = last_stab;
-    eps = last_eps;
+    bitmap_cvg = afl->last_bitmap_cvg;
+    stability = afl->last_stability;
+    eps = afl->last_eps;

   } else {

-    last_bcvg = bitmap_cvg;
-    last_stab = stability;
-    last_eps = eps;
+    afl->last_bitmap_cvg = bitmap_cvg;
+    afl->last_stability = stability;
+    afl->last_eps = eps;

   }

@@ -137,23 +136,24 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,

 void maybe_update_plot_file(afl_state_t *afl, double bitmap_cvg, double eps) {

-  static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
-  static u64 prev_qc, prev_uc, prev_uh;
-
-  if (prev_qp == afl->queued_paths && prev_pf == afl->pending_favored &&
-      prev_pnf == afl->pending_not_fuzzed && prev_ce == afl->current_entry &&
-      prev_qc == afl->queue_cycle && prev_uc == afl->unique_crashes &&
-      prev_uh == afl->unique_hangs && prev_md == afl->max_depth)
+  if (afl->plot_prev_qp == afl->queued_paths &&
+      afl->plot_prev_pf == afl->pending_favored &&
+      afl->plot_prev_pnf == afl->pending_not_fuzzed &&
+      afl->plot_prev_ce == afl->current_entry &&
+      afl->plot_prev_qc == afl->queue_cycle &&
+      afl->plot_prev_uc == afl->unique_crashes &&
+      afl->plot_prev_uh == afl->unique_hangs &&
+      afl->plot_prev_md == afl->max_depth)
     return;

-  prev_qp = afl->queued_paths;
-  prev_pf = afl->pending_favored;
-  prev_pnf = afl->pending_not_fuzzed;
-  prev_ce = afl->current_entry;
-  prev_qc = afl->queue_cycle;
-  prev_uc = afl->unique_crashes;
-  prev_uh = afl->unique_hangs;
-  prev_md = afl->max_depth;
+  afl->plot_prev_qp = afl->queued_paths;
+  afl->plot_prev_pf = afl->pending_favored;
+  afl->plot_prev_pnf = afl->pending_not_fuzzed;
+  afl->plot_prev_ce = afl->current_entry;
+  afl->plot_prev_qc = afl->queue_cycle;
+  afl->plot_prev_uc = afl->unique_crashes;
+  afl->plot_prev_uh = afl->unique_hangs;
+  afl->plot_prev_md = afl->max_depth;

   /* Fields in the file:

@@ -192,8 +192,6 @@ static void check_term_size(afl_state_t *afl) {

 void show_stats(afl_state_t *afl) {

-  static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
-  static double avg_exec;
   double t_byte_ratio, stab_ratio;

   u64 cur_ms;
@@ -201,12 +199,13 @@ void show_stats(afl_state_t *afl) {

   u32 banner_len, banner_pad;
   u8 tmp[256];
+  u8 time_tmp[64];

   cur_ms = get_cur_time();

   /* If not enough time has passed since last UI update, bail out. */

-  if (cur_ms - last_ms < 1000 / UI_TARGET_HZ && !afl->force_ui_update) return;
+  if (cur_ms - afl->stats_last_ms < 1000 / UI_TARGET_HZ && !afl->force_ui_update) return;

   /* Check if we're past the 10 minute mark. */

@@ -214,31 +213,29 @@ void show_stats(afl_state_t *afl) {

   /* Calculate smoothed exec speed stats. */

-  if (!last_execs) {
+  if (!afl->stats_last_execs) {

-    avg_exec = ((double)afl->total_execs) * 1000 / (cur_ms - afl->start_time);
+    afl->stats_avg_exec = ((double)afl->total_execs) * 1000 / (cur_ms - afl->start_time);

   } else {

-    double cur_avg =
-        ((double)(afl->total_execs - last_execs)) * 1000 / (cur_ms - last_ms);
+    double cur_avg = ((double)(afl->total_execs - afl->stats_last_execs)) * 1000 / (cur_ms - afl->stats_last_ms);

     /* If there is a dramatic (5x+) jump in speed, reset the indicator
        more quickly. */

-    if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) avg_exec = cur_avg;
+    if (cur_avg * 5 < afl->stats_avg_exec || cur_avg / 5 > afl->stats_avg_exec) afl->stats_avg_exec = cur_avg;

-    avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
-               cur_avg * (1.0 / AVG_SMOOTHING);
+    afl->stats_avg_exec = afl->stats_avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) + cur_avg * (1.0 / AVG_SMOOTHING);

   }

-  last_ms = cur_ms;
-  last_execs = afl->total_execs;
+  afl->stats_last_ms = cur_ms;
+  afl->stats_last_execs = afl->total_execs;

   /* Tell the callers when to contact us (as measured in execs). */

-  afl->stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
+  afl->stats_update_freq = afl->stats_avg_exec / (UI_TARGET_HZ * 10);
   if (!afl->stats_update_freq) afl->stats_update_freq = 1;

   /* Do some bitmap stats. */
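The smoothing above keeps an exponential moving average in afl->stats_avg_exec: avg = avg * (1 - 1/AVG_SMOOTHING) + cur * (1/AVG_SMOOTHING), with an immediate reset on a 5x+ speed jump. A standalone numeric example (AVG_SMOOTHING assumed to be 16, its usual config.h value):

    #include <stdio.h>

    #define AVG_SMOOTHING 16 /* assumed; the real constant comes from config.h */

    int main(void) {

      double stats_avg_exec = 1000.0; /* previously smoothed execs/sec */
      double cur_avg = 1100.0;        /* speed measured over the last interval */

      /* dramatic (5x+) jumps reset the indicator instead of being smoothed */
      if (cur_avg * 5 < stats_avg_exec || cur_avg / 5 > stats_avg_exec)
        stats_avg_exec = cur_avg;

      stats_avg_exec = stats_avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
                       cur_avg * (1.0 / AVG_SMOOTHING);

      printf("%.2f execs/sec\n", stats_avg_exec); /* -> 1006.25 */
      return 0;

    }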
@@ -253,10 +250,10 @@ void show_stats(afl_state_t *afl) {

   /* Roughly every minute, update fuzzer stats and save auto tokens. */

-  if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {
+  if (cur_ms - afl->stats_last_stats_ms > STATS_UPDATE_SEC * 1000) {

-    last_stats_ms = cur_ms;
-    write_stats_file(afl, t_byte_ratio, stab_ratio, avg_exec);
+    afl->stats_last_stats_ms = cur_ms;
+    write_stats_file(afl, t_byte_ratio, stab_ratio, afl->stats_avg_exec);
     save_auto(afl);
     write_bitmap(afl);

@@ -264,10 +261,10 @@ void show_stats(afl_state_t *afl) {

   /* Every now and then, write plot data. */

-  if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {
+  if (cur_ms - afl->stats_last_plot_ms > PLOT_UPDATE_SEC * 1000) {

-    last_plot_ms = cur_ms;
-    maybe_update_plot_file(afl, t_byte_ratio, avg_exec);
+    afl->stats_last_plot_ms = cur_ms;
+    maybe_update_plot_file(afl, t_byte_ratio, afl->stats_avg_exec);

   }

@@ -384,9 +381,9 @@ void show_stats(afl_state_t *afl) {

   }

+  DTD(time_tmp, sizeof(time_tmp), cur_ms, afl->start_time);
   SAYF(bV bSTOP " run time : " cRST "%-33s " bSTG bV bSTOP
-       " cycles done : %s%-5s " bSTG bV "\n",
-       DTD(cur_ms, afl->start_time), tmp, DI(afl->queue_cycle - 1));
+       " cycles done : %s%-5s " bSTG bV "\n", time_tmp, tmp, DI(afl->queue_cycle - 1));

   /* We want to warn people about not seeing new paths after a full cycle,
      except when resuming fuzzing or running in non-instrumented mode. */
@@ -395,8 +392,8 @@ void show_stats(afl_state_t *afl) {
       (afl->last_path_time || afl->resuming_fuzz || afl->queue_cycle == 1 ||
        afl->in_bitmap || afl->crash_mode)) {

-    SAYF(bV bSTOP " last new path : " cRST "%-33s ",
-         DTD(cur_ms, afl->last_path_time));
+    DTD(time_tmp, sizeof(time_tmp), cur_ms, afl->last_path_time);
+    SAYF(bV bSTOP " last new path : " cRST "%-33s ", time_tmp);

   } else {

@@ -421,17 +418,16 @@ void show_stats(afl_state_t *afl) {
   sprintf(tmp, "%s%s", DI(afl->unique_crashes),
           (afl->unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");

+  DTD(time_tmp, sizeof(time_tmp), cur_ms, afl->last_crash_time);
   SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
-       " uniq crashes : %s%-6s" bSTG bV "\n",
-       DTD(cur_ms, afl->last_crash_time), afl->unique_crashes ? cLRD : cRST,
-       tmp);
+       " uniq crashes : %s%-6s" bSTG bV "\n", time_tmp, afl->unique_crashes ? cLRD : cRST, tmp);

   sprintf(tmp, "%s%s", DI(afl->unique_hangs),
           (afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");

+  DTD(time_tmp, sizeof(time_tmp), cur_ms, afl->last_hang_time);
   SAYF(bV bSTOP " last uniq hang : " cRST "%-33s " bSTG bV bSTOP
-       " uniq hangs : " cRST "%-6s" bSTG bV "\n",
-       DTD(cur_ms, afl->last_hang_time), tmp);
+       " uniq hangs : " cRST "%-6s" bSTG bV "\n", time_tmp, tmp);

   SAYF(bVR bH bSTOP cCYA
        " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
@@ -515,23 +511,22 @@ void show_stats(afl_state_t *afl) {

   /* Show a warning about slow execution. */

-  if (avg_exec < 100) {
+  if (afl->stats_avg_exec < 100) {

-    sprintf(tmp, "%s/sec (%s)", DF(avg_exec),
-            avg_exec < 20 ? "zzzz..." : "slow!");
+    sprintf(tmp, "%s/sec (%s)", DF(afl->stats_avg_exec),
+            afl->stats_avg_exec < 20 ? "zzzz..." : "slow!");

     SAYF(bV bSTOP " exec speed : " cLRD "%-20s ", tmp);

   } else {

-    sprintf(tmp, "%s/sec", DF(avg_exec));
+    sprintf(tmp, "%s/sec", DF(afl->stats_avg_exec));
     SAYF(bV bSTOP " exec speed : " cRST "%-20s ", tmp);

   }

   sprintf(tmp, "%s (%s%s unique)", DI(afl->total_tmouts),
-          DI(afl->unique_tmouts),
-          (afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
+          DI(afl->unique_tmouts), (afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");

   SAYF(bSTG bV bSTOP " total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);

@@ -263,8 +263,7 @@ static void write_to_testcase(afl_forkserver_t *fsrv, void *mem, u32 len) {
 static u8 run_target_forkserver(afl_forkserver_t *fsrv, char **argv, u8 *mem,
                                 u32 len) {

-  static struct itimerval it;
-  static u32 prev_timed_out = 0;
+  struct itimerval it;
   int status = 0;

   memset(fsrv->trace_bits, 0, MAP_SIZE);
@@ -277,7 +276,7 @@ static u8 run_target_forkserver(afl_forkserver_t *fsrv, char **argv, u8 *mem,
   /* we have the fork server up and running, so simply
      tell it to have at it, and then read back PID. */

-  if ((res = write(fsrv->fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+  if ((res = write(fsrv->fsrv_ctl_fd, &fsrv->prev_timed_out, 4)) != 4) {

     if (stop_soon) return 0;
     RPFATAL(res, "Unable to request new process from fork server (OOM?)");
@@ -398,8 +398,7 @@ static void init_forkserver(char **argv) {
 static u8 run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
                      u8 first_run) {

-  static struct itimerval it;
-  static u32 prev_timed_out = 0;
+  struct itimerval it;
   int status = 0;

   u32 cksum;
@@ -416,7 +415,7 @@ static u8 run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
   /* we have the fork server up and running, so simply
      tell it to have at it, and then read back PID. */

-  if ((res = write(fsrv->fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
+  if ((res = write(fsrv->fsrv_ctl_fd, &fsrv->prev_timed_out, 4)) != 4) {

     if (stop_soon) return 0;
     RPFATAL(res, "Unable to request new process from fork server (OOM?)");