afl-fuzz.c completely split

Andrea Fioraldi
2019-09-02 18:41:27 +02:00
parent 1652831f1d
commit e9d968e060
9 changed files with 9795 additions and 9656 deletions


@@ -80,6 +80,11 @@
# define HAVE_AFFINITY 1
#endif /* __linux__ */
#ifndef SIMPLE_FILES
# define CASE_PREFIX "id:"
#else
# define CASE_PREFIX "id_"
#endif /* ^!SIMPLE_FILES */
struct queue_entry {
@@ -455,16 +460,15 @@ extern PyObject *py_functions[PY_FUNC_COUNT];
/**** Prototypes ****/
/* Python stuff */
/* Python */
#ifdef USE_PYTHON
int init_py();
void finalize_py();
void fuzz_py(char*, size_t, char*, size_t, char**, size_t*);
u32 init_trim_py(char*, size_t);
u32 post_trim_py(char);
void trim_py(char**, size_t*);
u8 trim_case_python(char**, struct queue_entry*, u8*);
#endif
/* Queue */
@@ -476,6 +480,7 @@ void add_to_queue(u8*, u32, u8);
void destroy_queue(void);
void update_bitmap_score(struct queue_entry*);
void cull_queue(void);
u32 calculate_score(struct queue_entry*);
/* Bitmap */
@@ -494,6 +499,10 @@ void classify_counts(u32*);
#endif
void init_count_class16(void);
void minimize_bits(u8*, u8*);
#ifndef SIMPLE_FILES
u8* describe_op(u8);
#endif
u8 save_if_interesting(char**, void*, u32, u8);
/* Misc */
@@ -511,6 +520,61 @@ void save_auto(void);
void load_auto(void);
void destroy_extras(void);
/* Stats */
void write_stats_file(double, double, double);
void maybe_update_plot_file(double, double);
void show_stats(void);
void show_init_stats(void);
/* Run */
u8 run_target(char**, u32);
void write_to_testcase(void*, u32);
void write_with_gap(void*, u32, u32, u32);
u8 calibrate_case(char**, struct queue_entry*, u8*, u32, u8);
void sync_fuzzers(char**);
u8 trim_case(char**, struct queue_entry*, u8*);
u8 common_fuzz_stuff(char**, u8*, u32);
/* Fuzz one */
u8 fuzz_one_original(char**);
static u8 pilot_fuzzing(char**);
u8 core_fuzzing(char**);
void pso_updating(void);
u8 fuzz_one(char**);
/* Init */
#ifdef HAVE_AFFINITY
void bind_to_free_cpu(void);
#endif
void setup_post(void);
void setup_custom_mutator(void);
void read_testcases(void);
void perform_dry_run(char**);
void pivot_inputs(void);
u32 find_start_position(void);
void find_timeout(void);
double get_runnable_processes(void);
void nuke_resume_dir(void);
void maybe_delete_out_dir(void);
void setup_dirs_fds(void);
void setup_cmdline_file(char**);
void setup_stdio_file(void);
void check_crash_handling(void);
void check_cpu_governor(void);
void get_core_count(void);
void fix_up_sync(void);
void check_asan_opts(void);
void check_binary(u8*);
void fix_up_banner(u8*);
void check_if_tty(void);
void setup_signal_handlers(void);
char** get_qemu_argv(u8*, char**, int);
void save_cmdline(u32, char**);
/**** Inline routines ****/
/* Generate a random number (from 0 to limit - 1). This may have slight bias. */


@@ -408,3 +408,296 @@ void minimize_bits(u8* dst, u8* src) {
}
#ifndef SIMPLE_FILES
/* Construct a file name for a new test case, capturing the operation
that led to its discovery. Uses a static buffer. */
u8* describe_op(u8 hnb) {
static u8 ret[256];
if (syncing_party) {
sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case);
} else {
sprintf(ret, "src:%06u", current_entry);
sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time);
if (splicing_with >= 0)
sprintf(ret + strlen(ret), "+%06d", splicing_with);
sprintf(ret + strlen(ret), ",op:%s", stage_short);
if (stage_cur_byte >= 0) {
sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte);
if (stage_val_type != STAGE_VAL_NONE)
sprintf(ret + strlen(ret), ",val:%s%+d",
(stage_val_type == STAGE_VAL_BE) ? "be:" : "",
stage_cur_val);
} else sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val);
}
if (hnb == 2) strcat(ret, ",+cov");
return ret;
}
#endif /* !SIMPLE_FILES */
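/* Illustration (hypothetical values, not part of this commit): for a
   non-syncing run with current_entry = 7, 123456 ms elapsed, a havoc stage
   with stage_cur_byte < 0 and stage_cur_val = 8, plus new coverage (hnb == 2),
   describe_op(2) yields "src:000007,time:123456,op:havoc,rep:8,+cov", which
   save_if_interesting() prefixes to form
   "id:000042,src:000007,time:123456,op:havoc,rep:8,+cov". */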
/* Write a message accompanying the crash directory :-) */
static void write_crash_readme(void) {
u8* fn = alloc_printf("%s/crashes/README.txt", out_dir);
s32 fd;
FILE* f;
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
ck_free(fn);
/* Do not die on errors here - that would be impolite. */
if (fd < 0) return;
f = fdopen(fd, "w");
if (!f) {
close(fd);
return;
}
fprintf(f, "Command line used to find this crash:\n\n"
"%s\n\n"
"If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n"
"memory limit. The limit used for this fuzzing session was %s.\n\n"
"Need a tool to minimize test cases before investigating the crashes or sending\n"
"them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"
"Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n"
"an mail at <afl-users@googlegroups.com> once the issues are fixed\n\n"
" https://github.com/vanhauser-thc/AFLplusplus\n\n",
orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */
fclose(f);
}
/* Check if the result of an execve() during routine fuzzing is interesting,
save or queue the input test case for further analysis if so. Returns 1 if
entry is saved, 0 otherwise. */
u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
if (len == 0) return 0;
u8 *fn = "";
u8 hnb;
s32 fd;
u8 keeping = 0, res;
/* Update path frequency. */
u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
struct queue_entry* q = queue;
while (q) {
if (q->exec_cksum == cksum)
q->n_fuzz = q->n_fuzz + 1;
q = q->next;
}
if (fault == crash_mode) {
/* Keep only if there are new bits in the map, add to queue for
future fuzzing, etc. */
if (!(hnb = has_new_bits(virgin_bits))) {
if (crash_mode) ++total_crashes;
return 0;
}
#ifndef SIMPLE_FILES
fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths,
describe_op(hnb));
#else
fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths);
#endif /* ^!SIMPLE_FILES */
add_to_queue(fn, len, 0);
if (hnb == 2) {
queue_top->has_new_cov = 1;
++queued_with_cov;
}
queue_top->exec_cksum = cksum;
/* Try to calibrate inline; this also calls update_bitmap_score() when
successful. */
res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);
if (res == FAULT_ERROR)
FATAL("Unable to execute target application");
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
ck_write(fd, mem, len, fn);
close(fd);
keeping = 1;
}
switch (fault) {
case FAULT_TMOUT:
/* Timeouts are not very interesting, but we're still obliged to keep
a handful of samples. We use the presence of new bits in the
hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
just keep everything. */
++total_tmouts;
if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
if (!dumb_mode) {
#ifdef __x86_64__
simplify_trace((u64*)trace_bits);
#else
simplify_trace((u32*)trace_bits);
#endif /* ^__x86_64__ */
if (!has_new_bits(virgin_tmout)) return keeping;
}
++unique_tmouts;
/* Before saving, we make sure that it's a genuine hang by re-running
the target with a more generous timeout (unless the default timeout
is already generous). */
if (exec_tmout < hang_tmout) {
u8 new_fault;
write_to_testcase(mem, len);
new_fault = run_target(argv, hang_tmout);
/* A corner case that one user reported bumping into: increasing the
timeout actually uncovers a crash. Make sure we don't discard it if
so. */
if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;
if (stop_soon || new_fault != FAULT_TMOUT) return keeping;
}
#ifndef SIMPLE_FILES
fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir,
unique_hangs, describe_op(0));
#else
fn = alloc_printf("%s/hangs/id_%06llu", out_dir,
unique_hangs);
#endif /* ^!SIMPLE_FILES */
++unique_hangs;
last_hang_time = get_cur_time();
break;
case FAULT_CRASH:
keep_as_crash:
/* This is handled in a manner roughly similar to timeouts,
except for slightly different limits and no need to re-run test
cases. */
++total_crashes;
if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
if (!dumb_mode) {
#ifdef __x86_64__
simplify_trace((u64*)trace_bits);
#else
simplify_trace((u32*)trace_bits);
#endif /* ^__x86_64__ */
if (!has_new_bits(virgin_crash)) return keeping;
}
if (!unique_crashes) write_crash_readme();
#ifndef SIMPLE_FILES
fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir,
unique_crashes, kill_signal, describe_op(0));
#else
fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes,
kill_signal);
#endif /* ^!SIMPLE_FILES */
++unique_crashes;
last_crash_time = get_cur_time();
last_crash_execs = total_execs;
break;
case FAULT_ERROR: FATAL("Unable to execute target application");
default: return keeping;
}
/* If we're here, we apparently want to save the crash or hang
test case, too. */
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
ck_write(fd, mem, len, fn);
close(fd);
ck_free(fn);
return keeping;
}

src/afl-fuzz-init.c (new file, 1943 lines)

File diff suppressed because it is too large.

src/afl-fuzz-one.c (new file, 5719 lines)

File diff suppressed because it is too large.


@@ -220,4 +220,110 @@ void trim_py(char** ret, size_t* retlen) {
}
}
u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
static u8 tmp[64];
static u8 clean_trace[MAP_SIZE];
u8 needs_write = 0, fault = 0;
u32 trim_exec = 0;
u32 orig_len = q->len;
stage_name = tmp;
bytes_trim_in += q->len;
/* Initialize trimming in the Python module */
stage_cur = 0;
stage_max = init_trim_py(in_buf, q->len);
if (not_on_tty && debug)
SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, q->len);
while(stage_cur < stage_max) {
sprintf(tmp, "ptrim %s", DI(trim_exec));
u32 cksum;
char* retbuf = NULL;
size_t retlen = 0;
trim_py(&retbuf, &retlen);
if (retlen > orig_len)
FATAL("Trimmed data returned by Python module is larger than original data");
write_to_testcase(retbuf, retlen);
fault = run_target(argv, exec_tmout);
++trim_execs;
if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
if (cksum == q->exec_cksum) {
q->len = retlen;
memcpy(in_buf, retbuf, retlen);
/* Let's save a clean trace, which will be needed by
update_bitmap_score once we're done with the trimming stuff. */
if (!needs_write) {
needs_write = 1;
memcpy(clean_trace, trace_bits, MAP_SIZE);
}
/* Tell the Python module that the trimming was successful */
stage_cur = post_trim_py(1);
if (not_on_tty && debug)
SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", stage_cur, stage_max, q->len);
} else {
/* Tell the Python module that the trimming was unsuccessful */
stage_cur = post_trim_py(0);
if (not_on_tty && debug)
SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, stage_max);
}
/* Since this can be slow, update the screen every now and then. */
if (!(trim_exec++ % stats_update_freq)) show_stats();
}
if (not_on_tty && debug)
SAYF("[Python Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len);
/* If we have made changes to in_buf, we also need to update the on-disk
version of the test case. */
if (needs_write) {
s32 fd;
unlink(q->fname); /* ignore errors */
fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", q->fname);
ck_write(fd, in_buf, q->len, q->fname);
close(fd);
memcpy(trace_bits, clean_trace, MAP_SIZE);
update_bitmap_score(q);
}
abort_trimming:
bytes_trim_out += q->len;
return fault;
}
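/* Worked example of the trimming protocol (hypothetical numbers): for a
   1024-byte input, init_trim_py() might report stage_max = 8 candidate
   steps. Each trim_py() call proposes a shorter buffer; post_trim_py(1)
   tells the module the candidate kept the same execution path (and returns
   the next stage_cur), while post_trim_py(0) asks it to move on to the next
   candidate. The loop above runs until stage_cur reaches stage_max. */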
#endif /* USE_PYTHON */


@@ -284,3 +284,139 @@ void cull_queue(void) {
}
/* Calculate case desirability score to adjust the length of havoc fuzzing.
A helper function for fuzz_one(). Maybe some of these constants should
go into config.h. */
u32 calculate_score(struct queue_entry* q) {
u32 avg_exec_us = total_cal_us / total_cal_cycles;
u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries;
u32 perf_score = 100;
/* Adjust score based on execution speed of this path, compared to the
global average. Multiplier ranges from 0.1x to 3x. Fast inputs are
less expensive to fuzz, so we're giving them more air time. */
// TODO BUG FIXME: is this really a good idea?
// This sounds like looking for lost keys under a street light just because
// the light is better there.
// Longer execution time means longer work on the input, the deeper in
// coverage, the better the fuzzing, right? -mh
if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10;
else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25;
else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50;
else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75;
else if (q->exec_us * 4 < avg_exec_us) perf_score = 300;
else if (q->exec_us * 3 < avg_exec_us) perf_score = 200;
else if (q->exec_us * 2 < avg_exec_us) perf_score = 150;
/* Adjust score based on bitmap size. The working theory is that better
coverage translates to better targets. Multiplier from 0.25x to 3x. */
if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3;
else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2;
else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5;
else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25;
else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5;
else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75;
/* Adjust score based on handicap. Handicap is proportional to how late
in the game we learned about this path. Latecomers are allowed to run
for a bit longer until they catch up with the rest. */
if (q->handicap >= 4) {
perf_score *= 4;
q->handicap -= 4;
} else if (q->handicap) {
perf_score *= 2;
--q->handicap;
}
/* Final adjustment based on input depth, under the assumption that fuzzing
deeper test cases is more likely to reveal stuff that can't be
discovered with traditional fuzzers. */
switch (q->depth) {
case 0 ... 3: break;
case 4 ... 7: perf_score *= 2; break;
case 8 ... 13: perf_score *= 3; break;
case 14 ... 25: perf_score *= 4; break;
default: perf_score *= 5;
}
u64 fuzz = q->n_fuzz;
u64 fuzz_total;
u32 n_paths, fuzz_mu;
u32 factor = 1;
switch (schedule) {
case EXPLORE:
break;
case EXPLOIT:
factor = MAX_FACTOR;
break;
case COE:
fuzz_total = 0;
n_paths = 0;
struct queue_entry *queue_it = queue;
while (queue_it) {
fuzz_total += queue_it->n_fuzz;
n_paths ++;
queue_it = queue_it->next;
}
fuzz_mu = fuzz_total / n_paths;
if (fuzz <= fuzz_mu) {
if (q->fuzz_level < 16)
factor = ((u32) (1 << q->fuzz_level));
else
factor = MAX_FACTOR;
} else {
factor = 0;
}
break;
case FAST:
if (q->fuzz_level < 16) {
factor = ((u32) (1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
} else
factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2 (fuzz));
break;
case LIN:
factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
break;
case QUAD:
factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
break;
default:
PFATAL ("Unknown Power Schedule");
}
if (factor > MAX_FACTOR)
factor = MAX_FACTOR;
perf_score *= factor / POWER_BETA;
// MOpt mode
if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2;
else if (perf_score < 1) perf_score = 1; // Add a lower bound to AFLFast's energy assignment strategies
/* Make sure that we don't go over limit. */
if (perf_score > havoc_max_mult * 100) perf_score = havoc_max_mult * 100;
return perf_score;
}
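/* Worked example (hypothetical numbers, assuming POWER_BETA == 1 from
   config.h): an entry with average exec_us and bitmap_size, no handicap and
   depth 2 starts at perf_score = 100. Under the FAST schedule with
   q->fuzz_level = 5 and q->n_fuzz = 8, factor = (1 << 5) / 8 = 4, so
   perf_score becomes 400: rarely-exercised paths that survived several
   rounds get proportionally more havoc energy, subject to the
   havoc_max_mult * 100 cap. */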

src/afl-fuzz-run.c (new file, 775 lines)

@@ -0,0 +1,775 @@
/*
american fuzzy lop - fuzzer code
--------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Forkserver design by Jann Horn <jannhorn@googlemail.com>
Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This is the real deal: the program takes an instrumented binary and
attempts a variety of basic fuzzing tricks, paying close attention to
how they affect the execution path.
*/
#include "afl-fuzz.h"
/* Execute target application, monitoring for timeouts. Return status
information. The called program will update trace_bits[]. */
u8 run_target(char** argv, u32 timeout) {
static struct itimerval it;
static u32 prev_timed_out = 0;
static u64 exec_ms = 0;
int status = 0;
u32 tb4;
child_timed_out = 0;
/* After this memset, trace_bits[] are effectively volatile, so we
must prevent any earlier operations from venturing into that
territory. */
memset(trace_bits, 0, MAP_SIZE);
MEM_BARRIER();
/* If we're running in "dumb" mode, we can't rely on the fork server
logic compiled into the target program, so we will just keep calling
execve(). There is a bit of code duplication between here and
init_forkserver(), but c'est la vie. */
if (dumb_mode == 1 || no_forkserver) {
child_pid = fork();
if (child_pid < 0) PFATAL("fork() failed");
if (!child_pid) {
struct rlimit r;
if (mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
#ifdef RLIMIT_AS
setrlimit(RLIMIT_AS, &r); /* Ignore errors */
#else
setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
#endif /* ^RLIMIT_AS */
}
r.rlim_max = r.rlim_cur = 0;
setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
/* Isolate the process and configure standard descriptors. If out_file is
specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
setsid();
dup2(dev_null_fd, 1);
dup2(dev_null_fd, 2);
if (out_file) {
dup2(dev_null_fd, 0);
} else {
dup2(out_fd, 0);
close(out_fd);
}
/* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */
close(dev_null_fd);
close(out_dir_fd);
#ifndef HAVE_ARC4RANDOM
close(dev_urandom_fd);
#endif
close(fileno(plot_file));
/* Set sane defaults for ASAN if nothing else specified. */
setenv("ASAN_OPTIONS", "abort_on_error=1:"
"detect_leaks=0:"
"symbolize=0:"
"allocator_may_return_null=1", 0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
"symbolize=0:"
"msan_track_origins=0", 0);
execv(target_path, argv);
/* Use a distinctive bitmap value to tell the parent about execv()
falling through. */
*(u32*)trace_bits = EXEC_FAIL_SIG;
exit(0);
}
} else {
s32 res;
/* In non-dumb mode, we have the fork server up and running, so simply
tell it to have at it, and then read back PID. */
if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
}
/* Configure timeout, as requested by user, then wait for child to terminate. */
it.it_value.tv_sec = (timeout / 1000);
it.it_value.tv_usec = (timeout % 1000) * 1000;
setitimer(ITIMER_REAL, &it, NULL);
/* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */
if (dumb_mode == 1 || no_forkserver) {
if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
} else {
s32 res;
if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to communicate with fork server (OOM?)");
}
}
if (!WIFSTOPPED(status)) child_pid = 0;
getitimer(ITIMER_REAL, &it);
exec_ms = (u64) timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms;
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
setitimer(ITIMER_REAL, &it, NULL);
++total_execs;
/* Any subsequent operations on trace_bits must not be moved by the
compiler below this point. Past this location, trace_bits[] behave
very normally and do not have to be treated as volatile. */
MEM_BARRIER();
tb4 = *(u32*)trace_bits;
#ifdef __x86_64__
classify_counts((u64*)trace_bits);
#else
classify_counts((u32*)trace_bits);
#endif /* ^__x86_64__ */
prev_timed_out = child_timed_out;
/* Report outcome to caller. */
if (WIFSIGNALED(status) && !stop_soon) {
kill_signal = WTERMSIG(status);
if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
return FAULT_CRASH;
}
/* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
must use a special exit code. */
if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
kill_signal = 0;
return FAULT_CRASH;
}
if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
return FAULT_ERROR;
return FAULT_NONE;
}
/* Write modified data to file for testing. If out_file is set, the old file
is unlinked and a new one is created. Otherwise, out_fd is rewound and
truncated. */
void write_to_testcase(void* mem, u32 len) {
s32 fd = out_fd;
if (out_file) {
unlink(out_file); /* Ignore errors. */
fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
} else lseek(fd, 0, SEEK_SET);
if (pre_save_handler) {
u8* new_data;
size_t new_size = pre_save_handler(mem, len, &new_data);
ck_write(fd, new_data, new_size, out_file);
} else {
ck_write(fd, mem, len, out_file);
}
if (!out_file) {
if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
} else close(fd);
}
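/* A minimal sketch (not part of this commit) of a pre-save handler matching
   the pre_save_handler(mem, len, &new_data) call shape above. The exported
   symbol name "afl_pre_save" and the dlopen()-based loading are assumptions
   about setup_custom_mutator(); everything below is illustrative only. */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint8_t u8;

static u8* saved_buf;

/* Prepend a fixed 4-byte header so every file written to disk is
   well-formed for the hypothetical target. */
size_t afl_pre_save(u8* data, size_t len, u8** new_data) {
  u8* tmp = realloc(saved_buf, len + 4);
  if (!tmp) { *new_data = data; return len; }  /* fall back to raw input */
  saved_buf = tmp;
  memcpy(saved_buf, "HDR!", 4);
  memcpy(saved_buf + 4, data, len);
  *new_data = saved_buf;
  return len + 4;
}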
/* The same, but with an adjustable gap. Used for trimming. */
void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
s32 fd = out_fd;
u32 tail_len = len - skip_at - skip_len;
if (out_file) {
unlink(out_file); /* Ignore errors. */
fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
} else lseek(fd, 0, SEEK_SET);
if (skip_at) ck_write(fd, mem, skip_at, out_file);
u8 *memu8 = mem;
if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file);
if (!out_file) {
if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
} else close(fd);
}
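/* Worked example (hypothetical numbers): with len = 10, skip_at = 4 and
   skip_len = 2, the call writes bytes 0..3, skips bytes 4..5, writes bytes
   6..9, and (when writing to out_fd) truncates the file to
   len - skip_len = 8 bytes. */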
/* Calibrate a new test case. This is done when processing the input directory
to warn about flaky or otherwise problematic test cases early on; and when
new paths are discovered to detect variable behavior and so on. */
u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
u32 handicap, u8 from_queue) {
static u8 first_trace[MAP_SIZE];
u8 fault = 0, new_bits = 0, var_detected = 0,
first_run = (q->exec_cksum == 0);
u64 start_us, stop_us;
s32 old_sc = stage_cur, old_sm = stage_max;
u32 use_tmout = exec_tmout;
u8* old_sn = stage_name;
/* Be a bit more generous about timeouts when resuming sessions, or when
trying to calibrate already-added finds. This helps avoid trouble due
to intermittent latency. */
if (!from_queue || resuming_fuzz)
use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
exec_tmout * CAL_TMOUT_PERC / 100);
++q->cal_failed;
stage_name = "calibration";
stage_max = fast_cal ? 3 : CAL_CYCLES;
/* Make sure the forkserver is up before we do anything, and let's not
count its spin-up time toward binary calibration. */
if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
init_forkserver(argv);
if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
start_us = get_cur_time_us();
for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
u32 cksum;
if (!first_run && !(stage_cur % stats_update_freq)) show_stats();
write_to_testcase(use_mem, q->len);
fault = run_target(argv, use_tmout);
/* stop_soon is set by the handler for Ctrl+C. When it's pressed,
we want to bail out quickly. */
if (stop_soon || fault != crash_mode) goto abort_calibration;
if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
fault = FAULT_NOINST;
goto abort_calibration;
}
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
if (q->exec_cksum != cksum) {
u8 hnb = has_new_bits(virgin_bits);
if (hnb > new_bits) new_bits = hnb;
if (q->exec_cksum) {
u32 i;
for (i = 0; i < MAP_SIZE; ++i) {
if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
var_bytes[i] = 1;
stage_max = CAL_CYCLES_LONG;
}
}
var_detected = 1;
} else {
q->exec_cksum = cksum;
memcpy(first_trace, trace_bits, MAP_SIZE);
}
}
}
stop_us = get_cur_time_us();
total_cal_us += stop_us - start_us;
total_cal_cycles += stage_max;
/* OK, let's collect some stats about the performance of this test case.
This is used for fuzzing air time calculations in calculate_score(). */
q->exec_us = (stop_us - start_us) / stage_max;
q->bitmap_size = count_bytes(trace_bits);
q->handicap = handicap;
q->cal_failed = 0;
total_bitmap_size += q->bitmap_size;
++total_bitmap_entries;
update_bitmap_score(q);
/* If this case didn't result in new output from the instrumentation, tell
parent. This is a non-critical problem, but something to warn the user
about. */
if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
abort_calibration:
if (new_bits == 2 && !q->has_new_cov) {
q->has_new_cov = 1;
++queued_with_cov;
}
/* Mark variable paths. */
if (var_detected) {
var_byte_count = count_bytes(var_bytes);
if (!q->var_behavior) {
mark_as_variable(q);
++queued_variable;
}
}
stage_name = old_sn;
stage_cur = old_sc;
stage_max = old_sm;
if (!first_run) show_stats();
return fault;
}
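/* Worked example (assuming the stock config.h values CAL_TMOUT_ADD = 50 and
   CAL_TMOUT_PERC = 125): when resuming with exec_tmout = 1000 ms, the code
   above uses use_tmout = MAX(1000 + 50, 1000 * 125 / 100) = 1250 ms, so a
   momentary latency spike does not get a known-good input flagged as a
   time-out during calibration. */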
/* Grab interesting test cases from other fuzzers. */
void sync_fuzzers(char** argv) {
DIR* sd;
struct dirent* sd_ent;
u32 sync_cnt = 0;
sd = opendir(sync_dir);
if (!sd) PFATAL("Unable to open '%s'", sync_dir);
stage_max = stage_cur = 0;
cur_depth = 0;
/* Look at the entries created for every other fuzzer in the sync directory. */
while ((sd_ent = readdir(sd))) {
static u8 stage_tmp[128];
DIR* qd;
struct dirent* qd_ent;
u8 *qd_path, *qd_synced_path;
u32 min_accept = 0, next_min_accept;
s32 id_fd;
/* Skip dot files and our own output directory. */
if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue;
/* Skip anything that doesn't have a queue/ subdirectory. */
qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
if (!(qd = opendir(qd_path))) {
ck_free(qd_path);
continue;
}
/* Retrieve the ID of the last seen test case. */
qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name);
id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600);
if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path);
if (read(id_fd, &min_accept, sizeof(u32)) > 0)
lseek(id_fd, 0, SEEK_SET);
next_min_accept = min_accept;
/* Show stats */
sprintf(stage_tmp, "sync %u", ++sync_cnt);
stage_name = stage_tmp;
stage_cur = 0;
stage_max = 0;
/* For every file queued by this fuzzer, parse ID and see if we have looked at
it before; exec a test case if not. */
while ((qd_ent = readdir(qd))) {
u8* path;
s32 fd;
struct stat st;
if (qd_ent->d_name[0] == '.' ||
sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 ||
syncing_case < min_accept) continue;
/* OK, sounds like a new one. Let's give it a try. */
if (syncing_case >= next_min_accept)
next_min_accept = syncing_case + 1;
path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
/* Allow this to fail in case the other fuzzer is resuming or so... */
fd = open(path, O_RDONLY);
if (fd < 0) {
ck_free(path);
continue;
}
if (fstat(fd, &st)) PFATAL("fstat() failed");
/* Ignore zero-sized or oversized files. */
if (st.st_size && st.st_size <= MAX_FILE) {
u8 fault;
u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (mem == MAP_FAILED) PFATAL("Unable to mmap '%s'", path);
/* See what happens. We rely on save_if_interesting() to catch major
errors and save the test case. */
write_to_testcase(mem, st.st_size);
fault = run_target(argv, exec_tmout);
if (stop_soon) return;
syncing_party = sd_ent->d_name;
queued_imported += save_if_interesting(argv, mem, st.st_size, fault);
syncing_party = 0;
munmap(mem, st.st_size);
if (!(stage_cur++ % stats_update_freq)) show_stats();
}
ck_free(path);
close(fd);
}
ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);
close(id_fd);
closedir(qd);
ck_free(qd_path);
ck_free(qd_synced_path);
}
closedir(sd);
}
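/* Worked example (hypothetical IDs): if .synced/<fuzzer> holds
   min_accept = 37 and the peer's queue contains id:000035, id:000037 and
   id:000041, then 000035 is skipped, 000037 and 000041 are executed, and
   next_min_accept = 42 is written back so the next pass starts past
   everything seen here. */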
/* Trim all new test cases to save cycles when doing deterministic checks. The
trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
file size, to keep the stage short and sweet. */
u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
#ifdef USE_PYTHON
if (py_functions[PY_FUNC_TRIM])
return trim_case_python(argv, q, in_buf);
#endif
static u8 tmp[64];
static u8 clean_trace[MAP_SIZE];
u8 needs_write = 0, fault = 0;
u32 trim_exec = 0;
u32 remove_len;
u32 len_p2;
/* Although the trimmer will be less useful when variable behavior is
detected, it will still work to some extent, so we don't check for
this. */
if (q->len < 5) return 0;
stage_name = tmp;
bytes_trim_in += q->len;
/* Select initial chunk len, starting with large steps. */
len_p2 = next_p2(q->len);
remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES);
/* Continue until the number of steps gets too high or the stepover
gets too small. */
while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) {
u32 remove_pos = remove_len;
sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));
stage_cur = 0;
stage_max = q->len / remove_len;
while (remove_pos < q->len) {
u32 trim_avail = MIN(remove_len, q->len - remove_pos);
u32 cksum;
write_with_gap(in_buf, q->len, remove_pos, trim_avail);
fault = run_target(argv, exec_tmout);
++trim_execs;
if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
/* Note that we don't keep track of crashes or hangs here; maybe TODO? */
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
/* If the deletion had no impact on the trace, make it permanent. This
isn't perfect for variable-path inputs, but we're just making a
best-effort pass, so it's not a big deal if we end up with false
negatives every now and then. */
if (cksum == q->exec_cksum) {
u32 move_tail = q->len - remove_pos - trim_avail;
q->len -= trim_avail;
len_p2 = next_p2(q->len);
memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
move_tail);
/* Let's save a clean trace, which will be needed by
update_bitmap_score once we're done with the trimming stuff. */
if (!needs_write) {
needs_write = 1;
memcpy(clean_trace, trace_bits, MAP_SIZE);
}
} else remove_pos += remove_len;
/* Since this can be slow, update the screen every now and then. */
if (!(trim_exec++ % stats_update_freq)) show_stats();
++stage_cur;
}
remove_len >>= 1;
}
/* If we have made changes to in_buf, we also need to update the on-disk
version of the test case. */
if (needs_write) {
s32 fd;
unlink(q->fname); /* ignore errors */
fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", q->fname);
ck_write(fd, in_buf, q->len, q->fname);
close(fd);
memcpy(trace_bits, clean_trace, MAP_SIZE);
update_bitmap_score(q);
}
abort_trimming:
bytes_trim_out += q->len;
return fault;
}
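/* Worked example (assuming the config.h defaults TRIM_START_STEPS = 16,
   TRIM_END_STEPS = 1024 and TRIM_MIN_BYTES = 4): for q->len = 4096,
   len_p2 = 4096, so trimming starts with remove_len = 4096 / 16 = 256-byte
   chunks and halves the step each round, stopping once it would drop below
   MAX(4096 / 1024, 4) = 4 bytes. */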
/* Write a modified test case, run program, process results. Handle
error conditions, returning 1 if it's time to bail out. This is
a helper function for fuzz_one(). */
u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
u8 fault;
if (post_handler) {
out_buf = post_handler(out_buf, &len);
if (!out_buf || !len) return 0;
}
write_to_testcase(out_buf, len);
fault = run_target(argv, exec_tmout);
if (stop_soon) return 1;
if (fault == FAULT_TMOUT) {
if (subseq_tmouts++ > TMOUT_LIMIT) {
++cur_skipped_paths;
return 1;
}
} else subseq_tmouts = 0;
/* Users can hit us with SIGUSR1 to request the current input
to be abandoned. */
if (skip_requested) {
skip_requested = 0;
++cur_skipped_paths;
return 1;
}
/* This handles FAULT_ERROR for us: */
queued_discovered += save_if_interesting(argv, out_buf, len, fault);
if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
show_stats();
return 0;
}
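/* A minimal sketch (not part of this commit) of a post-processing hook
   matching the post_handler(out_buf, &len) call shape above; the exported
   symbol name "afl_postprocess" follows AFL's post_library example but is an
   assumption here. It patches magic bytes the target insists on without
   touching the rest of the mutated input. */

#include <stdint.h>
#include <string.h>

typedef uint8_t u8;
typedef uint32_t u32;

static u8 fixed_buf[65536];

u8* afl_postprocess(u8* buf, u32* len) {
  if (*len < 4 || *len > sizeof(fixed_buf)) return buf;
  memcpy(fixed_buf, buf, *len);
  memcpy(fixed_buf, "\x7f""ELF", 4);  /* hypothetical required magic */
  return fixed_buf;
}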

src/afl-fuzz-stats.c (new file, 754 lines)

@@ -0,0 +1,754 @@
/*
american fuzzy lop - fuzzer code
--------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Forkserver design by Jann Horn <jannhorn@googlemail.com>
Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This is the real deal: the program takes an instrumented binary and
attempts a variety of basic fuzzing tricks, paying close attention to
how they affect the execution path.
*/
#include "afl-fuzz.h"
/* Update stats file for unattended monitoring. */
void write_stats_file(double bitmap_cvg, double stability, double eps) {
static double last_bcvg, last_stab, last_eps;
static struct rusage usage;
u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
s32 fd;
FILE* f;
fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
ck_free(fn);
f = fdopen(fd, "w");
if (!f) PFATAL("fdopen() failed");
/* Keep last values in case we're called from another context
where exec/sec stats and such are not readily available. */
if (!bitmap_cvg && !stability && !eps) {
bitmap_cvg = last_bcvg;
stability = last_stab;
eps = last_eps;
} else {
last_bcvg = bitmap_cvg;
last_stab = stability;
last_eps = eps;
}
fprintf(f, "start_time : %llu\n"
"last_update : %llu\n"
"fuzzer_pid : %d\n"
"cycles_done : %llu\n"
"execs_done : %llu\n"
"execs_per_sec : %0.02f\n"
"paths_total : %u\n"
"paths_favored : %u\n"
"paths_found : %u\n"
"paths_imported : %u\n"
"max_depth : %u\n"
"cur_path : %u\n" /* Must match find_start_position() */
"pending_favs : %u\n"
"pending_total : %u\n"
"variable_paths : %u\n"
"stability : %0.02f%%\n"
"bitmap_cvg : %0.02f%%\n"
"unique_crashes : %llu\n"
"unique_hangs : %llu\n"
"last_path : %llu\n"
"last_crash : %llu\n"
"last_hang : %llu\n"
"execs_since_crash : %llu\n"
"exec_timeout : %u\n"
"slowest_exec_ms : %llu\n"
"peak_rss_mb : %lu\n"
"afl_banner : %s\n"
"afl_version : " VERSION "\n"
"target_mode : %s%s%s%s%s%s%s%s\n"
"command_line : %s\n",
start_time / 1000, get_cur_time() / 1000, getpid(),
queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
queued_paths, queued_favored, queued_discovered, queued_imported,
max_depth, current_entry, pending_favored, pending_not_fuzzed,
queued_variable, stability, bitmap_cvg, unique_crashes,
unique_hangs, last_path_time / 1000, last_crash_time / 1000,
last_hang_time / 1000, total_execs - last_crash_execs,
exec_tmout, slowest_exec_ms, (unsigned long int)usage.ru_maxrss, use_banner,
unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "",
no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "",
persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "",
(unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode ||
persistent_mode || deferred_mode) ? "" : "default",
orig_cmdline);
/* ignore errors */
fclose(f);
}
/* Update the plot file if there is a reason to. */
void maybe_update_plot_file(double bitmap_cvg, double eps) {
static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
static u64 prev_qc, prev_uc, prev_uh;
if (prev_qp == queued_paths && prev_pf == pending_favored &&
prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
prev_qc == queue_cycle && prev_uc == unique_crashes &&
prev_uh == unique_hangs && prev_md == max_depth) return;
prev_qp = queued_paths;
prev_pf = pending_favored;
prev_pnf = pending_not_fuzzed;
prev_ce = current_entry;
prev_qc = queue_cycle;
prev_uc = unique_crashes;
prev_uh = unique_hangs;
prev_md = max_depth;
/* Fields in the file:
unix_time, cycles_done, cur_path, paths_total, pending_total,
pending_favs, map_size, unique_crashes, unique_hangs, max_depth,
execs_per_sec */
fprintf(plot_file,
"%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
unique_hangs, max_depth, eps); /* ignore errors */
fflush(plot_file);
}
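/* A hypothetical plot_data line matching the fprintf format above:
   1567443687, 2, 42, 1337, 820, 61, 12.34%, 2, 1, 14, 2500.00 */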
/* Check terminal dimensions after resize. */
static void check_term_size(void) {
struct winsize ws;
term_too_small = 0;
if (ioctl(1, TIOCGWINSZ, &ws)) return;
if (ws.ws_row == 0 || ws.ws_col == 0) return;
if (ws.ws_row < 24 || ws.ws_col < 79) term_too_small = 1;
}
/* A spiffy retro stats screen! This is called every stats_update_freq
execve() calls, plus in several other circumstances. */
void show_stats(void) {
static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
static double avg_exec;
double t_byte_ratio, stab_ratio;
u64 cur_ms;
u32 t_bytes, t_bits;
u32 banner_len, banner_pad;
u8 tmp[256];
cur_ms = get_cur_time();
/* If not enough time has passed since last UI update, bail out. */
if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return;
/* Check if we're past the 10 minute mark. */
if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1;
/* Calculate smoothed exec speed stats. */
if (!last_execs) {
avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time);
} else {
double cur_avg = ((double)(total_execs - last_execs)) * 1000 /
(cur_ms - last_ms);
/* If there is a dramatic (5x+) jump in speed, reset the indicator
more quickly. */
if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec)
avg_exec = cur_avg;
avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
cur_avg * (1.0 / AVG_SMOOTHING);
}
last_ms = cur_ms;
last_execs = total_execs;
/* Tell the callers when to contact us (as measured in execs). */
stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
if (!stats_update_freq) stats_update_freq = 1;
/* Do some bitmap stats. */
t_bytes = count_non_255_bytes(virgin_bits);
t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
if (t_bytes)
stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes;
else
stab_ratio = 100;
/* Roughly every minute, update fuzzer stats and save auto tokens. */
if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {
last_stats_ms = cur_ms;
write_stats_file(t_byte_ratio, stab_ratio, avg_exec);
save_auto();
write_bitmap();
}
/* Every now and then, write plot data. */
if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {
last_plot_ms = cur_ms;
maybe_update_plot_file(t_byte_ratio, avg_exec);
}
/* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2;
if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;
/* If we're not on TTY, bail out. */
if (not_on_tty) return;
/* Compute some mildly useful bitmap stats. */
t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits);
/* Now, for the visuals... */
if (clear_screen) {
SAYF(TERM_CLEAR CURSOR_HIDE);
clear_screen = 0;
check_term_size();
}
SAYF(TERM_HOME);
if (term_too_small) {
SAYF(cBRI "Your terminal is too small to display the UI.\n"
"Please resize terminal window to at least 79x24.\n" cRST);
return;
}
/* Let's start by drawing a centered banner. */
banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + strlen(power_name) + 3 + 5;
banner_pad = (79 - banner_len) / 2;
memset(tmp, ' ', banner_pad);
#ifdef HAVE_AFFINITY
sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
" (%s) " cPIN "[%s]" cBLU " {%d}", crash_mode ? cPIN "peruvian were-rabbit" :
cYEL "american fuzzy lop", use_banner, power_name, cpu_aff);
#else
sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
" (%s) " cPIN "[%s]", crash_mode ? cPIN "peruvian were-rabbit" :
cYEL "american fuzzy lop", use_banner, power_name);
#endif /* HAVE_AFFINITY */
SAYF("\n%s\n", tmp);
/* "Handy" shortcuts for drawing boxes... */
#define bSTG bSTART cGRA
#define bH2 bH bH
#define bH5 bH2 bH2 bH
#define bH10 bH5 bH5
#define bH20 bH10 bH10
#define bH30 bH20 bH10
#define SP5 "     "
#define SP10 SP5 SP5
#define SP20 SP10 SP10
/* Lord, forgive me this. */
SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB
bH bSTOP cCYA " overall results " bSTG bH2 bH2 bRT "\n");
if (dumb_mode) {
strcpy(tmp, cRST);
} else {
u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;
/* First queue cycle: don't stop now! */
if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else
/* Subsequent cycles, but we're still making finds. */
if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else
/* No finds for a long time and no test cases to try. */
if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
strcpy(tmp, cLGN);
/* Default: cautiously OK to stop? */
else strcpy(tmp, cLBL);
}
SAYF(bV bSTOP " run time : " cRST "%-33s " bSTG bV bSTOP
" cycles done : %s%-5s " bSTG bV "\n",
DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));
/* We want to warn people about not seeing new paths after a full cycle,
except when resuming fuzzing or running in non-instrumented mode. */
if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
in_bitmap || crash_mode)) {
SAYF(bV bSTOP " last new path : " cRST "%-33s ",
DTD(cur_ms, last_path_time));
} else {
if (dumb_mode)
SAYF(bV bSTOP " last new path : " cPIN "n/a" cRST
" (non-instrumented mode) ");
else
SAYF(bV bSTOP " last new path : " cRST "none yet " cLRD
"(odd, check syntax!) ");
}
SAYF(bSTG bV bSTOP " total paths : " cRST "%-5s " bSTG bV "\n",
DI(queued_paths));
/* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH
limit with a '+' appended to the count. */
sprintf(tmp, "%s%s", DI(unique_crashes),
(unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
" uniq crashes : %s%-6s" bSTG bV "\n",
DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST,
tmp);
sprintf(tmp, "%s%s", DI(unique_hangs),
(unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
SAYF(bV bSTOP " last uniq hang : " cRST "%-33s " bSTG bV bSTOP
" uniq hangs : " cRST "%-6s" bSTG bV "\n",
DTD(cur_ms, last_hang_time), tmp);
SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
" map coverage " bSTG bH bHT bH20 bH2 bVL "\n");
/* This gets funny because we want to print several variable-length variables
together, but then cram them into a fixed-width field - so we need to
put them in a temporary buffer first. */
sprintf(tmp, "%s%s%u (%0.02f%%)", DI(current_entry),
queue_cur->favored ? "." : "*", queue_cur->fuzz_level,
((double)current_entry * 100) / queued_paths);
SAYF(bV bSTOP " now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) *
100 / MAP_SIZE, t_byte_ratio);
SAYF(" map density : %s%-21s" bSTG bV "\n", t_byte_ratio > 70 ? cLRD :
((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
((double)cur_skipped_paths * 100) / queued_paths);
SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp);
sprintf(tmp, "%0.02f bits/tuple",
t_bytes ? (((double)t_bits) / t_bytes) : 0);
SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp);
SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
" findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
((double)queued_favored) * 100 / queued_paths);
/* Yeah... it's still going on... halp? */
SAYF(bV bSTOP " now trying : " cRST "%-20s " bSTG bV bSTOP
" favored paths : " cRST "%-22s" bSTG bV "\n", stage_name, tmp);
if (!stage_max) {
sprintf(tmp, "%s/-", DI(stage_cur));
} else {
sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max),
((double)stage_cur) * 100 / stage_max);
}
SAYF(bV bSTOP " stage execs : " cRST "%-20s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov),
((double)queued_with_cov) * 100 / queued_paths);
SAYF(" new edges on : " cRST "%-22s" bSTG bV "\n", tmp);
sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes),
(unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
if (crash_mode) {
SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
" new crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
unique_crashes ? cLRD : cRST, tmp);
} else {
SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
" total crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
unique_crashes ? cLRD : cRST, tmp);
}
/* Show a warning about slow execution. */
if (avg_exec < 100) {
sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ?
"zzzz..." : "slow!");
SAYF(bV bSTOP " exec speed : " cLRD "%-20s ", tmp);
} else {
sprintf(tmp, "%s/sec", DF(avg_exec));
SAYF(bV bSTOP " exec speed : " cRST "%-20s ", tmp);
}
sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
(unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
SAYF (bSTG bV bSTOP " total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
/* Aaaalmost there... hold on! */
SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10
bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n");
if (skip_deterministic) {
strcpy(tmp, "n/a, n/a, n/a");
} else {
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]),
DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]),
DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4]));
}
SAYF(bV bSTOP " bit flips : " cRST "%-36s " bSTG bV bSTOP " levels : "
cRST "%-10s" bSTG bV "\n", tmp, DI(max_depth));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]),
DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]),
DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32]));
SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP " pending : "
cRST "%-10s" bSTG bV "\n", tmp, DI(pending_not_fuzzed));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]),
DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]),
DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32]));
SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP " pend fav : "
cRST "%-10s" bSTG bV "\n", tmp, DI(pending_favored));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]),
DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]),
DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32]));
SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP " own finds : "
cRST "%-10s" bSTG bV "\n", tmp, DI(queued_discovered));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]),
DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]),
DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO]));
SAYF(bV bSTOP " dictionary : " cRST "%-36s " bSTG bV bSTOP
" imported : " cRST "%-10s" bSTG bV "\n", tmp,
sync_id ? DI(queued_imported) : (u8*)"n/a");
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]),
DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON]));
SAYF(bV bSTOP " havoc : " cRST "%-36s " bSTG bV bSTOP, tmp);
if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio);
else strcpy(tmp, "n/a");
SAYF(" stability : %s%-10s" bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40)
? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20))
? cMGN : cRST), tmp);
if (!bytes_trim_out) {
sprintf(tmp, "n/a, ");
} else {
sprintf(tmp, "%0.02f%%/%s, ",
((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in,
DI(trim_execs));
}
if (!blocks_eff_total) {
u8 tmp2[128];
sprintf(tmp2, "n/a");
strcat(tmp, tmp2);
} else {
u8 tmp2[128];
sprintf(tmp2, "%0.02f%%",
((double)(blocks_eff_total - blocks_eff_select)) * 100 /
blocks_eff_total);
strcat(tmp, tmp2);
}
if (custom_mutator) {
sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp);
} else {
SAYF(bV bSTOP " trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp);
}
/* Provide some CPU utilization stats. */
if (cpu_core_count) {
double cur_runnable = get_runnable_processes();
u32 cur_utilization = cur_runnable * 100 / cpu_core_count;
u8* cpu_color = cCYA;
/* If we could still run one or more processes, use green. */
if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count)
cpu_color = cLGN;
/* If we're clearly oversubscribed, use red. */
if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;
#ifdef HAVE_AFFINITY
if (cpu_aff >= 0) {
SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST,
MIN(cpu_aff, 999), cpu_color,
MIN(cur_utilization, 999));
} else {
SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST,
cpu_color, MIN(cur_utilization, 999));
}
#else
SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST,
cpu_color, MIN(cur_utilization, 999));
#endif /* ^HAVE_AFFINITY */
} else SAYF("\r");
/* Hallelujah! */
fflush(0);
}
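/* Worked example (assuming UI_TARGET_HZ = 5 from config.h): at roughly
   4000 execs/sec, stats_update_freq = 4000 / (5 * 10) = 80, so callers end
   up invoking show_stats() about every 80 executions, and the
   1000 / UI_TARGET_HZ guard at the top limits actual redraws to about five
   per second. */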
/* Display quick statistics at the end of processing the input directory,
plus a bunch of warnings. Some calibration stuff also ended up here,
along with several hardcoded constants. Maybe clean up eventually. */
void show_init_stats(void) {
struct queue_entry* q = queue;
u32 min_bits = 0, max_bits = 0;
u64 min_us = 0, max_us = 0;
u64 avg_us = 0;
u32 max_len = 0;
if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;
while (q) {
if (!min_us || q->exec_us < min_us) min_us = q->exec_us;
if (q->exec_us > max_us) max_us = q->exec_us;
if (!min_bits || q->bitmap_size < min_bits) min_bits = q->bitmap_size;
if (q->bitmap_size > max_bits) max_bits = q->bitmap_size;
if (q->len > max_len) max_len = q->len;
q = q->next;
}
SAYF("\n");
if (avg_us > ((qemu_mode || unicorn_mode) ? 50000 : 10000))
WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.",
doc_path);
/* Let's keep things moving with slow binaries. */
if (avg_us > 50000) havoc_div = 10; /* 0-19 execs/sec */
else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec */
else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */
if (!resuming_fuzz) {
if (max_len > 50 * 1024)
WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!",
DMS(max_len), doc_path);
else if (max_len > 10 * 1024)
WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.",
DMS(max_len), doc_path);
if (useless_at_start && !in_bitmap)
WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");
if (queued_paths > 100)
WARNF(cLRD "You probably have far too many input files! Consider trimming down.");
else if (queued_paths > 20)
WARNF("You have lots of input files; try starting small.");
}
OKF("Here are some useful stats:\n\n"
cGRA " Test case count : " cRST "%u favored, %u variable, %u total\n"
cGRA " Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n"
cGRA " Exec timing : " cRST "%s to %s us (average: %s us)\n",
queued_favored, queued_variable, queued_paths, min_bits, max_bits,
((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1),
DI(min_us), DI(max_us), DI(avg_us));
if (!timeout_given) {
/* Figure out the appropriate timeout. The basic idea is: 5x average or
1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.
If the program is slow, the multiplier is lowered to 2x or 3x, because
random scheduler jitter is less likely to have any impact, and because
our patience is wearing thin =) */
if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000;
else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000;
else exec_tmout = avg_us * 5 / 1000;
exec_tmout = MAX(exec_tmout, max_us / 1000);
exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;
ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
exec_tmout);
timeout_given = 1;
} else if (timeout_given == 3) {
ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout);
}
/* In dumb mode, re-running every timing out test case with a generous time
limit is very expensive, so let's select a more conservative default. */
if (dumb_mode && !getenv("AFL_HANG_TMOUT"))
hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100);
OKF("All set and ready to roll!");
}
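/* Worked example (assuming EXEC_TM_ROUND = 20 and EXEC_TIMEOUT = 1000 from
   config.h): with avg_us = 8000 and max_us = 30000, the 5x rule gives
   exec_tmout = 8000 * 5 / 1000 = 40 ms, then MAX(40, 30000 / 1000) = 40,
   rounded up to (40 + 20) / 20 * 20 = 60 ms, well under the 1000 ms cap. */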

File diff suppressed because it is too large.