AFL without globals (#220)

* moved globals to afl, shm and fsrv 

* moved argv to afl state, less bugs

* fixed unicorn docu

* lists everywhere

* merged custom mutators

* fixed leaks in afl-fuzz
This commit is contained in:
Dominik Maier
2020-03-09 11:24:10 +01:00
committed by GitHub
parent c159b872ef
commit dba3595c0a
26 changed files with 3741 additions and 3584 deletions

File diff suppressed because it is too large Load Diff

101
include/afl-prealloc.h Normal file
View File

@ -0,0 +1,101 @@
/* If we know we'll reuse small elements often, we'll just preallocate a
   buffer, then fall back to malloc */
// TODO: Replace free status check with bitmask+CLZ

#ifndef AFL_PREALLOC_H
#define AFL_PREALLOC_H

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

#include "debug.h"

/* Lifecycle state of one preallocatable element */
typedef enum prealloc_status {

  PRE_STATUS_UNUSED = 0,                                     /* free in buf */
  PRE_STATUS_USED,                                           /* used in buf */
  PRE_STATUS_MALLOC                                        /* system malloc */

} pre_status_t;

/* Adds the entry used for prealloc bookkeeping to this struct */
#define PREALLOCABLE ;pre_status_t pre_status; /* prealloc status of this instance */

/* Allocate an element of type *el_ptr, to this variable.
   Uses (and reuses) the given prealloc_buf before hitting libc's malloc.
   prealloc_buf must be the pointer to an array with type `type`.
   `type` must be a struct which uses PREALLOCABLE (a pre_status_t pre_status
   member). prealloc_size must be the array size.
   prealloc_counter must be a variable initialized with 0 (of any name).
   NOTE: no trailing `;` inside the do/while, so the macro expands to a
   single statement and is safe in un-braced if/else bodies. */
#define PRE_ALLOC(el_ptr, prealloc_buf, prealloc_size, prealloc_counter)    \
  do {                                                                      \
                                                                            \
    if ((prealloc_counter) >= (prealloc_size)) {                            \
                                                                            \
      el_ptr = malloc(sizeof(*el_ptr));                                     \
      /* Only tag the element if malloc succeeded; a NULL result is        \
         reported by the !el_ptr check below instead of segfaulting. */     \
      if (el_ptr) { el_ptr->pre_status = PRE_STATUS_MALLOC; }               \
                                                                            \
    } else {                                                                \
                                                                            \
      /* Find one of our preallocated elements. Start from NULL so the     \
         failure check below actually fires if no free slot exists         \
         (previously el_ptr was left pointing at the last, possibly        \
         in-use, slot). */                                                  \
      el_ptr = NULL;                                                        \
      u32 i;                                                                \
      for (i = 0; i < (prealloc_size); i++) {                               \
                                                                            \
        if ((prealloc_buf)[i].pre_status == PRE_STATUS_UNUSED) {            \
                                                                            \
          el_ptr = &((prealloc_buf)[i]);                                    \
          (prealloc_counter)++;                                             \
          el_ptr->pre_status = PRE_STATUS_USED;                             \
          break;                                                            \
                                                                            \
        }                                                                   \
                                                                            \
      }                                                                     \
                                                                            \
    }                                                                       \
                                                                            \
    if (!el_ptr) {                                                          \
                                                                            \
      FATAL("BUG in afl-prealloc.h -> no element found or allocated!");     \
                                                                            \
    }                                                                       \
                                                                            \
  } while (0)

/* Take a chosen (free) element from the prealloc_buf directly.
   Fatal if the element is already in use. */
#define PRE_ALLOC_FORCE(el_ptr, prealloc_counter)        \
  do {                                                   \
                                                         \
    if ((el_ptr)->pre_status != PRE_STATUS_UNUSED) {     \
                                                         \
      FATAL("PRE_ALLOC_FORCE element already allocated"); \
                                                         \
    }                                                    \
    (el_ptr)->pre_status = PRE_STATUS_USED;              \
    (prealloc_counter)++;                                \
                                                         \
  } while (0)

/* Free a preallocated (or malloced) element.
   Double frees are detected via the status field and are fatal. */
#define PRE_FREE(el_ptr, prealloc_counter)               \
  do {                                                   \
                                                         \
    switch ((el_ptr)->pre_status) {                      \
                                                         \
      case PRE_STATUS_USED: {                            \
                                                         \
        (el_ptr)->pre_status = PRE_STATUS_UNUSED;        \
        (prealloc_counter)--;                            \
        /* counter must be a signed type for this check  \
           to be meaningful */                           \
        if ((prealloc_counter) < 0) {                    \
                                                         \
          FATAL("Inconsistent data in PRE_FREE");        \
                                                         \
        }                                                \
        break;                                           \
                                                         \
      }                                                  \
      case PRE_STATUS_MALLOC: {                          \
                                                         \
        (el_ptr)->pre_status = PRE_STATUS_UNUSED;        \
        free((el_ptr));                                  \
        break;                                           \
                                                         \
      }                                                  \
      default: {                                         \
                                                         \
        FATAL("Double Free Detected");                   \
        break;                                           \
                                                         \
      }                                                  \
                                                         \
    }                                                    \
                                                         \
  } while (0)

#endif

View File

@ -28,16 +28,14 @@
#include <sys/time.h>
#include "types.h"
#include "stdbool.h"
extern u8* target_path; /* Path to target binary */
void detect_file_args(char** argv, u8* prog_in);
void detect_file_args(char** argv, u8* prog_in, u8 use_stdin);
void check_environment_vars(char** env);
char** get_qemu_argv(u8* own_loc, char** argv, int argc);
char** get_wine_argv(u8* own_loc, char** argv, int argc);
char** get_qemu_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv);
char** get_wine_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv);
char* get_afl_env(char* env);
#endif
/* Get unix time in milliseconds */
@ -65,3 +63,4 @@ static u64 get_cur_time_us(void) {
}
#endif

View File

@ -27,8 +27,47 @@
#ifndef __AFL_FORKSERVER_H
#define __AFL_FORKSERVER_H
#include <stdio.h>
typedef struct afl_forkserver {
/* a program that includes afl-forkserver needs to define these */
u8 uses_asan; /* Target uses ASAN? */
u8* trace_bits; /* SHM with instrumentation bitmap */
u8 use_stdin; /* use stdin for sending data */
s32 fsrv_pid, /* PID of the fork server */
child_pid, /* PID of the fuzzed program */
out_dir_fd; /* FD of the lock file */
s32 out_fd, /* Persistent fd for afl->fsrv.out_file */
#ifndef HAVE_ARC4RANDOM
dev_urandom_fd, /* Persistent fd for /dev/urandom */
#endif
dev_null_fd, /* Persistent fd for /dev/null */
fsrv_ctl_fd, /* Fork server control pipe (write) */
fsrv_st_fd; /* Fork server status pipe (read) */
u32 exec_tmout; /* Configurable exec timeout (ms) */
u64 mem_limit; /* Memory cap for child (MB) */
u8 *out_file, /* File to fuzz, if any */
*target_path; /* Path of the target */
FILE* plot_file; /* Gnuplot output file */
u8 child_timed_out; /* Traced process timed out? */
} afl_forkserver_t;
void handle_timeout(int sig);
void init_forkserver(char **argv);
void afl_fsrv_init(afl_forkserver_t *fsrv);
void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv);
void afl_fsrv_deinit(afl_forkserver_t *fsrv);
void afl_fsrv_killall();
#ifdef __APPLE__
#define MSG_FORK_ON_APPLE \

133
include/list.h Normal file
View File

@ -0,0 +1,133 @@
#ifndef AFL_LIST
#define AFL_LIST

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

#include "debug.h"
#include "afl-prealloc.h"

/* How many elements to allocate before malloc is needed */
#define LIST_PREALLOC_SIZE (64)

/* Doubly linked ring element. The first prealloc slot serves as the
   sentinel head and carries no payload. */
typedef struct list_element {

  PREALLOCABLE;

  struct list_element *prev;
  struct list_element *next;
  void *               data;

} element_t;

typedef struct list {

  element_t element_prealloc_buf[LIST_PREALLOC_SIZE];
  u32       element_prealloc_count;

} list_t;

/* The head of the ring is always prealloc slot 0.
   `inline` added: plain `static` functions in a header trigger
   -Wunused-function in every TU that includes but does not use them. */
static inline element_t *get_head(list_t *list) {

  return &list->element_prealloc_buf[0];

}

/* Return an element to the prealloc buffer (or to libc, if malloced). */
static inline void list_free_el(list_t *list, element_t *el) {

  PRE_FREE(el, list->element_prealloc_count);

}

/* Append the pointer `el` to the end of the list.
   The list must be zero-initialized ({0}) before the first append;
   the first append sets up the sentinel ring. */
static inline void list_append(list_t *list, void *el) {

  element_t *head = get_head(list);
  if (!head->next) {

    /* initialize */

    memset(list, 0, sizeof(list_t));
    PRE_ALLOC_FORCE(head, list->element_prealloc_count);
    head->next = head->prev = head;

  }

  element_t *el_box = NULL;
  PRE_ALLOC(el_box, list->element_prealloc_buf, LIST_PREALLOC_SIZE,
            list->element_prealloc_count);
  if (!el_box) FATAL("failed to allocate list element");
  el_box->data = el;
  el_box->next = head;
  el_box->prev = head->prev;
  head->prev->next = el_box;
  head->prev = el_box;

}

/* Simple foreach.
   Pointer to the current element is in `el`,
   casted to (a pointer) of the given `type`.
   A return from this block will return from calling func.
   NOTE: iterating a list to which nothing was ever appended is a
   fatal error. No trailing `;` inside the do/while (see afl-prealloc.h). */
#define LIST_FOREACH(list, type, block)                  \
  do {                                                   \
                                                         \
    list_t *   li = (list);                              \
    element_t *head = get_head((li));                    \
    element_t *el_box = (head)->next;                    \
    if (!el_box) FATAL("foreach over uninitialized list"); \
    while (el_box != head) {                             \
                                                         \
      type *el = (type *)((el_box)->data);               \
      /* get next so el_box can be unlinked */           \
      element_t *next = el_box->next;                    \
      {block};                                           \
      el_box = next;                                     \
                                                         \
    }                                                    \
                                                         \
  } while (0)

/* In foreach: remove the current el from the list.
   Relies on the `li`, `el_box` and `next` locals declared by
   LIST_FOREACH -- only valid inside a LIST_FOREACH block. */
#define LIST_REMOVE_CURRENT_EL_IN_FOREACH() \
  do {                                      \
                                            \
    el_box->prev->next = next;              \
    el_box->next->prev = el_box->prev;      \
    list_free_el(li, el_box);               \
                                            \
  } while (0)

/* Same as foreach, but will clear list in the process */
#define LIST_FOREACH_CLEAR(list, type, block) \
  do {                                        \
                                              \
    LIST_FOREACH((list), type, {              \
                                              \
      {block};                                \
      LIST_REMOVE_CURRENT_EL_IN_FOREACH();    \
                                              \
    });                                       \
                                              \
  } while (0)

/* Remove the item `remove_me` from the list; fatal if it is not found. */
static inline void list_remove(list_t *list, void *remove_me) {

  LIST_FOREACH(list, void, {

    if (el == remove_me) {

      el_box->prev->next = el_box->next;
      el_box->next->prev = el_box->prev;
      el_box->data = NULL;
      list_free_el(list, el_box);
      return;

    }

  });

  FATAL("List item to be removed not in list");

}

/* Returns true if el is in list */
static inline bool list_contains(list_t *list, void *contains_me) {

  LIST_FOREACH(list, void, {

    if (el == contains_me) return true;

  });

  return false;

}

#endif

View File

@ -27,11 +27,32 @@
#ifndef __AFL_SHAREDMEM_H
#define __AFL_SHAREDMEM_H
void setup_shm(unsigned char dumb_mode);
void remove_shm(void);
typedef struct sharedmem {
extern int cmplog_mode;
extern struct cmp_map* cmp_map;
//extern unsigned char *trace_bits;
#ifdef USEMMAP
/* ================ Proteas ================ */
int g_shm_fd;
char g_shm_file_path[L_tmpnam];
/* ========================================= */
#else
s32 shm_id; /* ID of the SHM region */
s32 cmplog_shm_id;
#endif
u8 *map; /* shared memory region */
size_t size_alloc; /* actual allocated size */
size_t size_used; /* in use by shmem app */
int cmplog_mode;
struct cmp_map *cmp_map;
} sharedmem_t;
u8 *afl_shm_init(sharedmem_t*, size_t, unsigned char dumb_mode);
void afl_shm_deinit(sharedmem_t*);
#endif

View File

@ -84,6 +84,8 @@ static volatile u8 stop_soon, /* Ctrl-C pressed? */
static u8 qemu_mode;
static u8 *target_path;
/* Constants used for describing byte behavior. */
#define RESP_NONE 0x00 /* Changing byte is a no-op. */
@ -998,21 +1000,23 @@ int main(int argc, char** argv, char** envp) {
use_hex_offsets = !!get_afl_env("AFL_ANALYZE_HEX");
check_environment_vars(envp);
setup_shm(0);
sharedmem_t shm = {0};
trace_bits = afl_shm_init(&shm, MAP_SIZE, 0);
atexit(at_exit_handler);
setup_signal_handlers();
set_up_environment();
find_binary(argv[optind]);
detect_file_args(argv + optind, prog_in);
detect_file_args(argv + optind, prog_in, use_stdin);
if (qemu_mode) {
if (use_wine)
use_argv = get_wine_argv(argv[0], argv + optind, argc - optind);
use_argv = get_wine_argv(argv[0], &target_path, argc - optind, argv + optind);
else
use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
use_argv = get_qemu_argv(argv[0], &target_path, argc - optind, argv + optind);
} else
@ -1037,6 +1041,8 @@ int main(int argc, char** argv, char** envp) {
OKF("We're done here. Have a nice day!\n");
afl_shm_deinit(&shm);
exit(0);
}

View File

@ -36,11 +36,9 @@
#include <unistd.h>
#endif
u8* target_path; /* Path to target binary */
extern u8 use_stdin;
extern u8 be_quiet;
void detect_file_args(char** argv, u8* prog_in) {
void detect_file_args(char **argv, u8 *prog_in, u8 use_stdin) {
u32 i = 0;
#ifdef __GLIBC__
@ -64,6 +62,8 @@ void detect_file_args(char** argv, u8* prog_in) {
if (!cwd) PFATAL("getcwd() failed");
// TODO: free allocs below... somewhere.
while (argv[i]) {
u8* aa_loc = strstr(argv[i], "@@");
@ -87,6 +87,8 @@ void detect_file_args(char** argv, u8* prog_in) {
/* Construct a replacement argv value. */
// TODO: n_arg is never freed
*aa_loc = 0;
n_arg = alloc_printf("%s%s%s", argv[i], aa_subst, aa_loc + 2);
argv[i] = n_arg;
@ -108,14 +110,14 @@ void detect_file_args(char** argv, u8* prog_in) {
/* Rewrite argv for QEMU. */
char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
char** get_qemu_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
char** new_argv = ck_alloc(sizeof(char*) * (argc + 4));
u8 * tmp, *cp = NULL, *rsl, *own_copy;
memcpy(new_argv + 3, argv + 1, (int)(sizeof(char*)) * argc);
new_argv[2] = target_path;
new_argv[2] = *target_path_p;
new_argv[1] = "--";
/* Now we need to actually find the QEMU binary to put in argv[0]. */
@ -128,7 +130,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
target_path = new_argv[0] = cp;
*target_path_p = new_argv[0] = cp;
return new_argv;
}
@ -145,7 +147,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
if (!access(cp, X_OK)) {
target_path = new_argv[0] = cp;
*target_path_p = new_argv[0] = cp;
return new_argv;
}
@ -156,8 +158,9 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
if (cp != NULL) ck_free(cp);
target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace");
if (cp) ck_free(cp);
*target_path_p = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace");
return new_argv;
}
@ -165,7 +168,7 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
SAYF("\n" cLRD "[-] " cRST
"Oops, unable to find the 'afl-qemu-trace' binary. The binary must be "
"built\n"
" separately by following the instructions in qemu_mode/README.md. "
"         separately by following the instructions in qemu_mode/README.md. "
"If you\n"
" already have the binary installed, you may need to specify "
"AFL_PATH in the\n"
@ -184,14 +187,14 @@ char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
/* Rewrite argv for Wine+QEMU. */
char** get_wine_argv(u8* own_loc, char** argv, int argc) {
char** get_wine_argv(u8* own_loc, u8 **target_path_p, int argc, char **argv) {
char** new_argv = ck_alloc(sizeof(char*) * (argc + 3));
u8 * tmp, *cp = NULL, *rsl, *own_copy;
memcpy(new_argv + 2, argv + 1, (int)(sizeof(char*)) * argc);
new_argv[1] = target_path;
new_argv[1] = *target_path_p;
/* Now we need to actually find the QEMU binary to put in argv[0]. */
@ -209,7 +212,7 @@ char** get_wine_argv(u8* own_loc, char** argv, int argc) {
if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
target_path = new_argv[0] = cp;
*target_path_p = new_argv[0] = cp;
return new_argv;
}
@ -232,7 +235,7 @@ char** get_wine_argv(u8* own_loc, char** argv, int argc) {
if (!access(cp, X_OK)) {
target_path = new_argv[0] = cp;
*target_path_p = new_argv[0] = cp;
return new_argv;
}
@ -251,7 +254,7 @@ char** get_wine_argv(u8* own_loc, char** argv, int argc) {
if (!access(ncp, X_OK)) {
target_path = new_argv[0] = ck_strdup(ncp);
*target_path_p = new_argv[0] = ck_strdup(ncp);
return new_argv;
}
@ -261,7 +264,7 @@ char** get_wine_argv(u8* own_loc, char** argv, int argc) {
SAYF("\n" cLRD "[-] " cRST
"Oops, unable to find the '%s' binary. The binary must be "
"built\n"
" separately by following the instructions in qemu_mode/README.md. "
"         separately by following the instructions in qemu_mode/README.md. "
"If you\n"
" already have the binary installed, you may need to specify "
"AFL_PATH in the\n"
@ -326,4 +329,3 @@ char* get_afl_env(char* env) {
return val;
}

View File

@ -28,6 +28,7 @@
#include "types.h"
#include "debug.h"
#include "common.h"
#include "list.h"
#include "forkserver.h"
#include <stdio.h>
@ -41,27 +42,10 @@
#include <sys/wait.h>
#include <sys/resource.h>
/* a program that includes afl-forkserver needs to define these */
extern u8 uses_asan;
extern u8 *trace_bits;
extern u8 use_stdin;
extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd;
extern s32 out_fd, out_dir_fd, dev_null_fd; /* initialize these with -1 */
#ifndef HAVE_ARC4RANDOM
extern s32 dev_urandom_fd;
#endif
extern u32 exec_tmout;
extern u64 mem_limit;
extern u8 * out_file, *target_path, *doc_path;
extern FILE *plot_file;
/* we need this internally but can be defined and read extern in the main source
*/
u8 child_timed_out;
/* Describe integer as memory size. */
extern u8 *doc_path;
u8 *forkserver_DMS(u64 val) {
static u8 tmp[12][16];
@ -122,25 +106,40 @@ u8 *forkserver_DMS(u64 val) {
}
list_t fsrv_list = {0};
/* the timeout handler */
void handle_timeout(int sig) {
if (child_pid > 0) {
LIST_FOREACH(&fsrv_list, afl_forkserver_t, {
child_timed_out = 1;
kill(child_pid, SIGKILL);
//TODO: We need a proper timer to handle multiple timeouts
if (el->child_pid > 0) {
} else if (child_pid == -1 && forksrv_pid > 0) {
el->child_timed_out = 1;
kill(el->child_pid, SIGKILL);
child_timed_out = 1;
kill(forksrv_pid, SIGKILL);
} else if (el->child_pid == -1 && el->fsrv_pid > 0) {
}
el->child_timed_out = 1;
kill(el->fsrv_pid, SIGKILL);
}
});
}
/* Spin up fork server (instrumented mode only). The idea is explained here:
/* Initializes the struct */
void afl_fsrv_init(afl_forkserver_t *fsrv) {
list_append(&fsrv_list, fsrv);
}
/* Spins up fork server (instrumented mode only). The idea is explained here:
http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
@ -148,7 +147,7 @@ void handle_timeout(int sig) {
cloning a stopped child. So, we just execute once, and then send commands
through a pipe. The other part of this logic is in afl-as.h / llvm_mode */
void init_forkserver(char **argv) {
void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv) {
static struct itimerval it;
int st_pipe[2], ctl_pipe[2];
@ -159,12 +158,12 @@ void init_forkserver(char **argv) {
if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
child_timed_out = 0;
forksrv_pid = fork();
fsrv->child_timed_out = 0;
fsrv->fsrv_pid = fork();
if (forksrv_pid < 0) PFATAL("fork() failed");
if (fsrv->fsrv_pid < 0) PFATAL("fork() failed");
if (!forksrv_pid) {
if (!fsrv->fsrv_pid) {
/* CHILD PROCESS */
@ -180,9 +179,9 @@ void init_forkserver(char **argv) {
}
if (mem_limit) {
if (fsrv->mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
r.rlim_max = r.rlim_cur = ((rlim_t)fsrv->mem_limit) << 20;
#ifdef RLIMIT_AS
setrlimit(RLIMIT_AS, &r); /* Ignore errors */
@ -209,19 +208,19 @@ void init_forkserver(char **argv) {
if (!get_afl_env("AFL_DEBUG_CHILD_OUTPUT")) {
dup2(dev_null_fd, 1);
dup2(dev_null_fd, 2);
dup2(fsrv->dev_null_fd, 1);
dup2(fsrv->dev_null_fd, 2);
}
if (!use_stdin) {
if (!fsrv->use_stdin) {
dup2(dev_null_fd, 0);
dup2(fsrv->dev_null_fd, 0);
} else {
dup2(out_fd, 0);
close(out_fd);
dup2(fsrv->out_fd, 0);
close(fsrv->out_fd);
}
@ -235,12 +234,12 @@ void init_forkserver(char **argv) {
close(st_pipe[0]);
close(st_pipe[1]);
close(out_dir_fd);
close(dev_null_fd);
close(fsrv->out_dir_fd);
close(fsrv->dev_null_fd);
#ifndef HAVE_ARC4RANDOM
close(dev_urandom_fd);
close(fsrv->dev_urandom_fd);
#endif
close(plot_file == NULL ? -1 : fileno(plot_file));
close(fsrv->plot_file == NULL ? -1 : fileno(fsrv->plot_file));
/* This should improve performance a bit, since it stops the linker from
doing extra work post-fork(). */
@ -269,12 +268,12 @@ void init_forkserver(char **argv) {
"msan_track_origins=0",
0);
execv(target_path, argv);
execv(fsrv->target_path, argv);
/* Use a distinctive bitmap signature to tell the parent about execv()
falling through. */
*(u32 *)trace_bits = EXEC_FAIL_SIG;
*(u32 *)fsrv->trace_bits = EXEC_FAIL_SIG;
exit(0);
}
@ -286,21 +285,21 @@ void init_forkserver(char **argv) {
close(ctl_pipe[0]);
close(st_pipe[1]);
fsrv_ctl_fd = ctl_pipe[1];
fsrv_st_fd = st_pipe[0];
fsrv->fsrv_ctl_fd = ctl_pipe[1];
fsrv->fsrv_st_fd = st_pipe[0];
/* Wait for the fork server to come up, but don't wait too long. */
if (exec_tmout) {
if (fsrv->exec_tmout) {
it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
it.it_value.tv_sec = ((fsrv->exec_tmout * FORK_WAIT_MULT) / 1000);
it.it_value.tv_usec = ((fsrv->exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
}
setitimer(ITIMER_REAL, &it, NULL);
rlen = read(fsrv_st_fd, &status, 4);
rlen = read(fsrv->fsrv_st_fd, &status, 4);
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
@ -317,14 +316,14 @@ void init_forkserver(char **argv) {
}
if (child_timed_out)
if (fsrv->child_timed_out)
FATAL("Timeout while initializing fork server (adjusting -t may help)");
if (waitpid(forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
if (waitpid(fsrv->fsrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
if (WIFSIGNALED(status)) {
if (mem_limit && mem_limit < 500 && uses_asan) {
if (fsrv->mem_limit && fsrv->mem_limit < 500 && fsrv->uses_asan) {
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, "
@ -336,7 +335,7 @@ void init_forkserver(char **argv) {
" %s/notes_for_asan.md for help.\n",
doc_path);
} else if (!mem_limit) {
} else if (!fsrv->mem_limit) {
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, "
@ -389,7 +388,7 @@ void init_forkserver(char **argv) {
"options\n"
" fail, poke <afl-users@googlegroups.com> for troubleshooting "
"tips.\n",
forkserver_DMS(mem_limit << 20), mem_limit - 1);
forkserver_DMS(fsrv->mem_limit << 20), fsrv->mem_limit - 1);
}
@ -397,10 +396,10 @@ void init_forkserver(char **argv) {
}
if (*(u32 *)trace_bits == EXEC_FAIL_SIG)
if (*(u32 *)fsrv->trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute target application ('%s')", argv[0]);
if (mem_limit && mem_limit < 500 && uses_asan) {
if (fsrv->mem_limit && fsrv->mem_limit < 500 && fsrv->uses_asan) {
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated "
@ -412,7 +411,7 @@ void init_forkserver(char **argv) {
" read %s/notes_for_asan.md for help.\n",
doc_path);
} else if (!mem_limit) {
} else if (!fsrv->mem_limit) {
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated "
@ -455,7 +454,7 @@ void init_forkserver(char **argv) {
"never\n"
" reached before the program terminates.\n\n"
: "",
forkserver_DMS(mem_limit << 20), mem_limit - 1);
forkserver_DMS(fsrv->mem_limit << 20), fsrv->mem_limit - 1);
}
@ -463,3 +462,15 @@ void init_forkserver(char **argv) {
}
void afl_fsrv_killall() {
LIST_FOREACH(&fsrv_list, afl_forkserver_t, {
if (el->child_pid > 0) kill(el->child_pid, SIGKILL);
});
}
void afl_fsrv_deinit(afl_forkserver_t *fsrv) {
list_remove(&fsrv_list, fsrv);
}

View File

@ -29,20 +29,20 @@
-B option, to focus a separate fuzzing session on a particular
interesting input without rediscovering all the others. */
void write_bitmap(void) {
void write_bitmap(afl_state_t *afl) {
u8* fname;
s32 fd;
if (!bitmap_changed) return;
bitmap_changed = 0;
if (!afl->bitmap_changed) return;
afl->bitmap_changed = 0;
fname = alloc_printf("%s/fuzz_bitmap", out_dir);
fname = alloc_printf("%s/fuzz_bitmap", afl->out_dir);
fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) PFATAL("Unable to open '%s'", fname);
ck_write(fd, virgin_bits, MAP_SIZE, fname);
ck_write(fd, afl->virgin_bits, MAP_SIZE, fname);
close(fd);
ck_free(fname);
@ -51,13 +51,13 @@ void write_bitmap(void) {
/* Read bitmap from file. This is for the -B option again. */
void read_bitmap(u8* fname) {
void read_bitmap(afl_state_t *afl, u8* fname) {
s32 fd = open(fname, O_RDONLY);
if (fd < 0) PFATAL("Unable to open '%s'", fname);
ck_read(fd, virgin_bits, MAP_SIZE, fname);
ck_read(fd, afl->virgin_bits, MAP_SIZE, fname);
close(fd);
@ -71,18 +71,18 @@ void read_bitmap(u8* fname) {
This function is called after every exec() on a fairly large buffer, so
it needs to be fast. We do this in 32-bit and 64-bit flavors. */
u8 has_new_bits(u8* virgin_map) {
u8 has_new_bits(afl_state_t *afl, u8* virgin_map) {
#ifdef WORD_SIZE_64
u64* current = (u64*)trace_bits;
u64* current = (u64*)afl->fsrv.trace_bits;
u64* virgin = (u64*)virgin_map;
u32 i = (MAP_SIZE >> 3);
#else
u32* current = (u32*)trace_bits;
u32* current = (u32*)afl->fsrv.trace_bits;
u32* virgin = (u32*)virgin_map;
u32 i = (MAP_SIZE >> 2);
@ -138,7 +138,7 @@ u8 has_new_bits(u8* virgin_map) {
}
if (ret && virgin_map == virgin_bits) bitmap_changed = 1;
if (ret && virgin_map == afl->virgin_bits) afl->bitmap_changed = 1;
return ret;
@ -415,35 +415,35 @@ void minimize_bits(u8* dst, u8* src) {
/* Construct a file name for a new test case, capturing the operation
that led to its discovery. Uses a static buffer. */
u8* describe_op(u8 hnb) {
u8* describe_op(afl_state_t *afl, u8 hnb) {
static u8 ret[256];
u8 *ret = afl->describe_op_buf_256;
if (syncing_party) {
if (afl->syncing_party) {
sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case);
sprintf(ret, "sync:%s,src:%06u", afl->syncing_party, afl->syncing_case);
} else {
sprintf(ret, "src:%06u", current_entry);
sprintf(ret, "src:%06u", afl->current_entry);
sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time);
sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - afl->start_time);
if (splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", splicing_with);
if (afl->splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", afl->splicing_with);
sprintf(ret + strlen(ret), ",op:%s", stage_short);
sprintf(ret + strlen(ret), ",op:%s", afl->stage_short);
if (stage_cur_byte >= 0) {
if (afl->stage_cur_byte >= 0) {
sprintf(ret + strlen(ret), ",pos:%d", stage_cur_byte);
sprintf(ret + strlen(ret), ",pos:%d", afl->stage_cur_byte);
if (stage_val_type != STAGE_VAL_NONE)
if (afl->stage_val_type != STAGE_VAL_NONE)
sprintf(ret + strlen(ret), ",val:%s%+d",
(stage_val_type == STAGE_VAL_BE) ? "be:" : "", stage_cur_val);
(afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "", afl->stage_cur_val);
} else
sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val);
sprintf(ret + strlen(ret), ",rep:%d", afl->stage_cur_val);
}
@ -457,9 +457,9 @@ u8* describe_op(u8 hnb) {
/* Write a message accompanying the crash directory :-) */
static void write_crash_readme(void) {
static void write_crash_readme(afl_state_t *afl) {
u8* fn = alloc_printf("%s/crashes/README.txt", out_dir);
u8* fn = alloc_printf("%s/crashes/README.txt", afl->out_dir);
s32 fd;
FILE* f;
@ -499,7 +499,7 @@ static void write_crash_readme(void) {
" https://github.com/vanhauser-thc/AFLplusplus\n\n",
orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */
afl->orig_cmdline, DMS(afl->fsrv.mem_limit << 20)); /* ignore errors */
fclose(f);
@ -509,7 +509,7 @@ static void write_crash_readme(void) {
save or queue the input test case for further analysis if so. Returns 1 if
entry is saved, 0 otherwise. */
u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
u8 save_if_interesting(afl_state_t *afl, void* mem, u32 len, u8 fault) {
if (len == 0) return 0;
@ -519,9 +519,9 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
u8 keeping = 0, res;
/* Update path frequency. */
u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
struct queue_entry* q = queue;
struct queue_entry* q = afl->queue;
while (q) {
if (q->exec_cksum == cksum) {
@ -535,44 +535,44 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
}
if (fault == crash_mode) {
if (fault == afl->crash_mode) {
/* Keep only if there are new bits in the map, add to queue for
future fuzzing, etc. */
if (!(hnb = has_new_bits(virgin_bits))) {
if (!(hnb = has_new_bits(afl, afl->virgin_bits))) {
if (crash_mode) ++total_crashes;
if (afl->crash_mode) ++afl->total_crashes;
return 0;
}
#ifndef SIMPLE_FILES
fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths,
describe_op(hnb));
fn = alloc_printf("%s/queue/id:%06u,%s", afl->out_dir, afl->queued_paths,
describe_op(afl, hnb));
#else
fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths);
fn = alloc_printf("%s/queue/id_%06u", afl->out_dir, afl->queued_paths);
#endif /* ^!SIMPLE_FILES */
add_to_queue(fn, len, 0);
add_to_queue(afl, fn, len, 0);
if (hnb == 2) {
queue_top->has_new_cov = 1;
++queued_with_cov;
afl->queue_top->has_new_cov = 1;
++afl->queued_with_cov;
}
queue_top->exec_cksum = cksum;
afl->queue_top->exec_cksum = cksum;
/* Try to calibrate inline; this also calls update_bitmap_score() when
successful. */
res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);
res = calibrate_case(afl, afl->queue_top, mem, afl->queue_cycle - 1, 0);
if (res == FAULT_ERROR) FATAL("Unable to execute target application");
@ -594,58 +594,58 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we
just keep everything. */
++total_tmouts;
++afl->total_tmouts;
if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
if (afl->unique_hangs >= KEEP_UNIQUE_HANG) return keeping;
if (!dumb_mode) {
if (!afl->dumb_mode) {
#ifdef WORD_SIZE_64
simplify_trace((u64*)trace_bits);
simplify_trace((u64*)afl->fsrv.trace_bits);
#else
simplify_trace((u32*)trace_bits);
simplify_trace((u32*)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
if (!has_new_bits(virgin_tmout)) return keeping;
if (!has_new_bits(afl, afl->virgin_tmout)) return keeping;
}
++unique_tmouts;
++afl->unique_tmouts;
/* Before saving, we make sure that it's a genuine hang by re-running
the target with a more generous timeout (unless the default timeout
is already generous). */
if (exec_tmout < hang_tmout) {
if (afl->fsrv.exec_tmout < afl->hang_tmout) {
u8 new_fault;
write_to_testcase(mem, len);
new_fault = run_target(argv, hang_tmout);
write_to_testcase(afl, mem, len);
new_fault = run_target(afl, afl->hang_tmout);
/* A corner case that one user reported bumping into: increasing the
timeout actually uncovers a crash. Make sure we don't discard it if
so. */
if (!stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;
if (!afl->stop_soon && new_fault == FAULT_CRASH) goto keep_as_crash;
if (stop_soon || new_fault != FAULT_TMOUT) return keeping;
if (afl->stop_soon || new_fault != FAULT_TMOUT) return keeping;
}
#ifndef SIMPLE_FILES
fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, unique_hangs,
describe_op(0));
fn = alloc_printf("%s/hangs/id:%06llu,%s", afl->out_dir, afl->unique_hangs,
describe_op(afl, 0));
#else
fn = alloc_printf("%s/hangs/id_%06llu", out_dir, unique_hangs);
fn = alloc_printf("%s/hangs/id_%06llu", afl->out_dir, afl->unique_hangs);
#endif /* ^!SIMPLE_FILES */
++unique_hangs;
++afl->unique_hangs;
last_hang_time = get_cur_time();
afl->last_hang_time = get_cur_time();
break;
@ -657,41 +657,41 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
except for slightly different limits and no need to re-run test
cases. */
++total_crashes;
++afl->total_crashes;
if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
if (afl->unique_crashes >= KEEP_UNIQUE_CRASH) return keeping;
if (!dumb_mode) {
if (!afl->dumb_mode) {
#ifdef WORD_SIZE_64
simplify_trace((u64*)trace_bits);
simplify_trace((u64*)afl->fsrv.trace_bits);
#else
simplify_trace((u32*)trace_bits);
simplify_trace((u32*)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
if (!has_new_bits(virgin_crash)) return keeping;
if (!has_new_bits(afl, afl->virgin_crash)) return keeping;
}
if (!unique_crashes) write_crash_readme();
if (!afl->unique_crashes) write_crash_readme(afl);
#ifndef SIMPLE_FILES
fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir,
unique_crashes, kill_signal, describe_op(0));
fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", afl->out_dir,
afl->unique_crashes, afl->kill_signal, describe_op(afl, 0));
#else
fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes,
kill_signal);
fn = alloc_printf("%s/crashes/id_%06llu_%02u", afl->out_dir, afl->unique_crashes,
afl->kill_signal);
#endif /* ^!SIMPLE_FILES */
++unique_crashes;
if (infoexec) { // if the user wants to be informed on new crashes - do
++afl->unique_crashes;
if (afl->infoexec) { // if the user wants to be informed on new crashes - do
#if !TARGET_OS_IPHONE
// that
if (system(infoexec) == -1)
if (system(afl->infoexec) == -1)
hnb += 0; // we dont care if system errors, but we dont want a
// compiler warning either
#else
@ -700,8 +700,8 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
}
last_crash_time = get_cur_time();
last_crash_execs = total_execs;
afl->last_crash_time = get_cur_time();
afl->last_crash_execs = afl->total_execs;
break;

View File

@ -27,9 +27,7 @@
#include "afl-fuzz.h"
#include "cmplog.h"
static s32 cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd;
void init_cmplog_forkserver(char** argv) {
void init_cmplog_forkserver(afl_state_t *afl) {
static struct itimerval it;
int st_pipe[2], ctl_pipe[2];
@ -40,12 +38,12 @@ void init_cmplog_forkserver(char** argv) {
if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
child_timed_out = 0;
cmplog_forksrv_pid = fork();
afl->fsrv.child_timed_out = 0;
afl->cmplog_fsrv_pid = fork();
if (cmplog_forksrv_pid < 0) PFATAL("fork() failed");
if (afl->cmplog_fsrv_pid < 0) PFATAL("fork() failed");
if (!cmplog_forksrv_pid) {
if (!afl->cmplog_fsrv_pid) {
/* CHILD PROCESS */
@ -61,9 +59,9 @@ void init_cmplog_forkserver(char** argv) {
}
if (mem_limit) {
if (afl->fsrv.mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
r.rlim_max = r.rlim_cur = ((rlim_t)afl->fsrv.mem_limit) << 20;
#ifdef RLIMIT_AS
setrlimit(RLIMIT_AS, &r); /* Ignore errors */
@ -83,26 +81,26 @@ void init_cmplog_forkserver(char** argv) {
// r.rlim_max = r.rlim_cur = 0;
// setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
/* Isolate the process and configure standard descriptors. If out_file is
specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
/* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
setsid();
if (!get_afl_env("AFL_DEBUG_CHILD_OUTPUT")) {
dup2(dev_null_fd, 1);
dup2(dev_null_fd, 2);
dup2(afl->fsrv.dev_null_fd, 1);
dup2(afl->fsrv.dev_null_fd, 2);
}
if (!use_stdin) {
if (!afl->fsrv.use_stdin) {
dup2(dev_null_fd, 0);
dup2(afl->fsrv.dev_null_fd, 0);
} else {
dup2(out_fd, 0);
close(out_fd);
dup2(afl->fsrv.out_fd, 0);
close(afl->fsrv.out_fd);
}
@ -116,12 +114,12 @@ void init_cmplog_forkserver(char** argv) {
close(st_pipe[0]);
close(st_pipe[1]);
close(out_dir_fd);
close(dev_null_fd);
close(afl->fsrv.out_dir_fd);
close(afl->fsrv.dev_null_fd);
#ifndef HAVE_ARC4RANDOM
close(dev_urandom_fd);
close(afl->fsrv.dev_urandom_fd);
#endif
close(plot_file == NULL ? -1 : fileno(plot_file));
close(afl->fsrv.plot_file == NULL ? -1 : fileno(afl->fsrv.plot_file));
/* This should improve performance a bit, since it stops the linker from
doing extra work post-fork(). */
@ -152,13 +150,13 @@ void init_cmplog_forkserver(char** argv) {
setenv("___AFL_EINS_ZWEI_POLIZEI___", "1", 1);
if (!qemu_mode) argv[0] = cmplog_binary;
execv(argv[0], argv);
if (!afl->qemu_mode) afl->argv[0] = afl->cmplog_binary;
execv(afl->argv[0], afl->argv);
/* Use a distinctive bitmap signature to tell the parent about execv()
falling through. */
*(u32*)trace_bits = EXEC_FAIL_SIG;
*(u32*)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
exit(0);
}
@ -170,21 +168,21 @@ void init_cmplog_forkserver(char** argv) {
close(ctl_pipe[0]);
close(st_pipe[1]);
cmplog_fsrv_ctl_fd = ctl_pipe[1];
cmplog_fsrv_st_fd = st_pipe[0];
afl->cmplog_fsrv_ctl_fd = ctl_pipe[1];
afl->cmplog_fsrv_st_fd = st_pipe[0];
/* Wait for the fork server to come up, but don't wait too long. */
if (exec_tmout) {
if (afl->fsrv.exec_tmout) {
it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
it.it_value.tv_sec = ((afl->fsrv.exec_tmout * FORK_WAIT_MULT) / 1000);
it.it_value.tv_usec = ((afl->fsrv.exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
}
setitimer(ITIMER_REAL, &it, NULL);
rlen = read(cmplog_fsrv_st_fd, &status, 4);
rlen = read(afl->cmplog_fsrv_st_fd, &status, 4);
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
@ -201,16 +199,16 @@ void init_cmplog_forkserver(char** argv) {
}
if (child_timed_out)
if (afl->fsrv.child_timed_out)
FATAL(
"Timeout while initializing cmplog fork server (adjusting -t may "
"help)");
if (waitpid(cmplog_forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
if (waitpid(afl->cmplog_fsrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
if (WIFSIGNALED(status)) {
if (mem_limit && mem_limit < 500 && uses_asan) {
if (afl->fsrv.mem_limit && afl->fsrv.mem_limit < 500 && afl->fsrv.uses_asan) {
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, "
@ -222,7 +220,7 @@ void init_cmplog_forkserver(char** argv) {
" %s/notes_for_asan.md for help.\n",
doc_path);
} else if (!mem_limit) {
} else if (!afl->fsrv.mem_limit) {
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, "
@ -275,7 +273,7 @@ void init_cmplog_forkserver(char** argv) {
"options\n"
" fail, poke <afl-users@googlegroups.com> for troubleshooting "
"tips.\n",
DMS(mem_limit << 20), mem_limit - 1);
DMS(afl->fsrv.mem_limit << 20), afl->fsrv.mem_limit - 1);
}
@ -283,10 +281,10 @@ void init_cmplog_forkserver(char** argv) {
}
if (*(u32*)trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute target application ('%s')", argv[0]);
if (*(u32*)afl->fsrv.trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute target application ('%s')", afl->argv[0]);
if (mem_limit && mem_limit < 500 && uses_asan) {
if (afl->fsrv.mem_limit && afl->fsrv.mem_limit < 500 && afl->fsrv.uses_asan) {
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated "
@ -298,7 +296,7 @@ void init_cmplog_forkserver(char** argv) {
" read %s/notes_for_asan.md for help.\n",
doc_path);
} else if (!mem_limit) {
} else if (!afl->fsrv.mem_limit) {
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated "
@ -341,7 +339,7 @@ void init_cmplog_forkserver(char** argv) {
"never\n"
" reached before the program terminates.\n\n"
: "",
DMS(mem_limit << 20), mem_limit - 1);
DMS(afl->fsrv.mem_limit << 20), afl->fsrv.mem_limit - 1);
}
@ -349,7 +347,7 @@ void init_cmplog_forkserver(char** argv) {
}
u8 run_cmplog_target(char** argv, u32 timeout) {
u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
static struct itimerval it;
static u32 prev_timed_out = 0;
@ -358,13 +356,13 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
int status = 0;
u32 tb4;
child_timed_out = 0;
afl->fsrv.child_timed_out = 0;
/* After this memset, trace_bits[] are effectively volatile, so we
/* After this memset, afl->fsrv.trace_bits[] are effectively volatile, so we
must prevent any earlier operations from venturing into that
territory. */
memset(trace_bits, 0, MAP_SIZE);
memset(afl->fsrv.trace_bits, 0, MAP_SIZE);
MEM_BARRIER();
/* If we're running in "dumb" mode, we can't rely on the fork server
@ -372,19 +370,19 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
execve(). There is a bit of code duplication between here and
init_forkserver(), but c'est la vie. */
if (dumb_mode == 1 || no_forkserver) {
if (afl->dumb_mode == 1 || afl->no_forkserver) {
cmplog_child_pid = fork();
afl->cmplog_child_pid = fork();
if (cmplog_child_pid < 0) PFATAL("fork() failed");
if (afl->cmplog_child_pid < 0) PFATAL("fork() failed");
if (!cmplog_child_pid) {
if (!afl->cmplog_child_pid) {
struct rlimit r;
if (mem_limit) {
if (afl->fsrv.mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
r.rlim_max = r.rlim_cur = ((rlim_t)afl->fsrv.mem_limit) << 20;
#ifdef RLIMIT_AS
@ -402,33 +400,33 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
/* Isolate the process and configure standard descriptors. If out_file is
specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
/* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
setsid();
dup2(dev_null_fd, 1);
dup2(dev_null_fd, 2);
dup2(afl->fsrv.dev_null_fd, 1);
dup2(afl->fsrv.dev_null_fd, 2);
if (out_file) {
if (afl->fsrv.out_file) {
dup2(dev_null_fd, 0);
dup2(afl->fsrv.dev_null_fd, 0);
} else {
dup2(out_fd, 0);
close(out_fd);
dup2(afl->fsrv.out_fd, 0);
close(afl->fsrv.out_fd);
}
/* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */
close(dev_null_fd);
close(out_dir_fd);
close(afl->fsrv.dev_null_fd);
close(afl->fsrv.out_dir_fd);
#ifndef HAVE_ARC4RANDOM
close(dev_urandom_fd);
close(afl->fsrv.dev_urandom_fd);
#endif
close(fileno(plot_file));
close(fileno(afl->fsrv.plot_file));
/* Set sane defaults for ASAN if nothing else specified. */
@ -445,13 +443,13 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
setenv("___AFL_EINS_ZWEI_POLIZEI___", "1", 1);
if (!qemu_mode) argv[0] = cmplog_binary;
execv(argv[0], argv);
if (!afl->qemu_mode) afl->argv[0] = afl->cmplog_binary;
execv(afl->argv[0], afl->argv);
/* Use a distinctive bitmap value to tell the parent about execv()
falling through. */
*(u32*)trace_bits = EXEC_FAIL_SIG;
*(u32*)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
exit(0);
}
@ -463,23 +461,23 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
/* In non-dumb mode, we have the fork server up and running, so simply
tell it to have at it, and then read back PID. */
if ((res = write(cmplog_fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if ((res = write(afl->cmplog_fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if (stop_soon) return 0;
if (afl->stop_soon) return 0;
RPFATAL(res,
"Unable to request new process from cmplog fork server (OOM?)");
}
if ((res = read(cmplog_fsrv_st_fd, &cmplog_child_pid, 4)) != 4) {
if ((res = read(afl->cmplog_fsrv_st_fd, &afl->cmplog_child_pid, 4)) != 4) {
if (stop_soon) return 0;
if (afl->stop_soon) return 0;
RPFATAL(res,
"Unable to request new process from cmplog fork server (OOM?)");
}
if (cmplog_child_pid <= 0)
if (afl->cmplog_child_pid <= 0)
FATAL("Cmplog fork server is misbehaving (OOM?)");
}
@ -492,20 +490,20 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
setitimer(ITIMER_REAL, &it, NULL);
/* The SIGALRM handler simply kills the cmplog_child_pid and sets
* child_timed_out. */
/* The SIGALRM handler simply kills the afl->cmplog_child_pid and sets
* afl->fsrv.child_timed_out. */
if (dumb_mode == 1 || no_forkserver) {
if (afl->dumb_mode == 1 || afl->no_forkserver) {
if (waitpid(cmplog_child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
if (waitpid(afl->cmplog_child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
} else {
s32 res;
if ((res = read(cmplog_fsrv_st_fd, &status, 4)) != 4) {
if ((res = read(afl->cmplog_fsrv_st_fd, &status, 4)) != 4) {
if (stop_soon) return 0;
if (afl->stop_soon) return 0;
SAYF(
"\n" cLRD "[-] " cRST
"Unable to communicate with fork server. Some possible reasons:\n\n"
@ -520,50 +518,50 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
"culprit.\n\n"
"If all else fails you can disable the fork server via "
"AFL_NO_FORKSRV=1.\n",
mem_limit);
afl->fsrv.mem_limit);
RPFATAL(res, "Unable to communicate with fork server");
}
}
if (!WIFSTOPPED(status)) cmplog_child_pid = 0;
if (!WIFSTOPPED(status)) afl->cmplog_child_pid = 0;
getitimer(ITIMER_REAL, &it);
exec_ms =
(u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms;
if (afl->slowest_exec_ms < exec_ms) afl->slowest_exec_ms = exec_ms;
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
setitimer(ITIMER_REAL, &it, NULL);
++total_execs;
++afl->total_execs;
/* Any subsequent operations on trace_bits must not be moved by the
compiler below this point. Past this location, trace_bits[] behave
/* Any subsequent operations on afl->fsrv.trace_bits must not be moved by the
compiler below this point. Past this location, afl->fsrv.trace_bits[] behave
very normally and do not have to be treated as volatile. */
MEM_BARRIER();
tb4 = *(u32*)trace_bits;
tb4 = *(u32*)afl->fsrv.trace_bits;
#ifdef WORD_SIZE_64
classify_counts((u64*)trace_bits);
classify_counts((u64*)afl->fsrv.trace_bits);
#else
classify_counts((u32*)trace_bits);
classify_counts((u32*)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
prev_timed_out = child_timed_out;
prev_timed_out = afl->fsrv.child_timed_out;
/* Report outcome to caller. */
if (WIFSIGNALED(status) && !stop_soon) {
if (WIFSIGNALED(status) && !afl->stop_soon) {
kill_signal = WTERMSIG(status);
afl->kill_signal = WTERMSIG(status);
if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
if (afl->fsrv.child_timed_out && afl->kill_signal == SIGKILL) return FAULT_TMOUT;
return FAULT_CRASH;
@ -572,67 +570,67 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
/* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
must use a special exit code. */
if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
if (afl->fsrv.uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
kill_signal = 0;
afl->kill_signal = 0;
return FAULT_CRASH;
}
if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
if ((afl->dumb_mode == 1 || afl->no_forkserver) && tb4 == EXEC_FAIL_SIG)
return FAULT_ERROR;
return FAULT_NONE;
}
u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len) {
u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8* out_buf, u32 len) {
u8 fault;
if (post_handler) {
if (afl->post_handler) {
out_buf = post_handler(out_buf, &len);
out_buf = afl->post_handler(out_buf, &len);
if (!out_buf || !len) return 0;
}
write_to_testcase(out_buf, len);
write_to_testcase(afl, out_buf, len);
fault = run_cmplog_target(argv, exec_tmout);
fault = run_cmplog_target(afl, afl->fsrv.exec_tmout);
if (stop_soon) return 1;
if (afl->stop_soon) return 1;
if (fault == FAULT_TMOUT) {
if (subseq_tmouts++ > TMOUT_LIMIT) {
if (afl->subseq_tmouts++ > TMOUT_LIMIT) {
++cur_skipped_paths;
++afl->cur_skipped_paths;
return 1;
}
} else
subseq_tmouts = 0;
afl->subseq_tmouts = 0;
/* Users can hit us with SIGUSR1 to request the current input
to be abandoned. */
if (skip_requested) {
if (afl->skip_requested) {
skip_requested = 0;
++cur_skipped_paths;
afl->skip_requested = 0;
++afl->cur_skipped_paths;
return 1;
}
/* This handles FAULT_ERROR for us: */
/* queued_discovered += save_if_interesting(argv, out_buf, len, fault);
/* afl->queued_discovered += save_if_interesting(afl, argv, out_buf, len, fault);
if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
show_stats(); */
if (!(afl->stage_cur % afl->stats_update_freq) || afl->stage_cur + 1 == afl->stage_max)
show_stats(afl); */
return 0;

View File

@ -45,7 +45,7 @@ static int compare_extras_use_d(const void* p1, const void* p2) {
/* Read extras from a file, sort by size. */
void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
void load_extras_file(afl_state_t *afl, u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
FILE* f;
u8 buf[MAX_LINE];
@ -120,10 +120,10 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
/* Okay, let's allocate memory and copy data between "...", handling
\xNN escaping, \\, and \". */
extras =
ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
afl->extras =
ck_realloc_block(afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
wptr = afl->extras[afl->extras_cnt].data = ck_alloc(rptr - lptr);
while (*lptr) {
@ -164,16 +164,16 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
}
extras[extras_cnt].len = klen;
afl->extras[afl->extras_cnt].len = klen;
if (extras[extras_cnt].len > MAX_DICT_FILE)
if (afl->extras[afl->extras_cnt].len > MAX_DICT_FILE)
FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, DMS(klen),
DMS(MAX_DICT_FILE));
if (*min_len > klen) *min_len = klen;
if (*max_len < klen) *max_len = klen;
++extras_cnt;
++afl->extras_cnt;
}
@ -183,7 +183,7 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
/* Read extras from the extras directory and sort them by size. */
void load_extras(u8* dir) {
void load_extras(afl_state_t *afl, u8* dir) {
DIR* d;
struct dirent* de;
@ -207,7 +207,7 @@ void load_extras(u8* dir) {
if (errno == ENOTDIR) {
load_extras_file(dir, &min_len, &max_len, dict_level);
load_extras_file(afl, dir, &min_len, &max_len, dict_level);
goto check_and_sort;
}
@ -241,22 +241,22 @@ void load_extras(u8* dir) {
if (min_len > st.st_size) min_len = st.st_size;
if (max_len < st.st_size) max_len = st.st_size;
extras =
ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
afl->extras =
ck_realloc_block(afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data));
extras[extras_cnt].data = ck_alloc(st.st_size);
extras[extras_cnt].len = st.st_size;
afl->extras[afl->extras_cnt].data = ck_alloc(st.st_size);
afl->extras[afl->extras_cnt].len = st.st_size;
fd = open(fn, O_RDONLY);
if (fd < 0) PFATAL("Unable to open '%s'", fn);
ck_read(fd, extras[extras_cnt].data, st.st_size, fn);
ck_read(fd, afl->extras[afl->extras_cnt].data, st.st_size, fn);
close(fd);
ck_free(fn);
++extras_cnt;
++afl->extras_cnt;
}
@ -264,24 +264,24 @@ void load_extras(u8* dir) {
check_and_sort:
if (!extras_cnt) FATAL("No usable files in '%s'", dir);
if (!afl->extras_cnt) FATAL("No usable files in '%s'", dir);
qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
qsort(afl->extras, afl->extras_cnt, sizeof(struct extra_data), compare_extras_len);
OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, DMS(min_len),
OKF("Loaded %u extra tokens, size range %s to %s.", afl->extras_cnt, DMS(min_len),
DMS(max_len));
if (max_len > 32)
WARNF("Some tokens are relatively large (%s) - consider trimming.",
DMS(max_len));
if (extras_cnt > MAX_DET_EXTRAS)
if (afl->extras_cnt > MAX_DET_EXTRAS)
WARNF("More than %d tokens - will use them probabilistically.",
MAX_DET_EXTRAS);
}
/* Helper function for maybe_add_auto() */
/* Helper function for maybe_add_auto(afl, ) */
static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
@ -293,7 +293,7 @@ static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
/* Maybe add automatic extra. */
void maybe_add_auto(u8* mem, u32 len) {
void maybe_add_auto(afl_state_t *afl, u8* mem, u32 len) {
u32 i;
@ -336,22 +336,22 @@ void maybe_add_auto(u8* mem, u32 len) {
match. We optimize by exploiting the fact that extras[] are sorted
by size. */
for (i = 0; i < extras_cnt; ++i)
if (extras[i].len >= len) break;
for (i = 0; i < afl->extras_cnt; ++i)
if (afl->extras[i].len >= len) break;
for (; i < extras_cnt && extras[i].len == len; ++i)
if (!memcmp_nocase(extras[i].data, mem, len)) return;
for (; i < afl->extras_cnt && afl->extras[i].len == len; ++i)
if (!memcmp_nocase(afl->extras[i].data, mem, len)) return;
/* Last but not least, check a_extras[] for matches. There are no
/* Last but not least, check afl->a_extras[] for matches. There are no
guarantees of a particular sort order. */
auto_changed = 1;
afl->auto_changed = 1;
for (i = 0; i < a_extras_cnt; ++i) {
for (i = 0; i < afl->a_extras_cnt; ++i) {
if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) {
if (afl->a_extras[i].len == len && !memcmp_nocase(afl->a_extras[i].data, mem, len)) {
a_extras[i].hit_cnt++;
afl->a_extras[i].hit_cnt++;
goto sort_a_extras;
}
@ -362,24 +362,24 @@ void maybe_add_auto(u8* mem, u32 len) {
append it if we have room. Otherwise, let's randomly evict some other
entry from the bottom half of the list. */
if (a_extras_cnt < MAX_AUTO_EXTRAS) {
if (afl->a_extras_cnt < MAX_AUTO_EXTRAS) {
a_extras = ck_realloc_block(a_extras,
(a_extras_cnt + 1) * sizeof(struct extra_data));
afl->a_extras = ck_realloc_block(afl->a_extras,
(afl->a_extras_cnt + 1) * sizeof(struct extra_data));
a_extras[a_extras_cnt].data = ck_memdup(mem, len);
a_extras[a_extras_cnt].len = len;
++a_extras_cnt;
afl->a_extras[afl->a_extras_cnt].data = ck_memdup(mem, len);
afl->a_extras[afl->a_extras_cnt].len = len;
++afl->a_extras_cnt;
} else {
i = MAX_AUTO_EXTRAS / 2 + UR((MAX_AUTO_EXTRAS + 1) / 2);
i = MAX_AUTO_EXTRAS / 2 + UR(afl, (MAX_AUTO_EXTRAS + 1) / 2);
ck_free(a_extras[i].data);
ck_free(afl->a_extras[i].data);
a_extras[i].data = ck_memdup(mem, len);
a_extras[i].len = len;
a_extras[i].hit_cnt = 0;
afl->a_extras[i].data = ck_memdup(mem, len);
afl->a_extras[i].len = len;
afl->a_extras[i].hit_cnt = 0;
}
@ -387,35 +387,35 @@ sort_a_extras:
/* First, sort all auto extras by use count, descending order. */
qsort(a_extras, a_extras_cnt, sizeof(struct extra_data),
qsort(afl->a_extras, afl->a_extras_cnt, sizeof(struct extra_data),
compare_extras_use_d);
/* Then, sort the top USE_AUTO_EXTRAS entries by size. */
qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), sizeof(struct extra_data),
qsort(afl->a_extras, MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt), sizeof(struct extra_data),
compare_extras_len);
}
/* Save automatically generated extras. */
void save_auto(void) {
void save_auto(afl_state_t *afl) {
u32 i;
if (!auto_changed) return;
auto_changed = 0;
if (!afl->auto_changed) return;
afl->auto_changed = 0;
for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); ++i) {
for (i = 0; i < MIN(USE_AUTO_EXTRAS, afl->a_extras_cnt); ++i) {
u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i);
u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", afl->out_dir, i);
s32 fd;
fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
ck_write(fd, a_extras[i].data, a_extras[i].len, fn);
ck_write(fd, afl->a_extras[i].data, afl->a_extras[i].len, fn);
close(fd);
ck_free(fn);
@ -426,14 +426,14 @@ void save_auto(void) {
/* Load automatically generated extras. */
void load_auto(void) {
void load_auto(afl_state_t *afl) {
u32 i;
for (i = 0; i < USE_AUTO_EXTRAS; ++i) {
u8 tmp[MAX_AUTO_EXTRA + 1];
u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i);
u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", afl->in_dir, i);
s32 fd, len;
fd = open(fn, O_RDONLY, 0600);
@ -454,7 +454,7 @@ void load_auto(void) {
if (len < 0) PFATAL("Unable to read from '%s'", fn);
if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
maybe_add_auto(tmp, len);
maybe_add_auto(afl, tmp, len);
close(fd);
ck_free(fn);
@ -470,19 +470,19 @@ void load_auto(void) {
/* Destroy extras. */
void destroy_extras(void) {
void destroy_extras(afl_state_t *afl) {
u32 i;
for (i = 0; i < extras_cnt; ++i)
ck_free(extras[i].data);
for (i = 0; i < afl->extras_cnt; ++i)
ck_free(afl->extras[i].data);
ck_free(extras);
ck_free(afl->extras);
for (i = 0; i < a_extras_cnt; ++i)
ck_free(a_extras[i].data);
for (i = 0; i < afl->a_extras_cnt; ++i)
ck_free(afl->a_extras[i].data);
ck_free(a_extras);
ck_free(afl->a_extras);
}

View File

@ -25,255 +25,103 @@
#include "afl-fuzz.h"
/* MOpt:
Lots of globals, but mostly for the status UI and other things where it
really makes no sense to haul them around as function parameters. */
u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
double w_init = 0.9, w_end = 0.3, w_now;
s32 g_now;
s32 g_max = 5000;
u64 tmp_core_time;
s32 swarm_now;
double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
eff_best[swarm_num][operator_num], G_best[operator_num],
v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
swarm_fitness[swarm_num];
u64 stage_finds_puppet[swarm_num]
[operator_num], /* Patterns found per fuzz stage */
stage_finds_puppet_v2[swarm_num][operator_num],
stage_cycles_puppet_v2[swarm_num][operator_num],
stage_cycles_puppet_v3[swarm_num][operator_num],
stage_cycles_puppet[swarm_num][operator_num],
operator_finds_puppet[operator_num],
core_operator_finds_puppet[operator_num],
core_operator_finds_puppet_v2[operator_num],
core_operator_cycles_puppet[operator_num],
core_operator_cycles_puppet_v2[operator_num],
core_operator_cycles_puppet_v3[operator_num]; /* Execs per fuzz stage */
double period_pilot_tmp = 5000.0;
s32 key_lv;
u8 *in_dir, /* Input directory with test cases */
*out_dir, /* Working & output directory */
*tmp_dir, /* Temporary directory for input */
*sync_dir, /* Synchronization directory */
*sync_id, /* Fuzzer ID */
*power_name, /* Power schedule name */
*use_banner, /* Display banner */
*in_bitmap, /* Input bitmap */
*file_extension, /* File extension */
*orig_cmdline; /* Original command line */
u8 *doc_path, /* Path to documentation dir */
*infoexec, /* Command to execute on a new crash */
*out_file; /* File to fuzz, if any */
u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */
u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */
u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */
u8 cal_cycles = CAL_CYCLES, /* Calibration cycles defaults */
cal_cycles_long = CAL_CYCLES_LONG, /* Calibration cycles defaults */
debug, /* Debug mode */
no_unlink, /* do not unlink cur_input */
use_stdin = 1, /* use stdin for sending data */
be_quiet, /* is AFL_QUIET set? */
custom_only; /* Custom mutator only mode */
u32 stats_update_freq = 1; /* Stats update frequency (execs) */
char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe",
"lin", "quad", "exploit"};
u8 schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/
u8 havoc_max_mult = HAVOC_MAX_MULT;
u8 use_radamsa;
size_t (*radamsa_mutate_ptr)(u8 *, size_t, u8 *, size_t, u32);
u8 skip_deterministic, /* Skip deterministic stages? */
force_deterministic, /* Force deterministic stages? */
use_splicing, /* Recombine input files? */
dumb_mode, /* Run in non-instrumented mode? */
score_changed, /* Scoring for favorites changed? */
kill_signal, /* Signal that killed the child */
resuming_fuzz, /* Resuming an older fuzzing job? */
timeout_given, /* Specific timeout given? */
not_on_tty, /* stdout is not a tty */
term_too_small, /* terminal dimensions too small */
no_forkserver, /* Disable forkserver? */
crash_mode, /* Crash mode! Yeah! */
in_place_resume, /* Attempt in-place resume? */
autoresume, /* Resume if out_dir exists? */
auto_changed, /* Auto-generated tokens changed? */
no_cpu_meter_red, /* Feng shui on the status screen */
no_arith, /* Skip most arithmetic ops */
shuffle_queue, /* Shuffle input queue? */
bitmap_changed = 1, /* Time to update bitmap? */
qemu_mode, /* Running in QEMU mode? */
unicorn_mode, /* Running in Unicorn mode? */
use_wine, /* Use WINE with QEMU mode */
skip_requested, /* Skip request, via SIGUSR1 */
run_over10m, /* Run time over 10 minutes? */
persistent_mode, /* Running in persistent mode? */
deferred_mode, /* Deferred forkserver mode? */
fixed_seed, /* do not reseed */
fast_cal, /* Try to calibrate faster? */
uses_asan, /* Target uses ASAN? */
disable_trim; /* Never trim in fuzz_one */
s32 out_fd, /* Persistent fd for out_file */
#ifndef HAVE_ARC4RANDOM
dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */
#endif
dev_null_fd = -1, /* Persistent fd for /dev/null */
fsrv_ctl_fd, /* Fork server control pipe (write) */
fsrv_st_fd; /* Fork server status pipe (read) */
s32 forksrv_pid, /* PID of the fork server */
child_pid = -1, /* PID of the fuzzed program */
out_dir_fd = -1; /* FD of the lock file */
u8 *trace_bits; /* SHM with instrumentation bitmap */
u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */
virgin_tmout[MAP_SIZE], /* Bits we haven't seen in tmouts */
virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */
u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */
volatile u8 stop_soon, /* Ctrl-C pressed? */
clear_screen = 1, /* Window resized? */
child_timed_out; /* Traced process timed out? */
u32 queued_paths, /* Total number of queued testcases */
queued_variable, /* Testcases with variable behavior */
queued_at_start, /* Total number of initial inputs */
queued_discovered, /* Items discovered during this run */
queued_imported, /* Items imported via -S */
queued_favored, /* Paths deemed favorable */
queued_with_cov, /* Paths with new coverage bytes */
pending_not_fuzzed, /* Queued but not done yet */
pending_favored, /* Pending favored paths */
cur_skipped_paths, /* Abandoned inputs in cur cycle */
cur_depth, /* Current path depth */
max_depth, /* Max path depth */
useless_at_start, /* Number of useless starting paths */
var_byte_count, /* Bitmap bytes with var behavior */
current_entry, /* Current queue entry ID */
havoc_div = 1; /* Cycle count divisor for havoc */
u64 total_crashes, /* Total number of crashes */
unique_crashes, /* Crashes with unique signatures */
total_tmouts, /* Total number of timeouts */
unique_tmouts, /* Timeouts with unique signatures */
unique_hangs, /* Hangs with unique signatures */
total_execs, /* Total execve() calls */
slowest_exec_ms, /* Slowest testcase non hang in ms */
start_time, /* Unix start time (ms) */
last_path_time, /* Time for most recent path (ms) */
last_crash_time, /* Time for most recent crash (ms) */
last_hang_time, /* Time for most recent hang (ms) */
last_crash_execs, /* Exec counter at last crash */
queue_cycle, /* Queue round counter */
cycles_wo_finds, /* Cycles without any new paths */
trim_execs, /* Execs done to trim input files */
bytes_trim_in, /* Bytes coming into the trimmer */
bytes_trim_out, /* Bytes coming outa the trimmer */
blocks_eff_total, /* Blocks subject to effector maps */
blocks_eff_select; /* Blocks selected as fuzzable */
u32 subseq_tmouts; /* Number of timeouts in a row */
u8 *stage_name = "init", /* Name of the current fuzz stage */
*stage_short, /* Short stage name */
*syncing_party; /* Currently syncing with... */
s32 stage_cur, stage_max; /* Stage progression */
s32 splicing_with = -1; /* Splicing with which test case? */
u32 master_id, master_max; /* Master instance job splitting */
u32 syncing_case; /* Syncing with case #... */
s32 stage_cur_byte, /* Byte offset of current stage op */
stage_cur_val; /* Value used for stage op */
u8 stage_val_type; /* Value type (STAGE_VAL_*) */
u64 stage_finds[32], /* Patterns found per fuzz stage */
stage_cycles[32]; /* Execs per fuzz stage */
#ifndef HAVE_ARC4RANDOM
u32 rand_cnt; /* Random number counter */
#endif
u32 rand_seed[2];
s64 init_seed;
u64 total_cal_us, /* Total calibration time (us) */
total_cal_cycles; /* Total calibration cycles */
u64 total_bitmap_size, /* Total bit count for all bitmaps */
total_bitmap_entries; /* Number of bitmaps counted */
s32 cpu_core_count; /* CPU core count */
#ifdef HAVE_AFFINITY
s32 cpu_aff = -1; /* Selected CPU core */
#endif /* HAVE_AFFINITY */
FILE *plot_file; /* Gnuplot output file */
struct queue_entry *queue, /* Fuzzing queue (linked list) */
*queue_cur, /* Current offset within the queue */
*queue_top, /* Top of the list */
*q_prev100; /* Previous 100 marker */
struct queue_entry *top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */
struct extra_data *extras; /* Extra tokens to fuzz with */
u32 extras_cnt; /* Total number of tokens read */
struct extra_data *a_extras; /* Automatically selected extras */
u32 a_extras_cnt; /* Total number of tokens available */
u8 *(*post_handler)(u8 *buf, u32 *len);
u8 *cmplog_binary;
s32 cmplog_child_pid, cmplog_forksrv_pid;
/* Custom mutator */
struct custom_mutator *mutator;
/* Interesting values, as per config.h */
s8 interesting_8[] = {INTERESTING_8};
s16 interesting_16[] = {INTERESTING_8, INTERESTING_16};
s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};
/* Python stuff */
#ifdef USE_PYTHON
char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe",
"lin", "quad", "exploit"};
PyObject *py_module;
PyObject *py_functions[PY_FUNC_COUNT];
u8 *doc_path = NULL; /* gath to documentation dir */
/* Initialize MOpt "globals" for this afl state */
static void init_mopt_globals(afl_state_t *afl){
MOpt_globals_t *core = &afl->mopt_globals_pilot;
core->finds = afl->core_operator_finds_puppet;
core->finds_v2 = afl->core_operator_finds_puppet_v2;
core->cycles = afl->core_operator_cycles_puppet;
core->cycles_v2 = afl->core_operator_cycles_puppet_v2;
core->cycles_v3 = afl->core_operator_cycles_puppet_v3;
core->is_pilot_mode = 0;
core->pTime = &afl->tmp_core_time;
core->period = period_core;
core->havoc_stagename = "MOpt-core-havoc";
core->splice_stageformat = "MOpt-core-splice %u";
core->havoc_stagenameshort = "MOpt_core_havoc";
core->splice_stagenameshort = "MOpt_core_splice";
MOpt_globals_t *pilot = &afl->mopt_globals_pilot;
pilot->finds = afl->stage_finds_puppet[0];
pilot->finds_v2 = afl->stage_finds_puppet_v2[0];
pilot->cycles = afl->stage_cycles_puppet[0];
pilot->cycles_v2 = afl->stage_cycles_puppet_v2[0];
pilot->cycles_v3 = afl->stage_cycles_puppet_v3[0];
pilot->is_pilot_mode = 1;
pilot->pTime = &afl->tmp_pilot_time;
pilot->period = period_pilot;
pilot->havoc_stagename = "MOpt-havoc";
pilot->splice_stageformat = "MOpt-splice %u";
pilot->havoc_stagenameshort = "MOpt_havoc";
pilot->splice_stagenameshort = "MOpt_splice";
}
/* A global pointer to all instances is needed (for now) for signals to arrive */
list_t afl_states = {0};
/* Initializes an afl_state_t with its default configuration and registers it
   in the global afl_states list (used by signal handlers to reach every
   instance). Fields not touched here are assumed to be zeroed by the caller.
   NOTE(review): unmatched deinit — afl_state_deinit only unregisters, it does
   not free; ownership stays with the caller. */
void afl_state_init(afl_state_t *afl) {

  /* MOpt power-schedule weights and pilot period. */
  afl->w_init = 0.9;
  afl->w_end = 0.3;
  afl->g_max = 5000;
  afl->period_pilot_tmp = 5000.0;

  afl->schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/
  afl->havoc_max_mult = HAVOC_MAX_MULT;
  afl->clear_screen = 1; /* Window resized? */
  afl->havoc_div = 1; /* Cycle count divisor for havoc */
  afl->stage_name = "init"; /* Name of the current fuzz stage */
  afl->splicing_with = -1; /* Splicing with which test case? */
#ifdef HAVE_AFFINITY
  afl->cpu_aff = -1; /* Selected CPU core */
#endif /* HAVE_AFFINITY */

  /* Forkserver defaults; descriptors/pids start at -1 so cleanup code can
     tell "never opened" apart from fd 0. */
  afl->fsrv.use_stdin = 1;
  afl->cal_cycles = CAL_CYCLES;
  afl->cal_cycles_long = CAL_CYCLES_LONG;
  afl->fsrv.exec_tmout = EXEC_TIMEOUT;
  afl->hang_tmout = EXEC_TIMEOUT;
  afl->fsrv.mem_limit = MEM_LIMIT;
  afl->stats_update_freq = 1;
#ifndef HAVE_ARC4RANDOM
  afl->fsrv.dev_urandom_fd = -1;
#endif
  afl->fsrv.dev_null_fd = -1;
  afl->fsrv.child_pid = -1;
  afl->fsrv.out_dir_fd = -1;

  /* BUGFIX: removed dead local declarations (`u8 do_document;
     u32 document_counter;`) previously guarded by _AFL_DOCUMENT_MUTATIONS —
     they were never read or written and only produced unused-variable
     warnings when that macro was defined. */

  init_mopt_globals(afl);
  list_append(&afl_states, afl);
}
/* Unregisters this afl_state instance from the global afl_states list.
   NOTE(review): despite the previous comment ("... and frees it"), this does
   NOT free *afl or any of its members — the caller retains ownership of the
   memory; confirm callers release it to avoid leaks. */
void afl_state_deinit(afl_state_t *afl) {
  list_remove(&afl_states, afl);
}

File diff suppressed because it is too large Load Diff

View File

@ -25,25 +25,24 @@
#include "afl-fuzz.h"
void load_custom_mutator(const char*);
void load_custom_mutator(afl_state_t*, const char*);
#ifdef USE_PYTHON
void load_custom_mutator_py(const char*);
void load_custom_mutator_py(afl_state_t*, const char*);
#endif
void setup_custom_mutator(void) {
void setup_custom_mutator(afl_state_t *afl) {
/* Try mutator library first */
u8* fn = getenv("AFL_CUSTOM_MUTATOR_LIBRARY");
if (fn) {
if (limit_time_sig)
if (afl->limit_time_sig)
FATAL(
"MOpt and custom mutator are mutually exclusive. We accept pull "
"requests that integrates MOpt with the optional mutators "
"(custom/radamsa/redquenn/...).");
load_custom_mutator(fn);
load_custom_mutator(afl, fn);
return;
@ -55,16 +54,16 @@ void setup_custom_mutator(void) {
if (module_name) {
if (limit_time_sig)
if (afl->limit_time_sig)
FATAL(
"MOpt and Python mutator are mutually exclusive. We accept pull "
"requests that integrates MOpt with the optional mutators "
"(custom/radamsa/redquenn/...).");
if (init_py_module(module_name))
if (init_py_module(afl, module_name))
FATAL("Failed to initialize Python module");
load_custom_mutator_py(module_name);
load_custom_mutator_py(afl, module_name);
}
@ -75,82 +74,81 @@ void setup_custom_mutator(void) {
}
void destroy_custom_mutator(void) {
void destroy_custom_mutator(afl_state_t *afl) {
if (mutator) {
if (afl->mutator) {
if (mutator->dh)
dlclose(mutator->dh);
if (afl->mutator->dh)
dlclose(afl->mutator->dh);
else {
/* Python mutator */
#ifdef USE_PYTHON
finalize_py_module();
finalize_py_module(afl);
#endif
}
ck_free(mutator);
ck_free(afl->mutator);
}
}
void load_custom_mutator(const char* fn) {
void load_custom_mutator(afl_state_t *afl, const char *fn) {
void* dh;
mutator = ck_alloc(sizeof(struct custom_mutator));
afl->mutator = ck_alloc(sizeof(struct custom_mutator));
mutator->name = fn;
afl->mutator->name = fn;
ACTF("Loading custom mutator library from '%s'...", fn);
dh = dlopen(fn, RTLD_NOW);
if (!dh) FATAL("%s", dlerror());
mutator->dh = dh;
afl->mutator->dh = dh;
/* Mutator */
/* "afl_custom_init", optional for backward compatibility */
mutator->afl_custom_init = dlsym(dh, "afl_custom_init");
if (!mutator->afl_custom_init) WARNF("Symbol 'afl_custom_init' not found.");
afl->mutator->afl_custom_init = dlsym(dh, "afl_custom_init");
if (!afl->mutator->afl_custom_init) WARNF("Symbol 'afl_custom_init' not found.");
/* "afl_custom_fuzz" or "afl_custom_mutator", required */
mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_fuzz");
if (!mutator->afl_custom_fuzz) {
afl->mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_fuzz");
if (!afl->mutator->afl_custom_fuzz) {
/* Try "afl_custom_mutator" for backward compatibility */
WARNF("Symbol 'afl_custom_fuzz' not found. Try 'afl_custom_mutator'.");
mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_mutator");
if (!mutator->afl_custom_fuzz)
afl->mutator->afl_custom_fuzz = dlsym(dh, "afl_custom_mutator");
if (!afl->mutator->afl_custom_fuzz)
FATAL("Symbol 'afl_custom_mutator' not found.");
}
/* "afl_custom_pre_save", optional */
mutator->afl_custom_pre_save = dlsym(dh, "afl_custom_pre_save");
if (!mutator->afl_custom_pre_save)
afl->mutator->afl_custom_pre_save = dlsym(dh, "afl_custom_pre_save");
if (!afl->mutator->afl_custom_pre_save)
WARNF("Symbol 'afl_custom_pre_save' not found.");
u8 notrim = 0;
/* "afl_custom_init_trim", optional */
mutator->afl_custom_init_trim = dlsym(dh, "afl_custom_init_trim");
if (!mutator->afl_custom_init_trim)
afl->mutator->afl_custom_init_trim = dlsym(dh, "afl_custom_init_trim");
if (!afl->mutator->afl_custom_init_trim)
WARNF("Symbol 'afl_custom_init_trim' not found.");
/* "afl_custom_trim", optional */
mutator->afl_custom_trim = dlsym(dh, "afl_custom_trim");
if (!mutator->afl_custom_trim) WARNF("Symbol 'afl_custom_trim' not found.");
afl->mutator->afl_custom_trim = dlsym(dh, "afl_custom_trim");
if (!afl->mutator->afl_custom_trim) WARNF("Symbol 'afl_custom_trim' not found.");
/* "afl_custom_post_trim", optional */
mutator->afl_custom_post_trim = dlsym(dh, "afl_custom_post_trim");
if (!mutator->afl_custom_post_trim)
afl->mutator->afl_custom_post_trim = dlsym(dh, "afl_custom_post_trim");
if (!afl->mutator->afl_custom_post_trim)
WARNF("Symbol 'afl_custom_post_trim' not found.");
if (notrim) {
mutator->afl_custom_init_trim = NULL;
mutator->afl_custom_trim = NULL;
mutator->afl_custom_post_trim = NULL;
afl->mutator->afl_custom_init_trim = NULL;
afl->mutator->afl_custom_trim = NULL;
afl->mutator->afl_custom_post_trim = NULL;
WARNF(
"Custom mutator does not implement all three trim APIs, standard "
"trimming will be used.");
@ -158,34 +156,35 @@ void load_custom_mutator(const char* fn) {
}
/* "afl_custom_havoc_mutation", optional */
mutator->afl_custom_havoc_mutation = dlsym(dh, "afl_custom_havoc_mutation");
if (!mutator->afl_custom_havoc_mutation)
afl->mutator->afl_custom_havoc_mutation = dlsym(dh, "afl_custom_havoc_mutation");
if (!afl->mutator->afl_custom_havoc_mutation)
WARNF("Symbol 'afl_custom_havoc_mutation' not found.");
/* "afl_custom_havoc_mutation", optional */
mutator->afl_custom_havoc_mutation_probability =
afl->mutator->afl_custom_havoc_mutation_probability =
dlsym(dh, "afl_custom_havoc_mutation_probability");
if (!mutator->afl_custom_havoc_mutation_probability)
if (!afl->mutator->afl_custom_havoc_mutation_probability)
WARNF("Symbol 'afl_custom_havoc_mutation_probability' not found.");
/* "afl_custom_queue_get", optional */
mutator->afl_custom_queue_get = dlsym(dh, "afl_custom_queue_get");
if (!mutator->afl_custom_queue_get)
afl->mutator->afl_custom_queue_get = dlsym(dh, "afl_custom_queue_get");
if (!afl->mutator->afl_custom_queue_get)
WARNF("Symbol 'afl_custom_queue_get' not found.");
/* "afl_custom_queue_new_entry", optional */
mutator->afl_custom_queue_new_entry = dlsym(dh, "afl_custom_queue_new_entry");
if (!mutator->afl_custom_queue_new_entry)
afl->mutator->afl_custom_queue_new_entry = dlsym(dh, "afl_custom_queue_new_entry");
if (!afl->mutator->afl_custom_queue_new_entry)
WARNF("Symbol 'afl_custom_queue_new_entry' not found");
OKF("Custom mutator '%s' installed successfully.", fn);
/* Initialize the custom mutator */
if (mutator->afl_custom_init) mutator->afl_custom_init(UR(0xFFFFFFFF));
if (afl->mutator->afl_custom_init)
afl->mutator->afl_custom_init(afl, UR(afl, 0xFFFFFFFF));
}
u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
u8 trim_case_custom(afl_state_t *afl, struct queue_entry* q, u8* in_buf) {
static u8 tmp[64];
static u8 clean_trace[MAP_SIZE];
@ -194,18 +193,18 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
u32 trim_exec = 0;
u32 orig_len = q->len;
stage_name = tmp;
bytes_trim_in += q->len;
afl->stage_name = tmp;
afl->bytes_trim_in += q->len;
/* Initialize trimming in the custom mutator */
stage_cur = 0;
stage_max = mutator->afl_custom_init_trim(in_buf, q->len);
afl->stage_cur = 0;
afl->stage_max = afl->mutator->afl_custom_init_trim(afl, in_buf, q->len);
if (not_on_tty && debug)
SAYF("[Custom Trimming] START: Max %d iterations, %u bytes", stage_max,
if (afl->not_on_tty && afl->debug)
SAYF("[Custom Trimming] START: Max %d iterations, %u bytes", afl->stage_max,
q->len);
while (stage_cur < stage_max) {
while (afl->stage_cur < afl->stage_max) {
sprintf(tmp, "ptrim %s", DI(trim_exec));
@ -214,26 +213,26 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
u8* retbuf = NULL;
size_t retlen = 0;
mutator->afl_custom_trim(&retbuf, &retlen);
afl->mutator->afl_custom_trim(afl, &retbuf, &retlen);
if (retlen > orig_len)
FATAL(
"Trimmed data returned by custom mutator is larger than original "
"data");
write_to_testcase(retbuf, retlen);
write_to_testcase(afl, retbuf, retlen);
fault = run_target(argv, exec_tmout);
++trim_execs;
fault = run_target(afl, afl->fsrv.exec_tmout);
++afl->trim_execs;
if (stop_soon || fault == FAULT_ERROR) {
if (afl->stop_soon || fault == FAULT_ERROR) {
free(retbuf);
goto abort_trimming;
}
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
if (cksum == q->exec_cksum) {
@ -246,24 +245,24 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
if (!needs_write) {
needs_write = 1;
memcpy(clean_trace, trace_bits, MAP_SIZE);
memcpy(clean_trace, afl->fsrv.trace_bits, MAP_SIZE);
}
/* Tell the custom mutator that the trimming was successful */
stage_cur = mutator->afl_custom_post_trim(1);
afl->stage_cur = afl->mutator->afl_custom_post_trim(afl, 1);
if (not_on_tty && debug)
if (afl->not_on_tty && afl->debug)
SAYF("[Custom Trimming] SUCCESS: %d/%d iterations (now at %u bytes)",
stage_cur, stage_max, q->len);
afl->stage_cur, afl->stage_max, q->len);
} else {
/* Tell the custom mutator that the trimming was unsuccessful */
stage_cur = mutator->afl_custom_post_trim(0);
if (not_on_tty && debug)
SAYF("[Custom Trimming] FAILURE: %d/%d iterations", stage_cur,
stage_max);
afl->stage_cur = afl->mutator->afl_custom_post_trim(afl, 0);
if (afl->not_on_tty && afl->debug)
SAYF("[Custom Trimming] FAILURE: %d/%d iterations", afl->stage_cur,
afl->stage_max);
}
@ -271,11 +270,11 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
/* Since this can be slow, update the screen every now and then. */
if (!(trim_exec++ % stats_update_freq)) show_stats();
if (!(trim_exec++ % afl->stats_update_freq)) show_stats(afl);
}
if (not_on_tty && debug)
if (afl->not_on_tty && afl->debug)
SAYF("[Custom Trimming] DONE: %u bytes -> %u bytes", orig_len, q->len);
/* If we have made changes to in_buf, we also need to update the on-disk
@ -294,60 +293,65 @@ u8 trim_case_custom(char** argv, struct queue_entry* q, u8* in_buf) {
ck_write(fd, in_buf, q->len, q->fname);
close(fd);
memcpy(trace_bits, clean_trace, MAP_SIZE);
update_bitmap_score(q);
memcpy(afl->fsrv.trace_bits, clean_trace, MAP_SIZE);
update_bitmap_score(afl, q);
}
abort_trimming:
bytes_trim_out += q->len;
afl->bytes_trim_out += q->len;
return fault;
}
#ifdef USE_PYTHON
void load_custom_mutator_py(const char* module_name) {
void load_custom_mutator_py(afl_state_t *afl, const char* module_name) {
mutator = ck_alloc(sizeof(struct custom_mutator));
PyObject **py_functions = afl->py_functions;
mutator->name = module_name;
afl->mutator = ck_alloc(sizeof(struct custom_mutator));
afl->mutator->name = module_name;
ACTF("Loading Python mutator library from '%s'...", module_name);
if (py_functions[PY_FUNC_INIT]) mutator->afl_custom_init = init_py;
if (py_functions[PY_FUNC_INIT])
afl->mutator->afl_custom_init = init_py;
/* "afl_custom_fuzz" should not be NULL, but the interface of Python mutator
is quite different from the custom mutator. */
mutator->afl_custom_fuzz = fuzz_py;
afl->mutator->afl_custom_fuzz = fuzz_py;
if (py_functions[PY_FUNC_PRE_SAVE])
mutator->afl_custom_pre_save = pre_save_py;
afl->mutator->afl_custom_pre_save = pre_save_py;
if (py_functions[PY_FUNC_INIT_TRIM])
mutator->afl_custom_init_trim = init_trim_py;
afl->mutator->afl_custom_init_trim = init_trim_py;
if (py_functions[PY_FUNC_POST_TRIM])
mutator->afl_custom_post_trim = post_trim_py;
afl->mutator->afl_custom_post_trim = post_trim_py;
if (py_functions[PY_FUNC_TRIM]) mutator->afl_custom_trim = trim_py;
if (py_functions[PY_FUNC_TRIM])
afl->mutator->afl_custom_trim = trim_py;
if (py_functions[PY_FUNC_HAVOC_MUTATION])
mutator->afl_custom_havoc_mutation = havoc_mutation_py;
afl->mutator->afl_custom_havoc_mutation = havoc_mutation_py;
if (py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY])
mutator->afl_custom_havoc_mutation_probability =
havoc_mutation_probability_py;
afl->mutator->afl_custom_havoc_mutation_probability =
havoc_mutation_probability_py;
if (py_functions[PY_FUNC_QUEUE_GET])
mutator->afl_custom_queue_get = queue_get_py;
afl->mutator->afl_custom_queue_get = queue_get_py;
if (py_functions[PY_FUNC_QUEUE_NEW_ENTRY])
mutator->afl_custom_queue_new_entry = queue_new_entry_py;
afl->mutator->afl_custom_queue_new_entry = queue_new_entry_py;
OKF("Python mutator '%s' installed successfully.", module_name);
/* Initialize the custom mutator */
if (mutator->afl_custom_init) mutator->afl_custom_init(UR(0xFFFFFFFF));
if (afl->mutator->afl_custom_init)
afl->mutator->afl_custom_init(afl, UR(afl, 0xFFFFFFFF));
}

File diff suppressed because it is too large Load Diff

View File

@ -28,7 +28,7 @@
/* Python stuff */
#ifdef USE_PYTHON
int init_py_module(u8* module_name) {
int init_py_module(afl_state_t *afl, u8* module_name) {
if (!module_name) return 1;
@ -40,14 +40,17 @@ int init_py_module(u8* module_name) {
PyObject* py_name = PyString_FromString(module_name);
#endif
py_module = PyImport_Import(py_name);
afl->py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
if (py_module != NULL) {
PyObject *py_module = afl->py_module;
PyObject **py_functions = afl->py_functions;
if (afl->py_module != NULL) {
u8 py_notrim = 0, py_idx;
py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init");
py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz");
py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(afl->py_module, "init");
py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(afl->py_module, "fuzz");
py_functions[PY_FUNC_PRE_SAVE] =
PyObject_GetAttrString(py_module, "pre_save");
py_functions[PY_FUNC_INIT_TRIM] =
@ -124,15 +127,15 @@ int init_py_module(u8* module_name) {
}
void finalize_py_module() {
void finalize_py_module(afl_state_t *afl) {
if (py_module != NULL) {
if (afl->py_module != NULL) {
u32 i;
for (i = 0; i < PY_FUNC_COUNT; ++i)
Py_XDECREF(py_functions[i]);
Py_XDECREF(afl->py_functions[i]);
Py_DECREF(py_module);
Py_DECREF(afl->py_module);
}
@ -140,8 +143,7 @@ void finalize_py_module() {
}
void init_py(unsigned int seed) {
void init_py(afl_state_t *afl, unsigned int seed) {
PyObject *py_args, *py_value;
/* Provide the init function a seed for the Python RNG */
@ -162,7 +164,7 @@ void init_py(unsigned int seed) {
PyTuple_SetItem(py_args, 0, py_value);
py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_INIT], py_args);
Py_DECREF(py_args);
@ -176,8 +178,8 @@ void init_py(unsigned int seed) {
}
size_t fuzz_py(u8** buf, size_t buf_size, u8* add_buf, size_t add_buf_size,
size_t max_size) {
size_t fuzz_py(afl_state_t *afl, u8** buf, size_t buf_size, u8* add_buf,
size_t add_buf_size, size_t max_size) {
size_t mutated_size;
PyObject *py_args, *py_value;
@ -220,7 +222,7 @@ size_t fuzz_py(u8** buf, size_t buf_size, u8* add_buf, size_t add_buf_size,
PyTuple_SetItem(py_args, 2, py_value);
py_value = PyObject_CallObject(py_functions[PY_FUNC_FUZZ], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_FUZZ], py_args);
Py_DECREF(py_args);
@ -242,7 +244,7 @@ size_t fuzz_py(u8** buf, size_t buf_size, u8* add_buf, size_t add_buf_size,
}
size_t pre_save_py(u8* buf, size_t buf_size, u8** out_buf) {
size_t pre_save_py(afl_state_t *afl, u8* buf, size_t buf_size, u8** out_buf) {
size_t out_buf_size;
PyObject *py_args, *py_value;
@ -257,7 +259,7 @@ size_t pre_save_py(u8* buf, size_t buf_size, u8** out_buf) {
PyTuple_SetItem(py_args, 0, py_value);
py_value = PyObject_CallObject(py_functions[PY_FUNC_PRE_SAVE], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_PRE_SAVE], py_args);
Py_DECREF(py_args);
@ -278,7 +280,7 @@ size_t pre_save_py(u8* buf, size_t buf_size, u8** out_buf) {
}
u32 init_trim_py(u8* buf, size_t buf_size) {
u32 init_trim_py(afl_state_t *afl, u8* buf, size_t buf_size) {
PyObject *py_args, *py_value;
@ -293,7 +295,7 @@ u32 init_trim_py(u8* buf, size_t buf_size) {
PyTuple_SetItem(py_args, 0, py_value);
py_value = PyObject_CallObject(py_functions[PY_FUNC_INIT_TRIM], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_INIT_TRIM], py_args);
Py_DECREF(py_args);
if (py_value != NULL) {
@ -315,7 +317,7 @@ u32 init_trim_py(u8* buf, size_t buf_size) {
}
u32 post_trim_py(u8 success) {
u32 post_trim_py(afl_state_t *afl, u8 success) {
PyObject *py_args, *py_value;
@ -331,7 +333,7 @@ u32 post_trim_py(u8 success) {
PyTuple_SetItem(py_args, 0, py_value);
py_value = PyObject_CallObject(py_functions[PY_FUNC_POST_TRIM], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_POST_TRIM], py_args);
Py_DECREF(py_args);
if (py_value != NULL) {
@ -353,12 +355,12 @@ u32 post_trim_py(u8 success) {
}
void trim_py(u8** out_buf, size_t* out_buf_size) {
void trim_py(afl_state_t *afl, u8** out_buf, size_t* out_buf_size) {
PyObject *py_args, *py_value;
py_args = PyTuple_New(0);
py_value = PyObject_CallObject(py_functions[PY_FUNC_TRIM], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_TRIM], py_args);
Py_DECREF(py_args);
if (py_value != NULL) {
@ -377,7 +379,7 @@ void trim_py(u8** out_buf, size_t* out_buf_size) {
}
size_t havoc_mutation_py(u8** buf, size_t buf_size, size_t max_size) {
size_t havoc_mutation_py(afl_state_t *afl, u8** buf, size_t buf_size, size_t max_size) {
size_t mutated_size;
PyObject *py_args, *py_value;
@ -409,7 +411,7 @@ size_t havoc_mutation_py(u8** buf, size_t buf_size, size_t max_size) {
PyTuple_SetItem(py_args, 1, py_value);
py_value = PyObject_CallObject(py_functions[PY_FUNC_HAVOC_MUTATION], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_HAVOC_MUTATION], py_args);
Py_DECREF(py_args);
@ -432,13 +434,13 @@ size_t havoc_mutation_py(u8** buf, size_t buf_size, size_t max_size) {
}
u8 havoc_mutation_probability_py(void) {
u8 havoc_mutation_probability_py(afl_state_t *afl) {
PyObject *py_args, *py_value;
py_args = PyTuple_New(0);
py_value = PyObject_CallObject(
py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_HAVOC_MUTATION_PROBABILITY],
py_args);
Py_DECREF(py_args);
if (py_value != NULL) {
@ -456,7 +458,7 @@ u8 havoc_mutation_probability_py(void) {
}
u8 queue_get_py(const u8* filename) {
u8 queue_get_py(afl_state_t *afl, const u8* filename) {
PyObject *py_args, *py_value;
@ -478,7 +480,7 @@ u8 queue_get_py(const u8* filename) {
PyTuple_SetItem(py_args, 0, py_value);
// Call Python function
py_value = PyObject_CallObject(py_functions[PY_FUNC_QUEUE_GET], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_QUEUE_GET], py_args);
Py_DECREF(py_args);
if (py_value != NULL) {
@ -504,7 +506,7 @@ u8 queue_get_py(const u8* filename) {
}
void queue_new_entry_py(const u8* filename_new_queue,
void queue_new_entry_py(afl_state_t *afl, const u8* filename_new_queue,
const u8* filename_orig_queue) {
PyObject *py_args, *py_value;
@ -547,8 +549,8 @@ void queue_new_entry_py(const u8* filename_new_queue,
PyTuple_SetItem(py_args, 1, py_value);
// Call
py_value =
PyObject_CallObject(py_functions[PY_FUNC_QUEUE_NEW_ENTRY], py_args);
py_value = PyObject_CallObject(afl->py_functions[PY_FUNC_QUEUE_NEW_ENTRY],
py_args);
Py_DECREF(py_args);
if (py_value == NULL) {

View File

@ -28,12 +28,12 @@
.state file to avoid repeating deterministic fuzzing when resuming aborted
scans. */
void mark_as_det_done(struct queue_entry* q) {
void mark_as_det_done(afl_state_t *afl, struct queue_entry* q) {
u8* fn = strrchr(q->fname, '/');
s32 fd;
fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
fn = alloc_printf("%s/queue/.state/deterministic_done/%s", afl->out_dir, fn + 1);
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
@ -48,12 +48,12 @@ void mark_as_det_done(struct queue_entry* q) {
/* Mark as variable. Create symlinks if possible to make it easier to examine
the files. */
void mark_as_variable(struct queue_entry* q) {
void mark_as_variable(afl_state_t *afl, struct queue_entry* q) {
u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
ldest = alloc_printf("../../%s", fn);
fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
fn = alloc_printf("%s/queue/.state/variable_behavior/%s", afl->out_dir, fn);
if (symlink(ldest, fn)) {
@ -73,7 +73,7 @@ void mark_as_variable(struct queue_entry* q) {
/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
but may be useful for post-processing datasets. */
void mark_as_redundant(struct queue_entry* q, u8 state) {
void mark_as_redundant(afl_state_t *afl, struct queue_entry* q, u8 state) {
u8* fn;
@ -82,7 +82,7 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
q->fs_redundant = state;
fn = strrchr(q->fname, '/');
fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
fn = alloc_printf("%s/queue/.state/redundant_edges/%s", afl->out_dir, fn + 1);
if (state) {
@ -104,49 +104,49 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
/* Append new test case to the queue. */
void add_to_queue(u8* fname, u32 len, u8 passed_det) {
void add_to_queue(afl_state_t *afl, u8* fname, u32 len, u8 passed_det) {
struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
q->fname = fname;
q->len = len;
q->depth = cur_depth + 1;
q->depth = afl->cur_depth + 1;
q->passed_det = passed_det;
q->n_fuzz = 1;
if (q->depth > max_depth) max_depth = q->depth;
if (q->depth > afl->max_depth) afl->max_depth = q->depth;
if (queue_top) {
if (afl->queue_top) {
queue_top->next = q;
queue_top = q;
afl->queue_top->next = q;
afl->queue_top = q;
} else
q_prev100 = queue = queue_top = q;
afl->q_prev100 = afl->queue = afl->queue_top = q;
++queued_paths;
++pending_not_fuzzed;
++afl->queued_paths;
++afl->pending_not_fuzzed;
cycles_wo_finds = 0;
afl->cycles_wo_finds = 0;
if (!(queued_paths % 100)) {
if (!(afl->queued_paths % 100)) {
q_prev100->next_100 = q;
q_prev100 = q;
afl->q_prev100->next_100 = q;
afl->q_prev100 = q;
}
last_path_time = get_cur_time();
afl->last_path_time = get_cur_time();
if (mutator && mutator->afl_custom_queue_new_entry) {
if (afl->mutator && afl->mutator->afl_custom_queue_new_entry) {
u8* fname_orig = NULL;
/* At the initialization stage, queue_cur is NULL */
if (queue_cur) fname_orig = queue_cur->fname;
if (afl->queue_cur) fname_orig = afl->queue_cur->fname;
mutator->afl_custom_queue_new_entry(fname, fname_orig);
afl->mutator->afl_custom_queue_new_entry(afl, fname, fname_orig);
}
@ -154,9 +154,9 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
/* Destroy the entire queue. */
void destroy_queue(void) {
void destroy_queue(afl_state_t *afl) {
struct queue_entry *q = queue, *n;
struct queue_entry *q = afl->queue, *n;
while (q) {
@ -176,28 +176,28 @@ void destroy_queue(void) {
seen in the bitmap so far, and focus on fuzzing them at the expense of
the rest.
The first step of the process is to maintain a list of top_rated[] entries
The first step of the process is to maintain a list of afl->top_rated[] entries
for every byte in the bitmap. We win that slot if there is no previous
contender, or if the contender has a more favorable speed x size factor. */
void update_bitmap_score(struct queue_entry* q) {
void update_bitmap_score(afl_state_t *afl, struct queue_entry* q) {
u32 i;
u64 fav_factor = q->exec_us * q->len;
u64 fuzz_p2 = next_p2(q->n_fuzz);
/* For every byte set in trace_bits[], see if there is a previous winner,
/* For every byte set in afl->fsrv.trace_bits[], see if there is a previous winner,
and how it compares to us. */
for (i = 0; i < MAP_SIZE; ++i)
if (trace_bits[i]) {
if (afl->fsrv.trace_bits[i]) {
if (top_rated[i]) {
if (afl->top_rated[i]) {
/* Faster-executing or smaller test cases are favored. */
u64 top_rated_fuzz_p2 = next_p2(top_rated[i]->n_fuzz);
u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
u64 top_rated_fuzz_p2 = next_p2(afl->top_rated[i]->n_fuzz);
u64 top_rated_fav_factor = afl->top_rated[i]->exec_us * afl->top_rated[i]->len;
if (fuzz_p2 > top_rated_fuzz_p2) {
@ -209,15 +209,15 @@ void update_bitmap_score(struct queue_entry* q) {
}
if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
if (fav_factor > afl->top_rated[i]->exec_us * afl->top_rated[i]->len) continue;
/* Looks like we're going to win. Decrease ref count for the
previous winner, discard its trace_bits[] if necessary. */
previous winner, discard its afl->fsrv.trace_bits[] if necessary. */
if (!--top_rated[i]->tc_ref) {
if (!--afl->top_rated[i]->tc_ref) {
ck_free(top_rated[i]->trace_mini);
top_rated[i]->trace_mini = 0;
ck_free(afl->top_rated[i]->trace_mini);
afl->top_rated[i]->trace_mini = 0;
}
@ -225,44 +225,44 @@ void update_bitmap_score(struct queue_entry* q) {
/* Insert ourselves as the new winner. */
top_rated[i] = q;
afl->top_rated[i] = q;
++q->tc_ref;
if (!q->trace_mini) {
q->trace_mini = ck_alloc(MAP_SIZE >> 3);
minimize_bits(q->trace_mini, trace_bits);
minimize_bits(q->trace_mini, afl->fsrv.trace_bits);
}
score_changed = 1;
afl->score_changed = 1;
}
}
/* The second part of the mechanism discussed above is a routine that
goes over top_rated[] entries, and then sequentially grabs winners for
goes over afl->top_rated[] entries, and then sequentially grabs winners for
previously-unseen bytes (temp_v) and marks them as favored, at least
until the next run. The favored entries are given more air time during
all fuzzing steps. */
void cull_queue(void) {
void cull_queue(afl_state_t *afl) {
struct queue_entry* q;
static u8 temp_v[MAP_SIZE >> 3];
u32 i;
if (dumb_mode || !score_changed) return;
if (afl->dumb_mode || !afl->score_changed) return;
score_changed = 0;
afl->score_changed = 0;
memset(temp_v, 255, MAP_SIZE >> 3);
queued_favored = 0;
pending_favored = 0;
afl->queued_favored = 0;
afl->pending_favored = 0;
q = queue;
q = afl->queue;
while (q) {
@ -272,32 +272,32 @@ void cull_queue(void) {
}
/* Let's see if anything in the bitmap isn't captured in temp_v.
If yes, and if it has a top_rated[] contender, let's use it. */
If yes, and if it has a afl->top_rated[] contender, let's use it. */
for (i = 0; i < MAP_SIZE; ++i)
if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
if (afl->top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
u32 j = MAP_SIZE >> 3;
/* Remove all bits belonging to the current entry from temp_v. */
while (j--)
if (top_rated[i]->trace_mini[j])
temp_v[j] &= ~top_rated[i]->trace_mini[j];
if (afl->top_rated[i]->trace_mini[j])
temp_v[j] &= ~afl->top_rated[i]->trace_mini[j];
top_rated[i]->favored = 1;
++queued_favored;
afl->top_rated[i]->favored = 1;
++afl->queued_favored;
if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed)
++pending_favored;
if (afl->top_rated[i]->fuzz_level == 0 || !afl->top_rated[i]->was_fuzzed)
++afl->pending_favored;
}
q = queue;
q = afl->queue;
while (q) {
mark_as_redundant(q, !q->favored);
mark_as_redundant(afl, q, !q->favored);
q = q->next;
}
@ -308,10 +308,10 @@ void cull_queue(void) {
A helper function for fuzz_one(). Maybe some of these constants should
go into config.h. */
u32 calculate_score(struct queue_entry* q) {
u32 calculate_score(afl_state_t *afl, struct queue_entry* q) {
u32 avg_exec_us = total_cal_us / total_cal_cycles;
u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries;
u32 avg_exec_us = afl->total_cal_us / afl->total_cal_cycles;
u32 avg_bitmap_size = afl->total_bitmap_size / afl->total_bitmap_entries;
u32 perf_score = 100;
/* Adjust score based on execution speed of this path, compared to the
@ -391,7 +391,7 @@ u32 calculate_score(struct queue_entry* q) {
u32 n_paths, fuzz_mu;
u32 factor = 1;
switch (schedule) {
switch (afl->schedule) {
case EXPLORE: break;
@ -401,7 +401,7 @@ u32 calculate_score(struct queue_entry* q) {
fuzz_total = 0;
n_paths = 0;
struct queue_entry* queue_it = queue;
struct queue_entry* queue_it = afl->queue;
while (queue_it) {
fuzz_total += queue_it->n_fuzz;
@ -451,7 +451,7 @@ u32 calculate_score(struct queue_entry* q) {
perf_score *= factor / POWER_BETA;
// MOpt mode
if (limit_time_sig != 0 && max_depth - q->depth < 3)
if (afl->limit_time_sig != 0 && afl->max_depth - q->depth < 3)
perf_score *= 2;
else if (perf_score < 1)
perf_score =
@ -459,7 +459,7 @@ u32 calculate_score(struct queue_entry* q) {
/* Make sure that we don't go over limit. */
if (perf_score > havoc_max_mult * 100) perf_score = havoc_max_mult * 100;
if (perf_score > afl->havoc_max_mult * 100) perf_score = afl->havoc_max_mult * 100;
return perf_score;

View File

@ -27,8 +27,6 @@
#include "afl-fuzz.h"
#include "cmplog.h"
static char** its_argv;
///// Colorization
struct range {
@ -86,24 +84,24 @@ struct range* pop_biggest_range(struct range** ranges) {
}
u8 get_exec_checksum(u8* buf, u32 len, u32* cksum) {
static u8 get_exec_checksum(afl_state_t *afl, u8* buf, u32 len, u32* cksum) {
if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1;
if (unlikely(common_fuzz_stuff(afl, buf, len))) return 1;
*cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
*cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
return 0;
}
static void rand_replace(u8* buf, u32 len) {
static void rand_replace(afl_state_t *afl, u8* buf, u32 len) {
u32 i;
for (i = 0; i < len; ++i)
buf[i] = UR(256);
buf[i] = UR(afl, 256);
}
u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
static u8 colorization(afl_state_t *afl, u8* buf, u32 len, u32 exec_cksum) {
struct range* ranges = add_range(NULL, 0, len);
u8* backup = ck_alloc_nozero(len);
@ -111,24 +109,24 @@ u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
u8 needs_write = 0;
u64 orig_hit_cnt, new_hit_cnt;
orig_hit_cnt = queued_paths + unique_crashes;
orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
stage_name = "colorization";
stage_short = "colorization";
stage_max = 1000;
afl->stage_name = "colorization";
afl->stage_short = "colorization";
afl->stage_max = 1000;
struct range* rng;
stage_cur = 0;
while ((rng = pop_biggest_range(&ranges)) != NULL && stage_cur < stage_max) {
afl->stage_cur = 0;
while ((rng = pop_biggest_range(&ranges)) != NULL && afl->stage_cur < afl->stage_max) {
u32 s = rng->end - rng->start;
if (s == 0) goto empty_range;
memcpy(backup, buf + rng->start, s);
rand_replace(buf + rng->start, s);
rand_replace(afl, buf + rng->start, s);
u32 cksum;
if (unlikely(get_exec_checksum(buf, len, &cksum))) goto checksum_fail;
if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) goto checksum_fail;
if (cksum != exec_cksum) {
@ -142,15 +140,15 @@ u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
empty_range:
ck_free(rng);
++stage_cur;
++afl->stage_cur;
}
if (stage_cur < stage_max) queue_cur->fully_colorized = 1;
if (afl->stage_cur < afl->stage_max) afl->queue_cur->fully_colorized = 1;
new_hit_cnt = queued_paths + unique_crashes;
stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt;
stage_cycles[STAGE_COLORIZATION] += stage_cur;
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
afl->stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt;
afl->stage_cycles[STAGE_COLORIZATION] += afl->stage_cur;
ck_free(backup);
while (ranges) {
@ -167,21 +165,21 @@ u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
s32 fd;
if (no_unlink) {
if (afl->no_unlink) {
fd = open(queue_cur->fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
fd = open(afl->queue_cur->fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
} else {
unlink(queue_cur->fname); /* ignore errors */
fd = open(queue_cur->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
unlink(afl->queue_cur->fname); /* ignore errors */
fd = open(afl->queue_cur->fname, O_WRONLY | O_CREAT | O_EXCL, 0600);
}
if (fd < 0) PFATAL("Unable to create '%s'", queue_cur->fname);
if (fd < 0) PFATAL("Unable to create '%s'", afl->queue_cur->fname);
ck_write(fd, buf, len, queue_cur->fname);
queue_cur->len = len; // no-op, just to be 100% safe
ck_write(fd, buf, len, afl->queue_cur->fname);
afl->queue_cur->len = len; // no-op, just to be 100% safe
close(fd);
@ -206,15 +204,15 @@ checksum_fail:
///// Input to State replacement
u8 its_fuzz(u8* buf, u32 len, u8* status) {
static u8 its_fuzz(afl_state_t *afl, u8* buf, u32 len, u8* status) {
u64 orig_hit_cnt, new_hit_cnt;
orig_hit_cnt = queued_paths + unique_crashes;
orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1;
if (unlikely(common_fuzz_stuff(afl, buf, len))) return 1;
new_hit_cnt = queued_paths + unique_crashes;
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
if (unlikely(new_hit_cnt != orig_hit_cnt))
*status = 1;
@ -225,7 +223,7 @@ u8 its_fuzz(u8* buf, u32 len, u8* status) {
}
u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
u8* orig_buf, u8* buf, u32 len, u8 do_reverse,
u8* status) {
@ -246,14 +244,14 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
if (its_len >= 8 && *buf_64 == pattern) { // && *o_buf_64 == pattern) {
*buf_64 = repl;
if (unlikely(its_fuzz(buf, len, status))) return 1;
if (unlikely(its_fuzz(afl, buf, len, status))) return 1;
*buf_64 = pattern;
}
// reverse encoding
if (do_reverse)
if (unlikely(cmp_extend_encoding(h, SWAP64(pattern), SWAP64(repl), idx,
if (unlikely(cmp_extend_encoding(afl, h, SWAP64(pattern), SWAP64(repl), idx,
orig_buf, buf, len, 0, status)))
return 1;
@ -265,14 +263,14 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
*buf_32 == (u32)pattern) { // && *o_buf_32 == (u32)pattern) {
*buf_32 = (u32)repl;
if (unlikely(its_fuzz(buf, len, status))) return 1;
if (unlikely(its_fuzz(afl, buf, len, status))) return 1;
*buf_32 = pattern;
}
// reverse encoding
if (do_reverse)
if (unlikely(cmp_extend_encoding(h, SWAP32(pattern), SWAP32(repl), idx,
if (unlikely(cmp_extend_encoding(afl, h, SWAP32(pattern), SWAP32(repl), idx,
orig_buf, buf, len, 0, status)))
return 1;
@ -284,14 +282,14 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
*buf_16 == (u16)pattern) { // && *o_buf_16 == (u16)pattern) {
*buf_16 = (u16)repl;
if (unlikely(its_fuzz(buf, len, status))) return 1;
if (unlikely(its_fuzz(afl, buf, len, status))) return 1;
*buf_16 = (u16)pattern;
}
// reverse encoding
if (do_reverse)
if (unlikely(cmp_extend_encoding(h, SWAP16(pattern), SWAP16(repl), idx,
if (unlikely(cmp_extend_encoding(afl, h, SWAP16(pattern), SWAP16(repl), idx,
orig_buf, buf, len, 0, status)))
return 1;
@ -302,7 +300,7 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
if (its_len >= 2 && *buf_8 == (u8)pattern) {// && *o_buf_8 == (u8)pattern) {
*buf_8 = (u8)repl;
if (unlikely(its_fuzz(buf, len, status)))
if (unlikely(its_fuzz(afl, buf, len, status)))
return 1;
*buf_16 = (u16)pattern;
@ -314,7 +312,7 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
}
void try_to_add_to_dict(u64 v, u8 shape) {
static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
u8* b = (u8*)&v;
@ -333,7 +331,7 @@ void try_to_add_to_dict(u64 v, u8 shape) {
}
maybe_add_auto((u8*)&v, shape);
maybe_add_auto(afl, (u8*)&v, shape);
u64 rev;
switch (shape) {
@ -341,24 +339,24 @@ void try_to_add_to_dict(u64 v, u8 shape) {
case 1: break;
case 2:
rev = SWAP16((u16)v);
maybe_add_auto((u8*)&rev, shape);
maybe_add_auto(afl, (u8*)&rev, shape);
break;
case 4:
rev = SWAP32((u32)v);
maybe_add_auto((u8*)&rev, shape);
maybe_add_auto(afl, (u8*)&rev, shape);
break;
case 8:
rev = SWAP64(v);
maybe_add_auto((u8*)&rev, shape);
maybe_add_auto(afl, (u8*)&rev, shape);
break;
}
}
u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
struct cmp_header* h = &cmp_map->headers[key];
struct cmp_header* h = &afl->shm.cmp_map->headers[key];
u32 i, j, idx;
u32 loggeds = h->hits;
@ -370,16 +368,16 @@ u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
for (i = 0; i < loggeds; ++i) {
struct cmp_operands* o = &cmp_map->log[key][i];
struct cmp_operands* o = &afl->shm.cmp_map->log[key][i];
// opt not in the paper
for (j = 0; j < i; ++j)
if (cmp_map->log[key][j].v0 == o->v0 && cmp_map->log[key][i].v1 == o->v1)
if (afl->shm.cmp_map->log[key][j].v0 == o->v0 && afl->shm.cmp_map->log[key][i].v1 == o->v1)
goto cmp_fuzz_next_iter;
for (idx = 0; idx < len && fails < 8; ++idx) {
if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len,
if (unlikely(cmp_extend_encoding(afl, h, o->v0, o->v1, idx, orig_buf, buf, len,
1, &status)))
return 1;
if (status == 2)
@ -387,7 +385,7 @@ u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
else if (status == 1)
break;
if (unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len,
if (unlikely(cmp_extend_encoding(afl, h, o->v1, o->v0, idx, orig_buf, buf, len,
1, &status)))
return 1;
if (status == 2)
@ -400,13 +398,13 @@ u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
// If failed, add to dictionary
if (fails == 8) {
try_to_add_to_dict(o->v0, SHAPE_BYTES(h->shape));
try_to_add_to_dict(o->v1, SHAPE_BYTES(h->shape));
try_to_add_to_dict(afl, o->v0, SHAPE_BYTES(h->shape));
try_to_add_to_dict(afl, o->v1, SHAPE_BYTES(h->shape));
}
cmp_fuzz_next_iter:
stage_cur++;
afl->stage_cur++;
}
@ -414,7 +412,7 @@ u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
}
u8 rtn_extend_encoding(struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
static u8 rtn_extend_encoding(afl_state_t *afl, struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
u8* orig_buf, u8* buf, u32 len, u8* status) {
u32 i;
@ -430,7 +428,7 @@ u8 rtn_extend_encoding(struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
if (pattern[idx + i] != buf[idx + i] || *status == 1) break;
buf[idx + i] = repl[idx + i];
if (unlikely(its_fuzz(buf, len, status))) return 1;
if (unlikely(its_fuzz(afl, buf, len, status))) return 1;
}
@ -439,9 +437,9 @@ u8 rtn_extend_encoding(struct cmp_header* h, u8* pattern, u8* repl, u32 idx,
}
u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8* orig_buf, u8* buf, u32 len) {
struct cmp_header* h = &cmp_map->headers[key];
struct cmp_header* h = &afl->shm.cmp_map->headers[key];
u32 i, j, idx;
u32 loggeds = h->hits;
@ -453,17 +451,17 @@ u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
for (i = 0; i < loggeds; ++i) {
struct cmpfn_operands* o = &((struct cmpfn_operands*)cmp_map->log[key])[i];
struct cmpfn_operands* o = &((struct cmpfn_operands*)afl->shm.cmp_map->log[key])[i];
// opt not in the paper
for (j = 0; j < i; ++j)
if (!memcmp(&((struct cmpfn_operands*)cmp_map->log[key])[j], o,
if (!memcmp(&((struct cmpfn_operands*)afl->shm.cmp_map->log[key])[j], o,
sizeof(struct cmpfn_operands)))
goto rtn_fuzz_next_iter;
for (idx = 0; idx < len && fails < 8; ++idx) {
if (unlikely(rtn_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len,
if (unlikely(rtn_extend_encoding(afl, h, o->v0, o->v1, idx, orig_buf, buf, len,
&status)))
return 1;
if (status == 2)
@ -471,7 +469,7 @@ u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
else if (status == 1)
break;
if (unlikely(rtn_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len,
if (unlikely(rtn_extend_encoding(afl, h, o->v1, o->v0, idx, orig_buf, buf, len,
&status)))
return 1;
if (status == 2)
@ -484,13 +482,13 @@ u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
// If failed, add to dictionary
if (fails == 8) {
maybe_add_auto(o->v0, SHAPE_BYTES(h->shape));
maybe_add_auto(o->v1, SHAPE_BYTES(h->shape));
maybe_add_auto(afl, o->v0, SHAPE_BYTES(h->shape));
maybe_add_auto(afl, o->v1, SHAPE_BYTES(h->shape));
}
rtn_fuzz_next_iter:
stage_cur++;
afl->stage_cur++;
}
@ -500,51 +498,50 @@ u8 rtn_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
///// Input to State stage
// queue_cur->exec_cksum
u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len,
// afl->queue_cur->exec_cksum
u8 input_to_state_stage(afl_state_t *afl, u8* orig_buf, u8* buf, u32 len,
u32 exec_cksum) {
u8 r = 1;
its_argv = argv;
if (unlikely(colorization(buf, len, exec_cksum))) return 1;
if (unlikely(colorization(afl, buf, len, exec_cksum))) return 1;
// do it manually, forkserver clear only trace_bits
memset(cmp_map->headers, 0, sizeof(cmp_map->headers));
// do it manually, forkserver clear only afl->fsrv.trace_bits
memset(afl->shm.cmp_map->headers, 0, sizeof(afl->shm.cmp_map->headers));
if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len))) return 1;
if (unlikely(common_fuzz_cmplog_stuff(afl, buf, len))) return 1;
u64 orig_hit_cnt, new_hit_cnt;
u64 orig_execs = total_execs;
orig_hit_cnt = queued_paths + unique_crashes;
u64 orig_execs = afl->total_execs;
orig_hit_cnt = afl->queued_paths + afl->unique_crashes;
stage_name = "input-to-state";
stage_short = "its";
stage_max = 0;
stage_cur = 0;
afl->stage_name = "input-to-state";
afl->stage_short = "its";
afl->stage_max = 0;
afl->stage_cur = 0;
u32 k;
for (k = 0; k < CMP_MAP_W; ++k) {
if (!cmp_map->headers[k].hits) continue;
if (cmp_map->headers[k].type == CMP_TYPE_INS)
stage_max += MIN(cmp_map->headers[k].hits, CMP_MAP_H);
if (!afl->shm.cmp_map->headers[k].hits) continue;
if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS)
afl->stage_max += MIN(afl->shm.cmp_map->headers[k].hits, CMP_MAP_H);
else
stage_max += MIN(cmp_map->headers[k].hits, CMP_MAP_RTN_H);
afl->stage_max += MIN(afl->shm.cmp_map->headers[k].hits, CMP_MAP_RTN_H);
}
for (k = 0; k < CMP_MAP_W; ++k) {
if (!cmp_map->headers[k].hits) continue;
if (!afl->shm.cmp_map->headers[k].hits) continue;
if (cmp_map->headers[k].type == CMP_TYPE_INS) {
if (afl->shm.cmp_map->headers[k].type == CMP_TYPE_INS) {
if (unlikely(cmp_fuzz(k, orig_buf, buf, len))) goto exit_its;
if (unlikely(cmp_fuzz(afl, k, orig_buf, buf, len))) goto exit_its;
} else {
if (unlikely(rtn_fuzz(k, orig_buf, buf, len))) goto exit_its;
if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, len))) goto exit_its;
}
@ -555,9 +552,9 @@ u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len,
exit_its:
memcpy(orig_buf, buf, len);
new_hit_cnt = queued_paths + unique_crashes;
stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt;
stage_cycles[STAGE_ITS] += total_execs - orig_execs;
new_hit_cnt = afl->queued_paths + afl->unique_crashes;
afl->stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt;
afl->stage_cycles[STAGE_ITS] += afl->total_execs - orig_execs;
return r;

View File

@ -26,9 +26,9 @@
#include "afl-fuzz.h"
/* Execute target application, monitoring for timeouts. Return status
information. The called program will update trace_bits[]. */
information. The called program will update afl->fsrv.trace_bits[]. */
u8 run_target(char** argv, u32 timeout) {
u8 run_target(afl_state_t *afl, u32 timeout) {
static struct itimerval it;
static u32 prev_timed_out = 0;
@ -37,13 +37,13 @@ u8 run_target(char** argv, u32 timeout) {
int status = 0;
u32 tb4;
child_timed_out = 0;
afl->fsrv.child_timed_out = 0;
/* After this memset, trace_bits[] are effectively volatile, so we
/* After this memset, afl->fsrv.trace_bits[] are effectively volatile, so we
must prevent any earlier operations from venturing into that
territory. */
memset(trace_bits, 0, MAP_SIZE);
memset(afl->fsrv.trace_bits, 0, MAP_SIZE);
MEM_BARRIER();
/* If we're running in "dumb" mode, we can't rely on the fork server
@ -51,19 +51,19 @@ u8 run_target(char** argv, u32 timeout) {
execve(). There is a bit of code duplication between here and
init_forkserver(), but c'est la vie. */
if (dumb_mode == 1 || no_forkserver) {
if (afl->dumb_mode == 1 || afl->no_forkserver) {
child_pid = fork();
afl->fsrv.child_pid = fork();
if (child_pid < 0) PFATAL("fork() failed");
if (afl->fsrv.child_pid < 0) PFATAL("fork() failed");
if (!child_pid) {
if (!afl->fsrv.child_pid) {
struct rlimit r;
if (mem_limit) {
if (afl->fsrv.mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
r.rlim_max = r.rlim_cur = ((rlim_t)afl->fsrv.mem_limit) << 20;
#ifdef RLIMIT_AS
@ -81,33 +81,33 @@ u8 run_target(char** argv, u32 timeout) {
setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
/* Isolate the process and configure standard descriptors. If out_file is
specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */
/* Isolate the process and configure standard descriptors. If afl->fsrv.out_file is
specified, stdin is /dev/null; otherwise, afl->fsrv.out_fd is cloned instead. */
setsid();
dup2(dev_null_fd, 1);
dup2(dev_null_fd, 2);
dup2(afl->fsrv.dev_null_fd, 1);
dup2(afl->fsrv.dev_null_fd, 2);
if (out_file) {
if (afl->fsrv.out_file) {
dup2(dev_null_fd, 0);
dup2(afl->fsrv.dev_null_fd, 0);
} else {
dup2(out_fd, 0);
close(out_fd);
dup2(afl->fsrv.out_fd, 0);
close(afl->fsrv.out_fd);
}
/* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */
close(dev_null_fd);
close(out_dir_fd);
close(afl->fsrv.dev_null_fd);
close(afl->fsrv.out_dir_fd);
#ifndef HAVE_ARC4RANDOM
close(dev_urandom_fd);
close(afl->fsrv.dev_urandom_fd);
#endif
close(fileno(plot_file));
close(fileno(afl->fsrv.plot_file));
/* Set sane defaults for ASAN if nothing else specified. */
@ -122,12 +122,12 @@ u8 run_target(char** argv, u32 timeout) {
"symbolize=0:"
"msan_track_origins=0", 0);
execv(target_path, argv);
execv(afl->fsrv.target_path, afl->argv);
/* Use a distinctive bitmap value to tell the parent about execv()
falling through. */
*(u32*)trace_bits = EXEC_FAIL_SIG;
*(u32*)afl->fsrv.trace_bits = EXEC_FAIL_SIG;
exit(0);
}
@ -139,21 +139,21 @@ u8 run_target(char** argv, u32 timeout) {
/* In non-dumb mode, we have the fork server up and running, so simply
tell it to have at it, and then read back PID. */
if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if ((res = write(afl->fsrv.fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if (stop_soon) return 0;
if (afl->stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
if ((res = read(afl->fsrv.fsrv_st_fd, &afl->fsrv.child_pid, 4)) != 4) {
if (stop_soon) return 0;
if (afl->stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
if (afl->fsrv.child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
}
@ -165,19 +165,19 @@ u8 run_target(char** argv, u32 timeout) {
setitimer(ITIMER_REAL, &it, NULL);
/* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */
/* The SIGALRM handler simply kills the afl->fsrv.child_pid and sets afl->fsrv.child_timed_out. */
if (dumb_mode == 1 || no_forkserver) {
if (afl->dumb_mode == 1 || afl->no_forkserver) {
if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
if (waitpid(afl->fsrv.child_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
} else {
s32 res;
if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
if ((res = read(afl->fsrv.fsrv_st_fd, &status, 4)) != 4) {
if (stop_soon) return 0;
if (afl->stop_soon) return 0;
SAYF(
"\n" cLRD "[-] " cRST
"Unable to communicate with fork server. Some possible reasons:\n\n"
@ -196,50 +196,50 @@ u8 run_target(char** argv, u32 timeout) {
"\n\n"
"If all else fails you can disable the fork server via "
"AFL_NO_FORKSRV=1.\n",
mem_limit);
afl->fsrv.mem_limit);
RPFATAL(res, "Unable to communicate with fork server");
}
}
if (!WIFSTOPPED(status)) child_pid = 0;
if (!WIFSTOPPED(status)) afl->fsrv.child_pid = 0;
getitimer(ITIMER_REAL, &it);
exec_ms =
(u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms;
if (afl->slowest_exec_ms < exec_ms) afl->slowest_exec_ms = exec_ms;
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
setitimer(ITIMER_REAL, &it, NULL);
++total_execs;
++afl->total_execs;
/* Any subsequent operations on trace_bits must not be moved by the
compiler below this point. Past this location, trace_bits[] behave
/* Any subsequent operations on afl->fsrv.trace_bits must not be moved by the
compiler below this point. Past this location, afl->fsrv.trace_bits[] behave
very normally and do not have to be treated as volatile. */
MEM_BARRIER();
tb4 = *(u32*)trace_bits;
tb4 = *(u32*)afl->fsrv.trace_bits;
#ifdef WORD_SIZE_64
classify_counts((u64*)trace_bits);
classify_counts((u64*)afl->fsrv.trace_bits);
#else
classify_counts((u32*)trace_bits);
classify_counts((u32*)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
prev_timed_out = child_timed_out;
prev_timed_out = afl->fsrv.child_timed_out;
/* Report outcome to caller. */
if (WIFSIGNALED(status) && !stop_soon) {
if (WIFSIGNALED(status) && !afl->stop_soon) {
kill_signal = WTERMSIG(status);
afl->kill_signal = WTERMSIG(status);
if (child_timed_out && kill_signal == SIGKILL) return FAULT_TMOUT;
if (afl->fsrv.child_timed_out && afl->kill_signal == SIGKILL) return FAULT_TMOUT;
return FAULT_CRASH;
@ -248,31 +248,31 @@ u8 run_target(char** argv, u32 timeout) {
/* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and
must use a special exit code. */
if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
if (afl->fsrv.uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
kill_signal = 0;
afl->kill_signal = 0;
return FAULT_CRASH;
}
if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
if ((afl->dumb_mode == 1 || afl->no_forkserver) && tb4 == EXEC_FAIL_SIG)
return FAULT_ERROR;
return FAULT_NONE;
}
/* Write modified data to file for testing. If out_file is set, the old file
is unlinked and a new one is created. Otherwise, out_fd is rewound and
/* Write modified data to file for testing. If afl->fsrv.out_file is set, the old file
is unlinked and a new one is created. Otherwise, afl->fsrv.out_fd is rewound and
truncated. */
void write_to_testcase(void* mem, u32 len) {
void write_to_testcase(afl_state_t *afl, void* mem, u32 len) {
s32 fd = out_fd;
s32 fd = afl->fsrv.out_fd;
#ifdef _AFL_DOCUMENT_MUTATIONS
s32 doc_fd;
char* fn = alloc_printf("%s/mutations/%09u:%s", out_dir, document_counter++,
char* fn = alloc_printf("%s/mutations/%09u:%s", afl->out_dir, afl->document_counter++,
describe_op(0));
if (fn != NULL) {
@ -290,39 +290,39 @@ void write_to_testcase(void* mem, u32 len) {
#endif
if (out_file) {
if (afl->fsrv.out_file) {
if (no_unlink) {
if (afl->no_unlink) {
fd = open(out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
} else {
unlink(out_file); /* Ignore errors. */
fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
unlink(afl->fsrv.out_file); /* Ignore errors. */
fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
}
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
if (fd < 0) PFATAL("Unable to create '%s'", afl->fsrv.out_file);
} else
lseek(fd, 0, SEEK_SET);
if (mutator && mutator->afl_custom_pre_save) {
if (afl->mutator && afl->mutator->afl_custom_pre_save) {
u8* new_data;
size_t new_size = mutator->afl_custom_pre_save(mem, len, &new_data);
ck_write(fd, new_data, new_size, out_file);
size_t new_size = afl->mutator->afl_custom_pre_save(afl, mem, len, &new_data);
ck_write(fd, new_data, new_size, afl->fsrv.out_file);
ck_free(new_data);
} else {
ck_write(fd, mem, len, out_file);
ck_write(fd, mem, len, afl->fsrv.out_file);
}
if (!out_file) {
if (!afl->fsrv.out_file) {
if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
@ -335,36 +335,36 @@ void write_to_testcase(void* mem, u32 len) {
/* The same, but with an adjustable gap. Used for trimming. */
void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
static void write_with_gap(afl_state_t *afl, void* mem, u32 len, u32 skip_at, u32 skip_len) {
s32 fd = out_fd;
s32 fd = afl->fsrv.out_fd;
u32 tail_len = len - skip_at - skip_len;
if (out_file) {
if (afl->fsrv.out_file) {
if (no_unlink) {
if (afl->no_unlink) {
fd = open(out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_TRUNC, 0600);
} else {
unlink(out_file); /* Ignore errors. */
fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
unlink(afl->fsrv.out_file); /* Ignore errors. */
fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
}
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
if (fd < 0) PFATAL("Unable to create '%s'", afl->fsrv.out_file);
} else
lseek(fd, 0, SEEK_SET);
if (skip_at) ck_write(fd, mem, skip_at, out_file);
if (skip_at) ck_write(fd, mem, skip_at, afl->fsrv.out_file);
u8* memu8 = mem;
if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, out_file);
if (tail_len) ck_write(fd, memu8 + skip_at + skip_len, tail_len, afl->fsrv.out_file);
if (!out_file) {
if (!afl->fsrv.out_file) {
if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
@ -379,7 +379,7 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
to warn about flaky or otherwise problematic test cases early on; and when
new paths are discovered to detect variable behavior and so on. */
u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
u8 calibrate_case(afl_state_t *afl, struct queue_entry* q, u8* use_mem, u32 handicap,
u8 from_queue) {
static u8 first_trace[MAP_SIZE];
@ -389,61 +389,61 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
u64 start_us, stop_us;
s32 old_sc = stage_cur, old_sm = stage_max;
u32 use_tmout = exec_tmout;
u8* old_sn = stage_name;
s32 old_sc = afl->stage_cur, old_sm = afl->stage_max;
u32 use_tmout = afl->fsrv.exec_tmout;
u8* old_sn = afl->stage_name;
/* Be a bit more generous about timeouts when resuming sessions, or when
trying to calibrate already-added finds. This helps avoid trouble due
to intermittent latency. */
if (!from_queue || resuming_fuzz)
if (!from_queue || afl->resuming_fuzz)
use_tmout =
MAX(exec_tmout + CAL_TMOUT_ADD, exec_tmout * CAL_TMOUT_PERC / 100);
MAX(afl->fsrv.exec_tmout + CAL_TMOUT_ADD, afl->fsrv.exec_tmout * CAL_TMOUT_PERC / 100);
++q->cal_failed;
stage_name = "calibration";
stage_max = fast_cal ? 3 : CAL_CYCLES;
afl->stage_name = "calibration";
afl->stage_max = afl->fast_cal ? 3 : CAL_CYCLES;
/* Make sure the forkserver is up before we do anything, and let's not
count its spin-up time toward binary calibration. */
if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) init_forkserver(argv);
if (dumb_mode != 1 && !no_forkserver && !cmplog_forksrv_pid && cmplog_mode)
init_cmplog_forkserver(argv);
if (afl->dumb_mode != 1 && !afl->no_forkserver && !afl->fsrv.fsrv_pid) afl_fsrv_start(&afl->fsrv, afl->argv);
if (afl->dumb_mode != 1 && !afl->no_forkserver && !afl->cmplog_fsrv_pid && afl->shm.cmplog_mode)
init_cmplog_forkserver(afl);
if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
if (q->exec_cksum) memcpy(first_trace, afl->fsrv.trace_bits, MAP_SIZE);
start_us = get_cur_time_us();
for (stage_cur = 0; stage_cur < stage_max; ++stage_cur) {
for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
u32 cksum;
if (!first_run && !(stage_cur % stats_update_freq)) show_stats();
if (!first_run && !(afl->stage_cur % afl->stats_update_freq)) show_stats(afl);
write_to_testcase(use_mem, q->len);
write_to_testcase(afl, use_mem, q->len);
fault = run_target(argv, use_tmout);
fault = run_target(afl, use_tmout);
/* stop_soon is set by the handler for Ctrl+C. When it's pressed,
/* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed,
we want to bail out quickly. */
if (stop_soon || fault != crash_mode) goto abort_calibration;
if (afl->stop_soon || fault != afl->crash_mode) goto abort_calibration;
if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
if (!afl->dumb_mode && !afl->stage_cur && !count_bytes(afl->fsrv.trace_bits)) {
fault = FAULT_NOINST;
goto abort_calibration;
}
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
if (q->exec_cksum != cksum) {
u8 hnb = has_new_bits(virgin_bits);
u8 hnb = has_new_bits(afl, afl->virgin_bits);
if (hnb > new_bits) new_bits = hnb;
if (q->exec_cksum) {
@ -452,10 +452,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
for (i = 0; i < MAP_SIZE; ++i) {
if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
if (!afl->var_bytes[i] && first_trace[i] != afl->fsrv.trace_bits[i]) {
var_bytes[i] = 1;
stage_max = CAL_CYCLES_LONG;
afl->var_bytes[i] = 1;
afl->stage_max = CAL_CYCLES_LONG;
}
@ -466,7 +466,7 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
} else {
q->exec_cksum = cksum;
memcpy(first_trace, trace_bits, MAP_SIZE);
memcpy(first_trace, afl->fsrv.trace_bits, MAP_SIZE);
}
@ -476,34 +476,34 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
stop_us = get_cur_time_us();
total_cal_us += stop_us - start_us;
total_cal_cycles += stage_max;
afl->total_cal_us += stop_us - start_us;
afl->total_cal_cycles += afl->stage_max;
/* OK, let's collect some stats about the performance of this test case.
This is used for fuzzing air time calculations in calculate_score(). */
q->exec_us = (stop_us - start_us) / stage_max;
q->bitmap_size = count_bytes(trace_bits);
q->exec_us = (stop_us - start_us) / afl->stage_max;
q->bitmap_size = count_bytes(afl->fsrv.trace_bits);
q->handicap = handicap;
q->cal_failed = 0;
total_bitmap_size += q->bitmap_size;
++total_bitmap_entries;
afl->total_bitmap_size += q->bitmap_size;
++afl->total_bitmap_entries;
update_bitmap_score(q);
update_bitmap_score(afl, q);
/* If this case didn't result in new output from the instrumentation, tell
parent. This is a non-critical problem, but something to warn the user
about. */
if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
if (!afl->dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS;
abort_calibration:
if (new_bits == 2 && !q->has_new_cov) {
q->has_new_cov = 1;
++queued_with_cov;
++afl->queued_with_cov;
}
@ -511,22 +511,22 @@ abort_calibration:
if (var_detected) {
var_byte_count = count_bytes(var_bytes);
afl->var_byte_count = count_bytes(afl->var_bytes);
if (!q->var_behavior) {
mark_as_variable(q);
++queued_variable;
mark_as_variable(afl, q);
++afl->queued_variable;
}
}
stage_name = old_sn;
stage_cur = old_sc;
stage_max = old_sm;
afl->stage_name = old_sn;
afl->stage_cur = old_sc;
afl->stage_max = old_sm;
if (!first_run) show_stats();
if (!first_run) show_stats(afl);
return fault;
@ -534,17 +534,17 @@ abort_calibration:
/* Grab interesting test cases from other fuzzers. */
void sync_fuzzers(char** argv) {
void sync_fuzzers(afl_state_t *afl) {
DIR* sd;
struct dirent* sd_ent;
u32 sync_cnt = 0;
sd = opendir(sync_dir);
if (!sd) PFATAL("Unable to open '%s'", sync_dir);
sd = opendir(afl->sync_dir);
if (!sd) PFATAL("Unable to open '%s'", afl->sync_dir);
stage_max = stage_cur = 0;
cur_depth = 0;
afl->stage_max = afl->stage_cur = 0;
afl->cur_depth = 0;
/* Look at the entries created for every other fuzzer in the sync directory.
*/
@ -562,11 +562,11 @@ void sync_fuzzers(char** argv) {
/* Skip dot files and our own output directory. */
if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue;
if (sd_ent->d_name[0] == '.' || !strcmp(afl->sync_id, sd_ent->d_name)) continue;
/* Skip anything that doesn't have a queue/ subdirectory. */
qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
qd_path = alloc_printf("%s/%s/queue", afl->sync_dir, sd_ent->d_name);
if (!(qd = opendir(qd_path))) {
@ -577,7 +577,7 @@ void sync_fuzzers(char** argv) {
/* Retrieve the ID of the last seen test case. */
qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name);
qd_synced_path = alloc_printf("%s/.synced/%s", afl->out_dir, sd_ent->d_name);
id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600);
@ -590,9 +590,9 @@ void sync_fuzzers(char** argv) {
/* Show stats */
sprintf(stage_tmp, "sync %u", ++sync_cnt);
stage_name = stage_tmp;
stage_cur = 0;
stage_max = 0;
afl->stage_name = stage_tmp;
afl->stage_cur = 0;
afl->stage_max = 0;
/* For every file queued by this fuzzer, parse ID and see if we have looked
at it before; exec a test case if not. */
@ -604,13 +604,13 @@ void sync_fuzzers(char** argv) {
struct stat st;
if (qd_ent->d_name[0] == '.' ||
sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 ||
syncing_case < min_accept)
sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &afl->syncing_case) != 1 ||
afl->syncing_case < min_accept)
continue;
/* OK, sounds like a new one. Let's give it a try. */
if (syncing_case >= next_min_accept) next_min_accept = syncing_case + 1;
if (afl->syncing_case >= next_min_accept) next_min_accept = afl->syncing_case + 1;
path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
@ -639,19 +639,19 @@ void sync_fuzzers(char** argv) {
/* See what happens. We rely on save_if_interesting() to catch major
errors and save the test case. */
write_to_testcase(mem, st.st_size);
write_to_testcase(afl, mem, st.st_size);
fault = run_target(argv, exec_tmout);
fault = run_target(afl, afl->fsrv.exec_tmout);
if (stop_soon) goto close_sync;
if (afl->stop_soon) goto close_sync;
syncing_party = sd_ent->d_name;
queued_imported += save_if_interesting(argv, mem, st.st_size, fault);
syncing_party = 0;
afl->syncing_party = sd_ent->d_name;
afl->queued_imported += save_if_interesting(afl, mem, st.st_size, fault);
afl->syncing_party = 0;
munmap(mem, st.st_size);
if (!(stage_cur++ % stats_update_freq)) show_stats();
if (!(afl->stage_cur++ % afl->stats_update_freq)) show_stats(afl);
}
@ -678,11 +678,11 @@ void sync_fuzzers(char** argv) {
trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
file size, to keep the stage short and sweet. */
u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
u8 trim_case(afl_state_t *afl, struct queue_entry* q, u8* in_buf) {
/* Custom mutator trimmer */
if (mutator && mutator->afl_custom_trim)
return trim_case_custom(argv, q, in_buf);
if (afl->mutator && afl->mutator->afl_custom_trim)
return trim_case_custom(afl, q, in_buf);
static u8 tmp[64];
static u8 clean_trace[MAP_SIZE];
@ -698,8 +698,8 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
if (q->len < 5) return 0;
stage_name = tmp;
bytes_trim_in += q->len;
afl->stage_name = tmp;
afl->bytes_trim_in += q->len;
/* Select initial chunk len, starting with large steps. */
@ -716,24 +716,24 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len));
stage_cur = 0;
stage_max = q->len / remove_len;
afl->stage_cur = 0;
afl->stage_max = q->len / remove_len;
while (remove_pos < q->len) {
u32 trim_avail = MIN(remove_len, q->len - remove_pos);
u32 cksum;
write_with_gap(in_buf, q->len, remove_pos, trim_avail);
write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);
fault = run_target(argv, exec_tmout);
++trim_execs;
fault = run_target(afl, afl->fsrv.exec_tmout);
++afl->trim_execs;
if (stop_soon || fault == FAULT_ERROR) goto abort_trimming;
if (afl->stop_soon || fault == FAULT_ERROR) goto abort_trimming;
/* Note that we don't keep track of crashes or hangs here; maybe TODO? */
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
/* If the deletion had no impact on the trace, make it permanent. This
isn't perfect for variable-path inputs, but we're just making a
@ -756,7 +756,7 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
if (!needs_write) {
needs_write = 1;
memcpy(clean_trace, trace_bits, MAP_SIZE);
memcpy(clean_trace, afl->fsrv.trace_bits, MAP_SIZE);
}
@ -766,8 +766,8 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
/* Since this can be slow, update the screen every now and then. */
if (!(trim_exec++ % stats_update_freq)) show_stats();
++stage_cur;
if (!(trim_exec++ % afl->stats_update_freq)) show_stats(afl);
++afl->stage_cur;
}
@ -782,7 +782,7 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
s32 fd;
if (no_unlink) {
if (afl->no_unlink) {
fd = open(q->fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
@ -798,14 +798,14 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
ck_write(fd, in_buf, q->len, q->fname);
close(fd);
memcpy(trace_bits, clean_trace, MAP_SIZE);
update_bitmap_score(q);
memcpy(afl->fsrv.trace_bits, clean_trace, MAP_SIZE);
update_bitmap_score(afl, q);
}
abort_trimming:
bytes_trim_out += q->len;
afl->bytes_trim_out += q->len;
return fault;
}
@ -814,53 +814,53 @@ abort_trimming:
error conditions, returning 1 if it's time to bail out. This is
a helper function for fuzz_one(). */
u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
u8 common_fuzz_stuff(afl_state_t *afl, u8* out_buf, u32 len) {
u8 fault;
if (post_handler) {
if (afl->post_handler) {
out_buf = post_handler(out_buf, &len);
out_buf = afl->post_handler(out_buf, &len);
if (!out_buf || !len) return 0;
}
write_to_testcase(out_buf, len);
write_to_testcase(afl, out_buf, len);
fault = run_target(argv, exec_tmout);
fault = run_target(afl, afl->fsrv.exec_tmout);
if (stop_soon) return 1;
if (afl->stop_soon) return 1;
if (fault == FAULT_TMOUT) {
if (subseq_tmouts++ > TMOUT_LIMIT) {
if (afl->subseq_tmouts++ > TMOUT_LIMIT) {
++cur_skipped_paths;
++afl->cur_skipped_paths;
return 1;
}
} else
subseq_tmouts = 0;
afl->subseq_tmouts = 0;
/* Users can hit us with SIGUSR1 to request the current input
to be abandoned. */
if (skip_requested) {
if (afl->skip_requested) {
skip_requested = 0;
++cur_skipped_paths;
afl->skip_requested = 0;
++afl->cur_skipped_paths;
return 1;
}
/* This handles FAULT_ERROR for us: */
queued_discovered += save_if_interesting(argv, out_buf, len, fault);
afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault);
if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max)
show_stats();
if (!(afl->stage_cur % afl->stats_update_freq) || afl->stage_cur + 1 == afl->stage_max)
show_stats(afl);
return 0;

View File

@ -27,12 +27,12 @@
/* Update stats file for unattended monitoring. */
void write_stats_file(double bitmap_cvg, double stability, double eps) {
void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability, double eps) {
static double last_bcvg, last_stab, last_eps;
static struct rusage rus;
u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
u8* fn = alloc_printf("%s/fuzzer_stats", afl->out_dir);
s32 fd;
FILE* f;
@ -99,28 +99,28 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
"\n"
"target_mode : %s%s%s%s%s%s%s%s\n"
"command_line : %s\n",
start_time / 1000, get_cur_time() / 1000, getpid(),
queue_cycle ? (queue_cycle - 1) : 0, total_execs,
/*eps,*/ total_execs / ((double)(get_cur_time() - start_time) / 1000),
queued_paths, queued_favored, queued_discovered, queued_imported,
max_depth, current_entry, pending_favored, pending_not_fuzzed,
queued_variable, stability, bitmap_cvg, unique_crashes, unique_hangs,
last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000,
total_execs - last_crash_execs, exec_tmout, slowest_exec_ms,
afl->start_time / 1000, get_cur_time() / 1000, getpid(),
afl->queue_cycle ? (afl->queue_cycle - 1) : 0, afl->total_execs,
/*eps,*/ afl->total_execs / ((double)(get_cur_time() - afl->start_time) / 1000),
afl->queued_paths, afl->queued_favored, afl->queued_discovered, afl->queued_imported,
afl->max_depth, afl->current_entry, afl->pending_favored, afl->pending_not_fuzzed,
afl->queued_variable, stability, bitmap_cvg, afl->unique_crashes, afl->unique_hangs,
afl->last_path_time / 1000, afl->last_crash_time / 1000, afl->last_hang_time / 1000,
afl->total_execs - afl->last_crash_execs, afl->fsrv.exec_tmout, afl->slowest_exec_ms,
#ifdef __APPLE__
(unsigned long int)(rus.ru_maxrss >> 20),
#else
(unsigned long int)(rus.ru_maxrss >> 10),
#endif
use_banner, unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "",
dumb_mode ? " dumb " : "", no_forkserver ? "no_forksrv " : "",
crash_mode ? "crash " : "", persistent_mode ? "persistent " : "",
deferred_mode ? "deferred " : "",
(unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode ||
persistent_mode || deferred_mode)
afl->use_banner, afl->unicorn_mode ? "unicorn" : "", afl->qemu_mode ? "qemu " : "",
afl->dumb_mode ? " dumb " : "", afl->no_forkserver ? "no_fsrv " : "",
afl->crash_mode ? "crash " : "", afl->persistent_mode ? "persistent " : "",
afl->deferred_mode ? "deferred " : "",
(afl->unicorn_mode || afl->qemu_mode || afl->dumb_mode || afl->no_forkserver || afl->crash_mode ||
afl->persistent_mode || afl->deferred_mode)
? ""
: "default",
orig_cmdline);
afl->orig_cmdline);
/* ignore errors */
fclose(f);
@ -129,61 +129,61 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
/* Update the plot file if there is a reason to. */
void maybe_update_plot_file(double bitmap_cvg, double eps) {
void maybe_update_plot_file(afl_state_t *afl, double bitmap_cvg, double eps) {
static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md;
static u64 prev_qc, prev_uc, prev_uh;
if (prev_qp == queued_paths && prev_pf == pending_favored &&
prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
prev_qc == queue_cycle && prev_uc == unique_crashes &&
prev_uh == unique_hangs && prev_md == max_depth)
if (prev_qp == afl->queued_paths && prev_pf == afl->pending_favored &&
prev_pnf == afl->pending_not_fuzzed && prev_ce == afl->current_entry &&
prev_qc == afl->queue_cycle && prev_uc == afl->unique_crashes &&
prev_uh == afl->unique_hangs && prev_md == afl->max_depth)
return;
prev_qp = queued_paths;
prev_pf = pending_favored;
prev_pnf = pending_not_fuzzed;
prev_ce = current_entry;
prev_qc = queue_cycle;
prev_uc = unique_crashes;
prev_uh = unique_hangs;
prev_md = max_depth;
prev_qp = afl->queued_paths;
prev_pf = afl->pending_favored;
prev_pnf = afl->pending_not_fuzzed;
prev_ce = afl->current_entry;
prev_qc = afl->queue_cycle;
prev_uc = afl->unique_crashes;
prev_uh = afl->unique_hangs;
prev_md = afl->max_depth;
/* Fields in the file:
unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed,
favored_not_fuzzed, unique_crashes, unique_hangs, max_depth,
unix_time, afl->cycles_done, cur_path, paths_total, paths_not_fuzzed,
favored_not_fuzzed, afl->unique_crashes, afl->unique_hangs, afl->max_depth,
execs_per_sec */
fprintf(plot_file,
fprintf(afl->fsrv.plot_file,
"%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n",
get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths,
pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes,
unique_hangs, max_depth, eps); /* ignore errors */
get_cur_time() / 1000, afl->queue_cycle - 1, afl->current_entry, afl->queued_paths,
afl->pending_not_fuzzed, afl->pending_favored, bitmap_cvg, afl->unique_crashes,
afl->unique_hangs, afl->max_depth, eps); /* ignore errors */
fflush(plot_file);
fflush(afl->fsrv.plot_file);
}
/* Check terminal dimensions after resize. */
static void check_term_size(void) {
static void check_term_size(afl_state_t *afl) {
struct winsize ws;
term_too_small = 0;
afl->term_too_small = 0;
if (ioctl(1, TIOCGWINSZ, &ws)) return;
if (ws.ws_row == 0 || ws.ws_col == 0) return;
if (ws.ws_row < 24 || ws.ws_col < 79) term_too_small = 1;
if (ws.ws_row < 24 || ws.ws_col < 79) afl->term_too_small = 1;
}
/* A spiffy retro stats screen! This is called every stats_update_freq
/* A spiffy retro stats screen! This is called every afl->stats_update_freq
execve() calls, plus in several other circumstances. */
void show_stats(void) {
void show_stats(afl_state_t *afl) {
static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
static double avg_exec;
@ -203,18 +203,18 @@ void show_stats(void) {
/* Check if we're past the 10 minute mark. */
if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1;
if (cur_ms - afl->start_time > 10 * 60 * 1000) afl->run_over10m = 1;
/* Calculate smoothed exec speed stats. */
if (!last_execs) {
avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time);
avg_exec = ((double)afl->total_execs) * 1000 / (cur_ms - afl->start_time);
} else {
double cur_avg =
((double)(total_execs - last_execs)) * 1000 / (cur_ms - last_ms);
((double)(afl->total_execs - last_execs)) * 1000 / (cur_ms - last_ms);
/* If there is a dramatic (5x+) jump in speed, reset the indicator
more quickly. */
@ -227,20 +227,20 @@ void show_stats(void) {
}
last_ms = cur_ms;
last_execs = total_execs;
last_execs = afl->total_execs;
/* Tell the callers when to contact us (as measured in execs). */
stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
if (!stats_update_freq) stats_update_freq = 1;
afl->stats_update_freq = avg_exec / (UI_TARGET_HZ * 10);
if (!afl->stats_update_freq) afl->stats_update_freq = 1;
/* Do some bitmap stats. */
t_bytes = count_non_255_bytes(virgin_bits);
t_bytes = count_non_255_bytes(afl->virgin_bits);
t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
if (t_bytes)
stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes;
stab_ratio = 100 - ((double)afl->var_byte_count) * 100 / t_bytes;
else
stab_ratio = 100;
@ -249,9 +249,9 @@ void show_stats(void) {
if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {
last_stats_ms = cur_ms;
write_stats_file(t_byte_ratio, stab_ratio, avg_exec);
save_auto();
write_bitmap();
write_stats_file(afl, t_byte_ratio, stab_ratio, avg_exec);
save_auto(afl);
write_bitmap(afl);
}
@ -260,40 +260,40 @@ void show_stats(void) {
if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) {
last_plot_ms = cur_ms;
maybe_update_plot_file(t_byte_ratio, avg_exec);
maybe_update_plot_file(afl, t_byte_ratio, avg_exec);
}
/* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
if (!afl->dumb_mode && afl->cycles_wo_finds > 100 && !afl->pending_not_fuzzed &&
get_afl_env("AFL_EXIT_WHEN_DONE"))
stop_soon = 2;
afl->stop_soon = 2;
if (total_crashes && get_afl_env("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;
if (afl->total_crashes && get_afl_env("AFL_BENCH_UNTIL_CRASH")) afl->stop_soon = 2;
/* If we're not on TTY, bail out. */
if (not_on_tty) return;
if (afl->not_on_tty) return;
/* Compute some mildly useful bitmap stats. */
t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits);
t_bits = (MAP_SIZE << 3) - count_bits(afl->virgin_bits);
/* Now, for the visuals... */
if (clear_screen) {
if (afl->clear_screen) {
SAYF(TERM_CLEAR CURSOR_HIDE);
clear_screen = 0;
afl->clear_screen = 0;
check_term_size();
check_term_size(afl);
}
SAYF(TERM_HOME);
if (term_too_small) {
if (afl->term_too_small) {
SAYF(cBRI
"Your terminal is too small to display the UI.\n"
@ -305,20 +305,20 @@ void show_stats(void) {
/* Let's start by drawing a centered banner. */
banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) +
strlen(power_name) + 3 + 5;
banner_len = (afl->crash_mode ? 24 : 22) + strlen(VERSION) + strlen(afl->use_banner) +
strlen(afl->power_name) + 3 + 5;
banner_pad = (79 - banner_len) / 2;
memset(tmp, ' ', banner_pad);
#ifdef HAVE_AFFINITY
sprintf(tmp + banner_pad,
"%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}",
crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
use_banner, power_name, cpu_aff);
afl->crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
afl->use_banner, afl->power_name, afl->cpu_aff);
#else
sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]",
crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
use_banner, power_name);
afl->crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
afl->use_banner, afl->power_name);
#endif /* HAVE_AFFINITY */
SAYF("\n%s\n", tmp);
@ -341,26 +341,26 @@ void show_stats(void) {
" process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA
" overall results " bSTG bH2 bH2 bRT "\n");
if (dumb_mode) {
if (afl->dumb_mode) {
strcpy(tmp, cRST);
} else {
u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;
u64 min_wo_finds = (cur_ms - afl->last_path_time) / 1000 / 60;
/* First queue cycle: don't stop now! */
if (queue_cycle == 1 || min_wo_finds < 15)
if (afl->queue_cycle == 1 || min_wo_finds < 15)
strcpy(tmp, cMGN);
else
/* Subsequent cycles, but we're still making finds. */
if (cycles_wo_finds < 25 || min_wo_finds < 30)
if (afl->cycles_wo_finds < 25 || min_wo_finds < 30)
strcpy(tmp, cYEL);
else
/* No finds for a long time and no test cases to try. */
if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
if (afl->cycles_wo_finds > 100 && !afl->pending_not_fuzzed && min_wo_finds > 120)
strcpy(tmp, cLGN);
/* Default: cautiously OK to stop? */
@ -371,20 +371,20 @@ void show_stats(void) {
SAYF(bV bSTOP " run time : " cRST "%-33s " bSTG bV bSTOP
" cycles done : %s%-5s " bSTG bV "\n",
DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1));
DTD(cur_ms, afl->start_time), tmp, DI(afl->queue_cycle - 1));
/* We want to warn people about not seeing new paths after a full cycle,
except when resuming fuzzing or running in non-instrumented mode. */
if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 ||
in_bitmap || crash_mode)) {
if (!afl->dumb_mode && (afl->last_path_time || afl->resuming_fuzz || afl->queue_cycle == 1 ||
afl->in_bitmap || afl->crash_mode)) {
SAYF(bV bSTOP " last new path : " cRST "%-33s ",
DTD(cur_ms, last_path_time));
DTD(cur_ms, afl->last_path_time));
} else {
if (dumb_mode)
if (afl->dumb_mode)
SAYF(bV bSTOP " last new path : " cPIN "n/a" cRST
" (non-instrumented mode) ");
@ -397,24 +397,24 @@ void show_stats(void) {
}
SAYF(bSTG bV bSTOP " total paths : " cRST "%-5s " bSTG bV "\n",
DI(queued_paths));
DI(afl->queued_paths));
/* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH
limit with a '+' appended to the count. */
sprintf(tmp, "%s%s", DI(unique_crashes),
(unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
sprintf(tmp, "%s%s", DI(afl->unique_crashes),
(afl->unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
" uniq crashes : %s%-6s" bSTG bV "\n",
DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, tmp);
DTD(cur_ms, afl->last_crash_time), afl->unique_crashes ? cLRD : cRST, tmp);
sprintf(tmp, "%s%s", DI(unique_hangs),
(unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
sprintf(tmp, "%s%s", DI(afl->unique_hangs),
(afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
SAYF(bV bSTOP " last uniq hang : " cRST "%-33s " bSTG bV bSTOP
" uniq hangs : " cRST "%-6s" bSTG bV "\n",
DTD(cur_ms, last_hang_time), tmp);
DTD(cur_ms, afl->last_hang_time), tmp);
SAYF(bVR bH bSTOP cCYA
" cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
@ -424,21 +424,21 @@ void show_stats(void) {
together, but then cram them into a fixed-width field - so we need to
put them in a temporary buffer first. */
sprintf(tmp, "%s%s%u (%0.01f%%)", DI(current_entry),
queue_cur->favored ? "." : "*", queue_cur->fuzz_level,
((double)current_entry * 100) / queued_paths);
sprintf(tmp, "%s%s%u (%0.01f%%)", DI(afl->current_entry),
afl->queue_cur->favored ? "." : "*", afl->queue_cur->fuzz_level,
((double)afl->current_entry * 100) / afl->queued_paths);
SAYF(bV bSTOP " now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%0.02f%% / %0.02f%%",
((double)queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
((double)afl->queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
SAYF(" map density : %s%-21s" bSTG bV "\n",
t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST),
t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !afl->dumb_mode) ? cPIN : cRST),
tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
((double)cur_skipped_paths * 100) / queued_paths);
sprintf(tmp, "%s (%0.02f%%)", DI(afl->cur_skipped_paths),
((double)afl->cur_skipped_paths * 100) / afl->queued_paths);
SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp);
@ -450,47 +450,47 @@ void show_stats(void) {
" stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
" findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
((double)queued_favored) * 100 / queued_paths);
sprintf(tmp, "%s (%0.02f%%)", DI(afl->queued_favored),
((double)afl->queued_favored) * 100 / afl->queued_paths);
/* Yeah... it's still going on... halp? */
SAYF(bV bSTOP " now trying : " cRST "%-20s " bSTG bV bSTOP
" favored paths : " cRST "%-22s" bSTG bV "\n",
stage_name, tmp);
afl->stage_name, tmp);
if (!stage_max) {
if (!afl->stage_max) {
sprintf(tmp, "%s/-", DI(stage_cur));
sprintf(tmp, "%s/-", DI(afl->stage_cur));
} else {
sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max),
((double)stage_cur) * 100 / stage_max);
sprintf(tmp, "%s/%s (%0.02f%%)", DI(afl->stage_cur), DI(afl->stage_max),
((double)afl->stage_cur) * 100 / afl->stage_max);
}
SAYF(bV bSTOP " stage execs : " cRST "%-20s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov),
((double)queued_with_cov) * 100 / queued_paths);
sprintf(tmp, "%s (%0.02f%%)", DI(afl->queued_with_cov),
((double)afl->queued_with_cov) * 100 / afl->queued_paths);
SAYF(" new edges on : " cRST "%-22s" bSTG bV "\n", tmp);
sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes),
(unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
sprintf(tmp, "%s (%s%s unique)", DI(afl->total_crashes), DI(afl->unique_crashes),
(afl->unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : "");
if (crash_mode) {
if (afl->crash_mode) {
SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
" new crashes : %s%-22s" bSTG bV "\n",
DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
DI(afl->total_execs), afl->unique_crashes ? cLRD : cRST, tmp);
} else {
SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
" total crashes : %s%-22s" bSTG bV "\n",
DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
DI(afl->total_execs), afl->unique_crashes ? cLRD : cRST, tmp);
}
@ -510,8 +510,8 @@ void show_stats(void) {
}
sprintf(tmp, "%s (%s%s unique)", DI(total_tmouts), DI(unique_tmouts),
(unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
sprintf(tmp, "%s (%s%s unique)", DI(afl->total_tmouts), DI(afl->unique_tmouts),
(afl->unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
SAYF(bSTG bV bSTOP " total tmouts : " cRST "%-22s" bSTG bV "\n", tmp);
@ -521,68 +521,68 @@ void show_stats(void) {
" fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA
" path geometry " bSTG bH5 bH2 bVL "\n");
if (skip_deterministic) {
if (afl->skip_deterministic) {
strcpy(tmp, "n/a, n/a, n/a");
} else {
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP1]),
DI(stage_cycles[STAGE_FLIP1]), DI(stage_finds[STAGE_FLIP2]),
DI(stage_cycles[STAGE_FLIP2]), DI(stage_finds[STAGE_FLIP4]),
DI(stage_cycles[STAGE_FLIP4]));
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_FLIP1]),
DI(afl->stage_cycles[STAGE_FLIP1]), DI(afl->stage_finds[STAGE_FLIP2]),
DI(afl->stage_cycles[STAGE_FLIP2]), DI(afl->stage_finds[STAGE_FLIP4]),
DI(afl->stage_cycles[STAGE_FLIP4]));
}
SAYF(bV bSTOP " bit flips : " cRST "%-36s " bSTG bV bSTOP
" levels : " cRST "%-10s" bSTG bV "\n",
tmp, DI(max_depth));
tmp, DI(afl->max_depth));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP8]),
DI(stage_cycles[STAGE_FLIP8]), DI(stage_finds[STAGE_FLIP16]),
DI(stage_cycles[STAGE_FLIP16]), DI(stage_finds[STAGE_FLIP32]),
DI(stage_cycles[STAGE_FLIP32]));
if (!afl->skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_FLIP8]),
DI(afl->stage_cycles[STAGE_FLIP8]), DI(afl->stage_finds[STAGE_FLIP16]),
DI(afl->stage_cycles[STAGE_FLIP16]), DI(afl->stage_finds[STAGE_FLIP32]),
DI(afl->stage_cycles[STAGE_FLIP32]));
SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP
" pending : " cRST "%-10s" bSTG bV "\n",
tmp, DI(pending_not_fuzzed));
tmp, DI(afl->pending_not_fuzzed));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_ARITH8]),
DI(stage_cycles[STAGE_ARITH8]), DI(stage_finds[STAGE_ARITH16]),
DI(stage_cycles[STAGE_ARITH16]), DI(stage_finds[STAGE_ARITH32]),
DI(stage_cycles[STAGE_ARITH32]));
if (!afl->skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_ARITH8]),
DI(afl->stage_cycles[STAGE_ARITH8]), DI(afl->stage_finds[STAGE_ARITH16]),
DI(afl->stage_cycles[STAGE_ARITH16]), DI(afl->stage_finds[STAGE_ARITH32]),
DI(afl->stage_cycles[STAGE_ARITH32]));
SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP
" pend fav : " cRST "%-10s" bSTG bV "\n",
tmp, DI(pending_favored));
tmp, DI(afl->pending_favored));
if (!skip_deterministic)
if (!afl->skip_deterministic)
sprintf(
tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_INTEREST8]),
DI(stage_cycles[STAGE_INTEREST8]), DI(stage_finds[STAGE_INTEREST16]),
DI(stage_cycles[STAGE_INTEREST16]), DI(stage_finds[STAGE_INTEREST32]),
DI(stage_cycles[STAGE_INTEREST32]));
tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_INTEREST8]),
DI(afl->stage_cycles[STAGE_INTEREST8]), DI(afl->stage_finds[STAGE_INTEREST16]),
DI(afl->stage_cycles[STAGE_INTEREST16]), DI(afl->stage_finds[STAGE_INTEREST32]),
DI(afl->stage_cycles[STAGE_INTEREST32]));
SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP
" own finds : " cRST "%-10s" bSTG bV "\n",
tmp, DI(queued_discovered));
tmp, DI(afl->queued_discovered));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_EXTRAS_UO]),
DI(stage_cycles[STAGE_EXTRAS_UO]), DI(stage_finds[STAGE_EXTRAS_UI]),
DI(stage_cycles[STAGE_EXTRAS_UI]), DI(stage_finds[STAGE_EXTRAS_AO]),
DI(stage_cycles[STAGE_EXTRAS_AO]));
if (!afl->skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_EXTRAS_UO]),
DI(afl->stage_cycles[STAGE_EXTRAS_UO]), DI(afl->stage_finds[STAGE_EXTRAS_UI]),
DI(afl->stage_cycles[STAGE_EXTRAS_UI]), DI(afl->stage_finds[STAGE_EXTRAS_AO]),
DI(afl->stage_cycles[STAGE_EXTRAS_AO]));
SAYF(bV bSTOP " dictionary : " cRST "%-36s " bSTG bV bSTOP
" imported : " cRST "%-10s" bSTG bV "\n",
tmp, sync_id ? DI(queued_imported) : (u8*)"n/a");
tmp, afl->sync_id ? DI(afl->queued_imported) : (u8*)"n/a");
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_HAVOC]),
DI(stage_cycles[STAGE_HAVOC]), DI(stage_finds[STAGE_SPLICE]),
DI(stage_cycles[STAGE_SPLICE]), DI(stage_finds[STAGE_RADAMSA]),
DI(stage_cycles[STAGE_RADAMSA]));
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_HAVOC]),
DI(afl->stage_cycles[STAGE_HAVOC]), DI(afl->stage_finds[STAGE_SPLICE]),
DI(afl->stage_cycles[STAGE_SPLICE]), DI(afl->stage_finds[STAGE_RADAMSA]),
DI(afl->stage_cycles[STAGE_RADAMSA]));
SAYF(bV bSTOP " havoc/rad : " cRST "%-36s " bSTG bV bSTOP, tmp);
@ -592,51 +592,51 @@ void show_stats(void) {
strcpy(tmp, "n/a");
SAYF(" stability : %s%-10s" bSTG bV "\n",
(stab_ratio < 85 && var_byte_count > 40)
(stab_ratio < 85 && afl->var_byte_count > 40)
? cLRD
: ((queued_variable && (!persistent_mode || var_byte_count > 20))
: ((afl->queued_variable && (!afl->persistent_mode || afl->var_byte_count > 20))
? cMGN
: cRST),
tmp);
if (cmplog_mode) {
if (afl->shm.cmplog_mode) {
sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]),
DI(stage_cycles[STAGE_PYTHON]),
DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(stage_cycles[STAGE_CUSTOM_MUTATOR]),
DI(stage_finds[STAGE_COLORIZATION]),
DI(stage_cycles[STAGE_COLORIZATION]), DI(stage_finds[STAGE_ITS]),
DI(stage_cycles[STAGE_ITS]));
sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(afl->stage_finds[STAGE_PYTHON]),
DI(afl->stage_cycles[STAGE_PYTHON]),
DI(afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(afl->stage_cycles[STAGE_CUSTOM_MUTATOR]),
DI(afl->stage_finds[STAGE_COLORIZATION]),
DI(afl->stage_cycles[STAGE_COLORIZATION]), DI(afl->stage_finds[STAGE_ITS]),
DI(afl->stage_cycles[STAGE_ITS]));
SAYF(bV bSTOP " custom/rq : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n",
tmp);
} else {
sprintf(tmp, "%s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]),
DI(stage_cycles[STAGE_PYTHON]),
DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
sprintf(tmp, "%s/%s, %s/%s", DI(afl->stage_finds[STAGE_PYTHON]),
DI(afl->stage_cycles[STAGE_PYTHON]),
DI(afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(afl->stage_cycles[STAGE_CUSTOM_MUTATOR]));
SAYF(bV bSTOP " py/custom : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n",
tmp);
}
if (!bytes_trim_out) {
if (!afl->bytes_trim_out) {
sprintf(tmp, "n/a, ");
} else {
sprintf(tmp, "%0.02f%%/%s, ",
((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in,
DI(trim_execs));
((double)(afl->bytes_trim_in - afl->bytes_trim_out)) * 100 / afl->bytes_trim_in,
DI(afl->trim_execs));
}
if (!blocks_eff_total) {
if (!afl->blocks_eff_total) {
u8 tmp2[128];
@ -648,17 +648,17 @@ void show_stats(void) {
u8 tmp2[128];
sprintf(tmp2, "%0.02f%%",
((double)(blocks_eff_total - blocks_eff_select)) * 100 /
blocks_eff_total);
((double)(afl->blocks_eff_total - afl->blocks_eff_select)) * 100 /
afl->blocks_eff_total);
strcat(tmp, tmp2);
}
if (mutator) {
if (afl->mutator) {
sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
sprintf(tmp, "%s/%s", DI(afl->stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(afl->stage_cycles[STAGE_CUSTOM_MUTATOR]));
SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bV RESET_G1, tmp);
} else {
@ -669,27 +669,27 @@ void show_stats(void) {
/* Provide some CPU utilization stats. */
if (cpu_core_count) {
if (afl->cpu_core_count) {
double cur_runnable = get_runnable_processes();
u32 cur_utilization = cur_runnable * 100 / cpu_core_count;
u32 cur_utilization = cur_runnable * 100 / afl->cpu_core_count;
u8* cpu_color = cCYA;
/* If we could still run one or more processes, use green. */
if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count)
if (afl->cpu_core_count > 1 && cur_runnable + 1 <= afl->cpu_core_count)
cpu_color = cLGN;
/* If we're clearly oversubscribed, use red. */
if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;
if (!afl->no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD;
#ifdef HAVE_AFFINITY
if (cpu_aff >= 0) {
if (afl->cpu_aff >= 0) {
SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(cpu_aff, 999),
SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(afl->cpu_aff, 999),
cpu_color, MIN(cur_utilization, 999));
} else {
@ -723,15 +723,15 @@ void show_stats(void) {
plus a bunch of warnings. Some calibration stuff also ended up here,
along with several hardcoded constants. Maybe clean up eventually. */
void show_init_stats(void) {
void show_init_stats(afl_state_t *afl) {
struct queue_entry* q = queue;
struct queue_entry* q = afl->queue;
u32 min_bits = 0, max_bits = 0;
u64 min_us = 0, max_us = 0;
u64 avg_us = 0;
u32 max_len = 0;
if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles;
if (afl->total_cal_cycles) avg_us = afl->total_cal_us / afl->total_cal_cycles;
while (q) {
@ -749,20 +749,20 @@ void show_init_stats(void) {
SAYF("\n");
if (avg_us > ((qemu_mode || unicorn_mode) ? 50000 : 10000))
if (avg_us > ((afl->qemu_mode || afl->unicorn_mode) ? 50000 : 10000))
WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.md.",
doc_path);
/* Let's keep things moving with slow binaries. */
if (avg_us > 50000)
havoc_div = 10; /* 0-19 execs/sec */
afl->havoc_div = 10; /* 0-19 execs/sec */
else if (avg_us > 20000)
havoc_div = 5; /* 20-49 execs/sec */
afl->havoc_div = 5; /* 20-49 execs/sec */
else if (avg_us > 10000)
havoc_div = 2; /* 50-100 execs/sec */
afl->havoc_div = 2; /* 50-100 execs/sec */
if (!resuming_fuzz) {
if (!afl->resuming_fuzz) {
if (max_len > 50 * 1024)
WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.md!",
@ -771,14 +771,14 @@ void show_init_stats(void) {
WARNF("Some test cases are big (%s) - see %s/perf_tips.md.", DMS(max_len),
doc_path);
if (useless_at_start && !in_bitmap)
if (afl->useless_at_start && !afl->in_bitmap)
WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");
if (queued_paths > 100)
if (afl->queued_paths > 100)
WARNF(cLRD
"You probably have far too many input files! Consider trimming "
"down.");
else if (queued_paths > 20)
else if (afl->queued_paths > 20)
WARNF("You have lots of input files; try starting small.");
}
@ -789,12 +789,12 @@ void show_init_stats(void) {
"%u favored, %u variable, %u total\n" cGRA " Bitmap range : " cRST
"%u to %u bits (average: %0.02f bits)\n" cGRA
" Exec timing : " cRST "%s to %s us (average: %s us)\n",
queued_favored, queued_variable, queued_paths, min_bits, max_bits,
((double)total_bitmap_size) /
(total_bitmap_entries ? total_bitmap_entries : 1),
afl->queued_favored, afl->queued_variable, afl->queued_paths, min_bits, max_bits,
((double)afl->total_bitmap_size) /
(afl->total_bitmap_entries ? afl->total_bitmap_entries : 1),
DI(min_us), DI(max_us), DI(avg_us));
if (!timeout_given) {
if (!afl->timeout_given) {
/* Figure out the appropriate timeout. The basic idea is: 5x average or
1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.
@ -804,33 +804,33 @@ void show_init_stats(void) {
our patience is wearing thin =) */
if (avg_us > 50000)
exec_tmout = avg_us * 2 / 1000;
afl->fsrv.exec_tmout = avg_us * 2 / 1000;
else if (avg_us > 10000)
exec_tmout = avg_us * 3 / 1000;
afl->fsrv.exec_tmout = avg_us * 3 / 1000;
else
exec_tmout = avg_us * 5 / 1000;
afl->fsrv.exec_tmout = avg_us * 5 / 1000;
exec_tmout = MAX(exec_tmout, max_us / 1000);
exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
afl->fsrv.exec_tmout = MAX(afl->fsrv.exec_tmout, max_us / 1000);
afl->fsrv.exec_tmout = (afl->fsrv.exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT;
if (afl->fsrv.exec_tmout > EXEC_TIMEOUT) afl->fsrv.exec_tmout = EXEC_TIMEOUT;
ACTF("No -t option specified, so I'll use exec timeout of %u ms.",
exec_tmout);
afl->fsrv.exec_tmout);
timeout_given = 1;
afl->timeout_given = 1;
} else if (timeout_given == 3) {
} else if (afl->timeout_given == 3) {
ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout);
ACTF("Applying timeout settings from resumed session (%u ms).", afl->fsrv.exec_tmout);
}
/* In dumb mode, re-running every timing out test case with a generous time
limit is very expensive, so let's select a more conservative default. */
if (dumb_mode && !get_afl_env("AFL_HANG_TMOUT"))
hang_tmout = MIN(EXEC_TIMEOUT, exec_tmout * 2 + 100);
if (afl->dumb_mode && !get_afl_env("AFL_HANG_TMOUT"))
afl->hang_tmout = MIN(EXEC_TIMEOUT, afl->fsrv.exec_tmout * 2 + 100);
OKF("All set and ready to roll!");

File diff suppressed because it is too large Load Diff

View File

@ -36,9 +36,11 @@
#include "hash.h"
#include "sharedmem.h"
#include "cmplog.h"
#include "list.h"
#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
@ -59,114 +61,116 @@
#include <sys/shm.h>
#endif
extern unsigned char *trace_bits;
list_t shm_list = {0};
/* Get rid of shared memory. */
void afl_shm_deinit(sharedmem_t *shm) {
list_remove(&shm_list, shm);
#ifdef USEMMAP
/* ================ Proteas ================ */
int g_shm_fd = -1;
unsigned char *g_shm_base = NULL;
char g_shm_file_path[L_tmpnam];
/* ========================================= */
#else
static s32 shm_id; /* ID of the SHM region */
static s32 cmplog_shm_id;
#endif
if (shm->map != NULL) {
int cmplog_mode;
struct cmp_map *cmp_map;
/* Get rid of shared memory (atexit handler). */
void remove_shm(void) {
#ifdef USEMMAP
if (g_shm_base != NULL) {
munmap(g_shm_base, MAP_SIZE);
g_shm_base = NULL;
munmap(shm->map, shm->size_alloc);
shm->map = NULL;
}
if (g_shm_fd != -1) {
if (shm->g_shm_fd != -1) {
close(g_shm_fd);
g_shm_fd = -1;
close(shm->g_shm_fd);
shm->g_shm_fd = -1;
}
#else
shmctl(shm_id, IPC_RMID, NULL);
if (cmplog_mode) shmctl(cmplog_shm_id, IPC_RMID, NULL);
shmctl(shm->shm_id, IPC_RMID, NULL);
if (shm->cmplog_mode) shmctl(shm->cmplog_shm_id, IPC_RMID, NULL);
#endif
shm->map = NULL;
}
/* Configure shared memory. */
/* At exit, remove all leftover maps */
void setup_shm(unsigned char dumb_mode) {
void afl_shm_atexit() {
LIST_FOREACH(&shm_list, sharedmem_t, { afl_shm_deinit(el); });
}
/* Configure shared memory.
Returns a pointer to shm->map for ease of use.
*/
u8 *afl_shm_init(sharedmem_t *shm, size_t map_size, unsigned char dumb_mode) {
shm->size_alloc = shm->size_used = map_size;
shm->map = NULL;
#ifdef USEMMAP
/* generate random file name for multi instance */
/* thanks to f*cking glibc we can not use tmpnam securely, it generates a
* security warning that cannot be suppressed */
/* so we do this worse workaround */
snprintf(g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random());
shm->g_shm_fd = -1;
/* ======
generate random file name for multi instance
thanks to f*cking glibc we can not use tmpnam securely, it generates a
security warning that cannot be suppressed
so we do this worse workaround */
snprintf(shm->g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random());
/* create the shared memory segment as if it was a file */
g_shm_fd = shm_open(g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
if (g_shm_fd == -1) { PFATAL("shm_open() failed"); }
shm->g_shm_fd = shm_open(shm->g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
if (shm->g_shm_fd == -1) { PFATAL("shm_open() failed"); }
/* configure the size of the shared memory segment */
if (ftruncate(g_shm_fd, MAP_SIZE)) {
if (ftruncate(shm->g_shm_fd, map_size)) {
PFATAL("setup_shm(): ftruncate() failed");
}
/* map the shared memory segment to the address space of the process */
g_shm_base =
mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0);
if (g_shm_base == MAP_FAILED) {
shm->map =
mmap(0, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_size->g_shm_fd, 0);
if (map_size->map == MAP_FAILED) {
close(g_shm_fd);
g_shm_fd = -1;
close(map_size->g_shm_fd);
map_size->g_shm_fd = -1;
PFATAL("mmap() failed");
}
atexit(remove_shm);
/* If somebody is asking us to fuzz instrumented binaries in dumb mode,
we don't want them to detect instrumentation, since we won't be sending
fork server commands. This should be replaced with better auto-detection
later on, perhaps? */
if (!dumb_mode) setenv(SHM_ENV_VAR, g_shm_file_path, 1);
if (!dumb_mode) setenv(SHM_ENV_VAR, shm->g_shm_file_path, 1);
trace_bits = g_shm_base;
if (trace_bits == -1 || !trace_bits) PFATAL("mmap() failed");
if (shm->map == -1 || !shm->map) PFATAL("mmap() failed");
#else
u8 *shm_str;
shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);
shm->shm_id = shmget(IPC_PRIVATE, map_size, IPC_CREAT | IPC_EXCL | 0600);
if (shm_id < 0) PFATAL("shmget() failed");
if (shm->shm_id < 0) PFATAL("shmget() failed");
if (cmplog_mode) {
if (shm->cmplog_mode) {
cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map),
shm->cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map),
IPC_CREAT | IPC_EXCL | 0600);
if (cmplog_shm_id < 0) PFATAL("shmget() failed");
if (shm->cmplog_shm_id < 0) PFATAL("shmget() failed");
}
atexit(remove_shm);
shm_str = alloc_printf("%d", shm_id);
shm_str = alloc_printf("%d", shm->shm_id);
/* If somebody is asking us to fuzz instrumented binaries in dumb mode,
we don't want them to detect instrumentation, since we won't be sending
@ -177,9 +181,9 @@ void setup_shm(unsigned char dumb_mode) {
ck_free(shm_str);
if (cmplog_mode) {
if (shm->cmplog_mode) {
shm_str = alloc_printf("%d", cmplog_shm_id);
shm_str = alloc_printf("%d", shm->cmplog_shm_id);
if (!dumb_mode) setenv(CMPLOG_SHM_ENV_VAR, shm_str, 1);
@ -187,19 +191,24 @@ void setup_shm(unsigned char dumb_mode) {
}
trace_bits = shmat(shm_id, NULL, 0);
shm->map = shmat(shm->shm_id, NULL, 0);
if (trace_bits == (void *)-1 || !trace_bits) PFATAL("shmat() failed");
if (shm->map == (void *)-1 || !shm->map) PFATAL("shmat() failed");
if (cmplog_mode) {
if (shm->cmplog_mode) {
cmp_map = shmat(cmplog_shm_id, NULL, 0);
shm->cmp_map = shmat(shm->cmplog_shm_id, NULL, 0);
if (cmp_map == (void *)-1 || !cmp_map) PFATAL("shmat() failed");
if (shm->cmp_map == (void *)-1 || !shm->cmp_map) PFATAL("shmat() failed");
}
#endif
}
list_append(&shm_list, shm);
atexit(afl_shm_atexit);
return shm->map;
}

View File

@ -59,50 +59,28 @@
#include <sys/types.h>
#include <sys/resource.h>
u8* trace_bits; /* SHM with instrumentation bitmap */
u8 be_quiet;
s32 forksrv_pid, /* PID of the fork server */
child_pid; /* PID of the tested program */
s32 fsrv_ctl_fd, /* Fork server control pipe (write) */
fsrv_st_fd; /* Fork server status pipe (read) */
s32 out_fd; /* Persistent fd for stdin_file */
s32 dev_null_fd = -1; /* FD to /dev/null */
s32 out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1;
FILE* plot_file;
u8 uses_asan, be_quiet;
u8* trace_bits; /* SHM with instrumentation bitmap */
u8 *out_file, /* Trace output file */
*stdin_file, /* stdin file */
u8 *stdin_file, /* stdin file */
*in_dir, /* input folder */
*doc_path, /* Path to docs */
*at_file = NULL; /* Substitution string for @@ */
*at_file = NULL; /* Substitution string for @@ */
static u8* in_data; /* Input data */
u32 exec_tmout; /* Exec timeout (ms) */
static u32 total, highest; /* tuple content information */
static u32 in_len, /* Input data length */
arg_offset, total_execs; /* Total number of execs */
u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */
u8 quiet_mode, /* Hide non-essential messages? */
edges_only, /* Ignore hit counts? */
raw_instr_output, /* Do not apply AFL filters */
cmin_mode, /* Generate output in afl-cmin mode? */
binary_mode, /* Write output as a binary map */
use_stdin = 1, /* use stdin - unused here */
keep_cores; /* Allow coredumps? */
static volatile u8 stop_soon, /* Ctrl-C pressed? */
child_timed_out, /* Child timed out? */
child_crashed; /* Child crashed? */
static u8 qemu_mode;
@ -168,7 +146,7 @@ static void at_exit_handler(void) {
/* Write results. */
static u32 write_results_to_file(u8* out_file) {
static u32 write_results_to_file(afl_forkserver_t *fsrv) {
s32 fd;
u32 i, ret = 0;
@ -176,30 +154,30 @@ static u32 write_results_to_file(u8* out_file) {
u8 cco = !!getenv("AFL_CMIN_CRASHES_ONLY"),
caa = !!getenv("AFL_CMIN_ALLOW_ANY");
if (!strncmp(out_file, "/dev/", 5)) {
if (!strncmp(fsrv->out_file, "/dev/", 5)) {
fd = open(out_file, O_WRONLY, 0600);
if (fd < 0) PFATAL("Unable to open '%s'", out_file);
fd = open(fsrv->out_file, O_WRONLY, 0600);
if (fd < 0) PFATAL("Unable to open '%s'", fsrv->out_file);
} else if (!strcmp(out_file, "-")) {
} else if (!strcmp(fsrv->out_file, "-")) {
fd = dup(1);
if (fd < 0) PFATAL("Unable to open stdout");
} else {
unlink(out_file); /* Ignore errors */
fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
unlink(fsrv->out_file); /* Ignore errors */
fd = open(fsrv->out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fsrv->out_file);
}
if (binary_mode) {
for (i = 0; i < MAP_SIZE; i++)
if (trace_bits[i]) ret++;
if (fsrv->trace_bits[i]) ret++;
ck_write(fd, trace_bits, MAP_SIZE, out_file);
ck_write(fd, fsrv->trace_bits, MAP_SIZE, fsrv->out_file);
close(fd);
} else {
@ -210,22 +188,22 @@ static u32 write_results_to_file(u8* out_file) {
for (i = 0; i < MAP_SIZE; i++) {
if (!trace_bits[i]) continue;
if (!fsrv->trace_bits[i]) continue;
ret++;
total += trace_bits[i];
if (highest < trace_bits[i]) highest = trace_bits[i];
total += fsrv->trace_bits[i];
if (highest < fsrv->trace_bits[i]) highest = fsrv->trace_bits[i];
if (cmin_mode) {
if (child_timed_out) break;
if (fsrv->child_timed_out) break;
if (!caa && child_crashed != cco) break;
fprintf(f, "%u%u\n", trace_bits[i], i);
fprintf(f, "%u%u\n", fsrv->trace_bits[i], i);
} else
fprintf(f, "%06u:%u\n", i, trace_bits[i]);
fprintf(f, "%06u:%u\n", i, fsrv->trace_bits[i]);
}
@ -239,9 +217,9 @@ static u32 write_results_to_file(u8* out_file) {
/* Write results. */
static u32 write_results(void) {
static u32 write_results(afl_forkserver_t *fsrv) {
return write_results_to_file(out_file);
return write_results_to_file(fsrv);
}
@ -269,69 +247,69 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
is unlinked and a new one is created. Otherwise, out_fd is rewound and
truncated. */
static void write_to_testcase(void* mem, u32 len) {
static void write_to_testcase(afl_forkserver_t *fsrv, void* mem, u32 len) {
lseek(out_fd, 0, SEEK_SET);
ck_write(out_fd, mem, len, out_file);
if (ftruncate(out_fd, len)) PFATAL("ftruncate() failed");
lseek(out_fd, 0, SEEK_SET);
lseek(fsrv->out_fd, 0, SEEK_SET);
ck_write(fsrv->out_fd, mem, len, fsrv->out_file);
if (ftruncate(fsrv->out_fd, len)) PFATAL("ftruncate() failed");
lseek(fsrv->out_fd, 0, SEEK_SET);
}
/* Execute target application. Returns 0 if the changes are a dud, or
1 if they should be kept. */
static u8 run_target_forkserver(char** argv, u8* mem, u32 len) {
static u8 run_target_forkserver(afl_forkserver_t *fsrv, char** argv, u8* mem, u32 len) {
static struct itimerval it;
static u32 prev_timed_out = 0;
int status = 0;
memset(trace_bits, 0, MAP_SIZE);
memset(fsrv->trace_bits, 0, MAP_SIZE);
MEM_BARRIER();
write_to_testcase(mem, len);
write_to_testcase(fsrv, mem, len);
s32 res;
/* we have the fork server up and running, so simply
tell it to have at it, and then read back PID. */
if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if ((res = write(fsrv->fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
if ((res = read(fsrv->fsrv_st_fd, &fsrv->child_pid, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
if (fsrv->child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
/* Configure timeout, wait for child, cancel timeout. */
if (exec_tmout) {
if (fsrv->exec_tmout) {
it.it_value.tv_sec = (exec_tmout / 1000);
it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
it.it_value.tv_sec = (fsrv->exec_tmout / 1000);
it.it_value.tv_usec = (fsrv->exec_tmout % 1000) * 1000;
}
setitimer(ITIMER_REAL, &it, NULL);
if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
if ((res = read(fsrv->fsrv_st_fd, &status, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to communicate with fork server (OOM?)");
}
child_pid = 0;
fsrv->child_pid = 0;
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
@ -341,24 +319,24 @@ static u8 run_target_forkserver(char** argv, u8* mem, u32 len) {
/* Clean up bitmap, analyze exit condition, etc. */
if (*(u32*)trace_bits == EXEC_FAIL_SIG)
if (*(u32*)fsrv->trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute '%s'", argv[0]);
classify_counts(trace_bits,
classify_counts(fsrv->trace_bits,
binary_mode ? count_class_binary : count_class_human);
total_execs++;
if (stop_soon) {
SAYF(cRST cLRD "\n+++ afl-showmap folder mode aborted by user +++\n" cRST);
close(write_to_file(out_file, in_data, in_len));
close(write_to_file(fsrv->out_file, in_data, in_len));
exit(1);
}
/* Always discard inputs that time out. */
if (child_timed_out) { return 0; }
if (fsrv->child_timed_out) { return 0; }
/* Handle crashing inputs depending on current mode. */
@ -401,7 +379,7 @@ u32 read_file(u8* in_file) {
/* Execute target application. */
static void run_target(char** argv) {
static void run_target(afl_forkserver_t *fsrv, char** argv) {
static struct itimerval it;
int status = 0;
@ -410,11 +388,11 @@ static void run_target(char** argv) {
MEM_BARRIER();
child_pid = fork();
fsrv->child_pid = fork();
if (child_pid < 0) PFATAL("fork() failed");
if (fsrv->child_pid < 0) PFATAL("fork() failed");
if (!child_pid) {
if (!fsrv->child_pid) {
struct rlimit r;
@ -424,7 +402,7 @@ static void run_target(char** argv) {
if (fd < 0 || dup2(fd, 1) < 0 || dup2(fd, 2) < 0) {
*(u32*)trace_bits = EXEC_FAIL_SIG;
*(u32*)fsrv->trace_bits = EXEC_FAIL_SIG;
PFATAL("Descriptor initialization failed");
}
@ -433,9 +411,9 @@ static void run_target(char** argv) {
}
if (mem_limit) {
if (fsrv->mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
r.rlim_max = r.rlim_cur = ((rlim_t)fsrv->mem_limit) << 20;
#ifdef RLIMIT_AS
@ -460,28 +438,28 @@ static void run_target(char** argv) {
setsid();
execv(target_path, argv);
execv(fsrv->target_path, argv);
*(u32*)trace_bits = EXEC_FAIL_SIG;
*(u32*)fsrv->trace_bits = EXEC_FAIL_SIG;
exit(0);
}
/* Configure timeout, wait for child, cancel timeout. */
if (exec_tmout) {
if (fsrv->exec_tmout) {
child_timed_out = 0;
it.it_value.tv_sec = (exec_tmout / 1000);
it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
fsrv->child_timed_out = 0;
it.it_value.tv_sec = (fsrv->exec_tmout / 1000);
it.it_value.tv_usec = (fsrv->exec_tmout % 1000) * 1000;
}
setitimer(ITIMER_REAL, &it, NULL);
if (waitpid(child_pid, &status, 0) <= 0) FATAL("waitpid() failed");
if (waitpid(fsrv->child_pid, &status, 0) <= 0) FATAL("waitpid() failed");
child_pid = 0;
fsrv->child_pid = 0;
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
setitimer(ITIMER_REAL, &it, NULL);
@ -490,19 +468,19 @@ static void run_target(char** argv) {
/* Clean up bitmap, analyze exit condition, etc. */
if (*(u32*)trace_bits == EXEC_FAIL_SIG)
if (*(u32*)fsrv->trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute '%s'", argv[0]);
classify_counts(trace_bits,
classify_counts(fsrv->trace_bits,
binary_mode ? count_class_binary : count_class_human);
if (!quiet_mode) SAYF(cRST "-- Program output ends --\n");
if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1;
if (!fsrv->child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1;
if (!quiet_mode) {
if (child_timed_out)
if (fsrv->child_timed_out)
SAYF(cLRD "\n+++ Program timed off +++\n" cRST);
else if (stop_soon)
SAYF(cLRD "\n+++ Program aborted by user +++\n" cRST);
@ -514,13 +492,14 @@ static void run_target(char** argv) {
}
extern afl_forkserver_t *fsrv_glob;
/* Handle Ctrl-C and the like. */
static void handle_stop_sig(int sig) {
stop_soon = 1;
if (child_pid > 0) kill(child_pid, SIGKILL);
afl_fsrv_killall();
}
@ -667,16 +646,16 @@ static void usage(u8* argv0) {
/* Find binary. */
static void find_binary(u8* fname) {
static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
u8* env_path = 0;
struct stat st;
if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
target_path = ck_strdup(fname);
fsrv->target_path = ck_strdup(fname);
if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
if (stat(fsrv->target_path, &st) || !S_ISREG(st.st_mode) ||
!(st.st_mode & 0111) || st.st_size < 4)
FATAL("Program '%s' not found or not executable", fname);
@ -699,22 +678,22 @@ static void find_binary(u8* fname) {
env_path = delim;
if (cur_elem[0])
target_path = alloc_printf("%s/%s", cur_elem, fname);
fsrv->target_path = alloc_printf("%s/%s", cur_elem, fname);
else
target_path = ck_strdup(fname);
fsrv->target_path = ck_strdup(fname);
ck_free(cur_elem);
if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
if (!stat(fsrv->target_path, &st) && S_ISREG(st.st_mode) &&
(st.st_mode & 0111) && st.st_size >= 4)
break;
ck_free(target_path);
target_path = 0;
ck_free(fsrv->target_path);
fsrv->target_path = 0;
}
if (!target_path) FATAL("Program '%s' not found or not executable", fname);
if (!fsrv->target_path) FATAL("Program '%s' not found or not executable", fname);
}
@ -724,11 +703,16 @@ static void find_binary(u8* fname) {
int main(int argc, char** argv, char** envp) {
//TODO: u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */
s32 opt, i;
u8 mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0;
u32 tcnt = 0;
char** use_argv;
afl_forkserver_t *fsrv = calloc(1, sizeof(afl_forkserver_t));
afl_fsrv_init(fsrv);
doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
if (getenv("AFL_QUIET") != NULL) be_quiet = 1;
@ -744,8 +728,8 @@ int main(int argc, char** argv, char** envp) {
case 'o':
if (out_file) FATAL("Multiple -o options not supported");
out_file = optarg;
if (fsrv->out_file) FATAL("Multiple -o options not supported");
fsrv->out_file = optarg;
break;
case 'm': {
@ -757,29 +741,29 @@ int main(int argc, char** argv, char** envp) {
if (!strcmp(optarg, "none")) {
mem_limit = 0;
fsrv->mem_limit = 0;
break;
}
if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
if (sscanf(optarg, "%llu%c", &fsrv->mem_limit, &suffix) < 1 ||
optarg[0] == '-')
FATAL("Bad syntax used for -m");
switch (suffix) {
case 'T': mem_limit *= 1024 * 1024; break;
case 'G': mem_limit *= 1024; break;
case 'k': mem_limit /= 1024; break;
case 'T': fsrv->mem_limit *= 1024 * 1024; break;
case 'G': fsrv->mem_limit *= 1024; break;
case 'k': fsrv->mem_limit /= 1024; break;
case 'M': break;
default: FATAL("Unsupported suffix or bad syntax for -m");
}
if (mem_limit < 5) FATAL("Dangerously low value of -m");
if (fsrv->mem_limit < 5) FATAL("Dangerously low value of -m");
if (sizeof(rlim_t) == 4 && mem_limit > 2000)
if (sizeof(rlim_t) == 4 && fsrv->mem_limit > 2000)
FATAL("Value of -m out of range on 32-bit systems");
}
@ -788,7 +772,7 @@ int main(int argc, char** argv, char** envp) {
case 'f': // only in here to avoid a compiler warning for use_stdin
use_stdin = 0;
fsrv->use_stdin = 0;
FATAL("Option -f is not supported in afl-showmap");
break;
@ -800,9 +784,9 @@ int main(int argc, char** argv, char** envp) {
if (strcmp(optarg, "none")) {
exec_tmout = atoi(optarg);
fsrv->exec_tmout = atoi(optarg);
if (exec_tmout < 20 || optarg[0] == '-')
if (fsrv->exec_tmout < 20 || optarg[0] == '-')
FATAL("Dangerously low value of -t");
}
@ -839,7 +823,7 @@ int main(int argc, char** argv, char** envp) {
case 'Q':
if (qemu_mode) FATAL("Multiple -Q options not supported");
if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;
if (!mem_limit_given) fsrv->mem_limit = MEM_LIMIT_QEMU;
qemu_mode = 1;
break;
@ -847,7 +831,7 @@ int main(int argc, char** argv, char** envp) {
case 'U':
if (unicorn_mode) FATAL("Multiple -U options not supported");
if (!mem_limit_given) mem_limit = MEM_LIMIT_UNICORN;
if (!mem_limit_given) fsrv->mem_limit = MEM_LIMIT_UNICORN;
unicorn_mode = 1;
break;
@ -858,7 +842,7 @@ int main(int argc, char** argv, char** envp) {
qemu_mode = 1;
use_wine = 1;
if (!mem_limit_given) mem_limit = 0;
if (!mem_limit_given) fsrv->mem_limit = 0;
break;
@ -892,32 +876,33 @@ int main(int argc, char** argv, char** envp) {
}
if (optind == argc || !out_file) usage(argv[0]);
if (optind == argc || !fsrv->out_file) usage(argv[0]);
check_environment_vars(envp);
setup_shm(0);
sharedmem_t shm = {0};
fsrv->trace_bits = afl_shm_init(&shm, MAP_SIZE, 0);
setup_signal_handlers();
set_up_environment();
find_binary(argv[optind]);
find_binary(fsrv, argv[optind]);
if (!quiet_mode) {
show_banner();
ACTF("Executing '%s'...", target_path);
ACTF("Executing '%s'...", fsrv->target_path);
}
if (in_dir) {
if (at_file) PFATAL("Options -A and -i are mutually exclusive");
detect_file_args(argv + optind, "");
detect_file_args(argv + optind, "", fsrv->use_stdin);
} else {
detect_file_args(argv + optind, at_file);
detect_file_args(argv + optind, at_file, fsrv->use_stdin);
}
@ -927,9 +912,9 @@ int main(int argc, char** argv, char** envp) {
if (qemu_mode) {
if (use_wine)
use_argv = get_wine_argv(argv[0], argv + optind, argc - optind);
use_argv = get_wine_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
else
use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
use_argv = get_qemu_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
} else
@ -945,14 +930,14 @@ int main(int argc, char** argv, char** envp) {
struct stat statbuf;
#endif
dev_null_fd = open("/dev/null", O_RDWR);
if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
fsrv->dev_null_fd = open("/dev/null", O_RDWR);
if (fsrv->dev_null_fd < 0) PFATAL("Unable to open /dev/null");
if (!(dir_in = opendir(in_dir))) PFATAL("cannot open directory %s", in_dir);
if (!(dir_out = opendir(out_file)))
if (mkdir(out_file, 0700))
PFATAL("cannot create output directory %s", out_file);
if (!(dir_out = opendir(fsrv->out_file)))
if (mkdir(fsrv->out_file, 0700))
PFATAL("cannot create output directory %s", fsrv->out_file);
u8* use_dir = ".";
@ -966,15 +951,15 @@ int main(int argc, char** argv, char** envp) {
stdin_file = alloc_printf("%s/.afl-showmap-temp-%u", use_dir, getpid());
unlink(stdin_file);
atexit(at_exit_handler);
out_fd = open(stdin_file, O_RDWR | O_CREAT | O_EXCL, 0600);
if (out_fd < 0) PFATAL("Unable to create '%s'", out_file);
fsrv->out_fd = open(stdin_file, O_RDWR | O_CREAT | O_EXCL, 0600);
if (fsrv->out_fd < 0) PFATAL("Unable to create '%s'", fsrv->out_file);
if (arg_offset) argv[arg_offset] = stdin_file;
if (get_afl_env("AFL_DEBUG")) {
int i = optind;
SAYF(cMGN "[D]" cRST " %s:", target_path);
SAYF(cMGN "[D]" cRST " %s:", fsrv->target_path);
while (argv[i] != NULL)
SAYF(" \"%s\"", argv[i++]);
SAYF("\n");
@ -983,7 +968,7 @@ int main(int argc, char** argv, char** envp) {
}
init_forkserver(use_argv);
afl_fsrv_start(fsrv, use_argv);
while (done == 0 && (dir_ent = readdir(dir_in))) {
@ -1000,13 +985,13 @@ int main(int argc, char** argv, char** envp) {
if (-1 == stat(infile, &statbuf) || !S_ISREG(statbuf.st_mode)) continue;
#endif
snprintf(outfile, sizeof(outfile), "%s/%s", out_file, dir_ent->d_name);
snprintf(outfile, sizeof(outfile), "%s/%s", fsrv->out_file, dir_ent->d_name);
if (read_file(infile)) {
run_target_forkserver(use_argv, in_data, in_len);
run_target_forkserver(fsrv, use_argv, in_data, in_len);
ck_free(in_data);
tcnt = write_results_to_file(outfile);
tcnt = write_results_to_file(fsrv);
}
@ -1016,8 +1001,8 @@ int main(int argc, char** argv, char** envp) {
} else {
run_target(use_argv);
tcnt = write_results();
run_target(fsrv, use_argv);
tcnt = write_results(fsrv);
}
@ -1025,7 +1010,7 @@ int main(int argc, char** argv, char** envp) {
if (!tcnt) FATAL("No instrumentation detected" cRST);
OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST,
tcnt, highest, total, out_file);
tcnt, highest, total, fsrv->out_file);
}
@ -1036,6 +1021,12 @@ int main(int argc, char** argv, char** envp) {
}
afl_shm_deinit(&shm);
u8 child_timed_out = fsrv->child_timed_out;
afl_fsrv_deinit(fsrv);
free(fsrv);
exit(child_crashed * 2 + child_timed_out);
}

View File

@ -58,22 +58,12 @@
#include <sys/types.h>
#include <sys/resource.h>
s32 forksrv_pid, /* PID of the fork server */
child_pid; /* PID of the tested program */
s32 fsrv_ctl_fd, /* Fork server control pipe (write) */
fsrv_st_fd; /* Fork server status pipe (read) */
u8* trace_bits; /* SHM with instrumentation bitmap */
static u8* mask_bitmap; /* Mask for trace bits (-B) */
u8 *in_file, /* Minimizer input test case */
*output_file, /* Minimizer output file */
*out_file, /* Targeted program input file */
*doc_path; /* Path to docs */
s32 out_fd; /* Persistent fd for out_file */
static u8* in_data; /* Input data for trimming */
static u32 in_len, /* Input data length */
@ -82,18 +72,13 @@ static u32 in_len, /* Input data length */
missed_hangs, /* Misses due to hangs */
missed_crashes, /* Misses due to crashes */
missed_paths; /* Misses due to exec path diffs */
u32 exec_tmout = EXEC_TIMEOUT; /* Exec timeout (ms) */
u64 mem_limit = MEM_LIMIT; /* Memory limit (MB) */
s32 dev_null_fd = -1; /* FD to /dev/null */
u8 crash_mode, /* Crash-centric mode? */
hang_mode, /* Minimize as long as it hangs */
exit_crash, /* Treat non-zero exit as crash? */
edges_only, /* Ignore hit counts? */
exact_mode, /* Require path match for crashes? */
be_quiet, use_stdin = 1; /* Use stdin for program input? */
be_quiet;
static volatile u8 stop_soon; /* Ctrl-C pressed? */
@ -174,9 +159,9 @@ static void apply_mask(u32* mem, u32* mask) {
/* See if any bytes are set in the bitmap. */
static inline u8 anything_set(void) {
static inline u8 anything_set(afl_forkserver_t *fsrv) {
u32* ptr = (u32*)trace_bits;
u32* ptr = (u32*)fsrv->trace_bits;
u32 i = (MAP_SIZE >> 2);
while (i--)
@ -186,11 +171,9 @@ static inline u8 anything_set(void) {
}
/* Get rid of temp files (atexit handler). */
static void at_exit_handler(void) {
if (out_file) unlink(out_file); /* Ignore errors */
afl_fsrv_killall();
}
@ -243,25 +226,25 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
is unlinked and a new one is created. Otherwise, out_fd is rewound and
truncated. */
static void write_to_testcase(void* mem, u32 len) {
static void write_to_testcase(afl_forkserver_t *fsrv, void* mem, u32 len) {
s32 fd = out_fd;
s32 fd = fsrv->out_fd;
if (!use_stdin) {
if (!fsrv->use_stdin) {
unlink(out_file); /* Ignore errors. */
unlink(fsrv->out_file); /* Ignore errors. */
fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
fd = open(fsrv->out_file, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
if (fd < 0) PFATAL("Unable to create '%s'", fsrv->out_file);
} else
lseek(fd, 0, SEEK_SET);
ck_write(fd, mem, len, out_file);
ck_write(fd, mem, len, fsrv->out_file);
if (use_stdin) {
if (fsrv->use_stdin) {
if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
@ -355,7 +338,7 @@ static void init_forkserver(char **argv) {
close(st_pipe[0]);
close(st_pipe[1]);
execv(target_path, argv);
execv(fsrv->target_path, argv);
*(u32*)trace_bits = EXEC_FAIL_SIG;
exit(0);
@ -420,7 +403,7 @@ static void init_forkserver(char **argv) {
/* Execute target application. Returns 0 if the changes are a dud, or
1 if they should be kept. */
static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
static u8 run_target(afl_forkserver_t *fsrv, char** argv, u8* mem, u32 len, u8 first_run) {
static struct itimerval it;
static u32 prev_timed_out = 0;
@ -428,53 +411,53 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
u32 cksum;
child_timed_out = 0;
fsrv->child_timed_out = 0;
memset(trace_bits, 0, MAP_SIZE);
memset(fsrv->trace_bits, 0, MAP_SIZE);
MEM_BARRIER();
write_to_testcase(mem, len);
write_to_testcase(fsrv, mem, len);
s32 res;
/* we have the fork server up and running, so simply
tell it to have at it, and then read back PID. */
if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if ((res = write(fsrv->fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) {
if ((res = read(fsrv->fsrv_st_fd, &fsrv->child_pid, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from fork server (OOM?)");
}
if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
if (fsrv->child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)");
/* Configure timeout, wait for child, cancel timeout. */
if (exec_tmout) {
if (fsrv->exec_tmout) {
it.it_value.tv_sec = (exec_tmout / 1000);
it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
it.it_value.tv_sec = (fsrv->exec_tmout / 1000);
it.it_value.tv_usec = (fsrv->exec_tmout % 1000) * 1000;
}
setitimer(ITIMER_REAL, &it, NULL);
if ((res = read(fsrv_st_fd, &status, 4)) != 4) {
if ((res = read(fsrv->fsrv_st_fd, &status, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to communicate with fork server (OOM?)");
}
child_pid = 0;
fsrv->child_pid = 0;
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
@ -484,13 +467,13 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
/* Clean up bitmap, analyze exit condition, etc. */
if (*(u32*)trace_bits == EXEC_FAIL_SIG)
if (*(u32*)fsrv->trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute '%s'", argv[0]);
if (!hang_mode) {
classify_counts(trace_bits);
apply_mask((u32*)trace_bits, (u32*)mask_bitmap);
classify_counts(fsrv->trace_bits);
apply_mask((u32*)fsrv->trace_bits, (u32*)mask_bitmap);
}
@ -508,7 +491,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
if (hang_mode) {
if (child_timed_out) return 1;
if (fsrv->child_timed_out) return 1;
if (WIFSIGNALED(status) ||
(WIFEXITED(status) && WEXITSTATUS(status) == MSAN_ERROR) ||
@ -526,7 +509,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
}
if (child_timed_out) {
if (fsrv->child_timed_out) {
missed_hangs++;
return 0;
@ -565,7 +548,7 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
}
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(fsrv->trace_bits, MAP_SIZE, HASH_CONST);
if (first_run) orig_cksum = cksum;
@ -589,7 +572,7 @@ static u32 next_p2(u32 val) {
/* Actually minimize! */
static void minimize(char** argv) {
static void minimize(afl_forkserver_t *fsrv, char** argv) {
static u32 alpha_map[256];
@ -624,7 +607,7 @@ static void minimize(char** argv) {
memset(tmp_buf + set_pos, '0', use_len);
u8 res;
res = run_target(argv, tmp_buf, in_len, 0);
res = run_target(fsrv, argv, tmp_buf, in_len, 0);
if (res) {
@ -697,7 +680,7 @@ next_del_blksize:
/* Tail */
memcpy(tmp_buf + del_pos, in_data + del_pos + del_len, tail_len);
res = run_target(argv, tmp_buf, del_pos + tail_len, 0);
res = run_target(fsrv, argv, tmp_buf, del_pos + tail_len, 0);
if (res) {
@ -760,7 +743,7 @@ next_del_blksize:
for (r = 0; r < in_len; r++)
if (tmp_buf[r] == i) tmp_buf[r] = '0';
res = run_target(argv, tmp_buf, in_len, 0);
res = run_target(fsrv, argv, tmp_buf, in_len, 0);
if (res) {
@ -796,7 +779,7 @@ next_del_blksize:
if (orig == '0') continue;
tmp_buf[i] = '0';
res = run_target(argv, tmp_buf, in_len, 0);
res = run_target(fsrv, argv, tmp_buf, in_len, 0);
if (res) {
@ -851,21 +834,20 @@ finalize_all:
static void handle_stop_sig(int sig) {
stop_soon = 1;
if (child_pid > 0) kill(child_pid, SIGKILL);
afl_fsrv_killall();
}
/* Do basic preparations - persistent fds, filenames, etc. */
static void set_up_environment(void) {
static void set_up_environment(afl_forkserver_t *fsrv) {
u8* x;
dev_null_fd = open("/dev/null", O_RDWR);
if (dev_null_fd < 0) PFATAL("Unable to open /dev/null");
fsrv->dev_null_fd = open("/dev/null", O_RDWR);
if (fsrv->dev_null_fd < 0) PFATAL("Unable to open /dev/null");
if (!out_file) {
if (!fsrv->out_file) {
u8* use_dir = ".";
@ -876,15 +858,15 @@ static void set_up_environment(void) {
}
out_file = alloc_printf("%s/.afl-tmin-temp-%u", use_dir, getpid());
fsrv->out_file = alloc_printf("%s/.afl-tmin-temp-%u", use_dir, getpid());
}
unlink(out_file);
unlink(fsrv->out_file);
out_fd = open(out_file, O_RDWR | O_CREAT | O_EXCL, 0600);
fsrv->out_fd = open(fsrv->out_file, O_RDWR | O_CREAT | O_EXCL, 0600);
if (out_fd < 0) PFATAL("Unable to create '%s'", out_file);
if (fsrv->out_fd < 0) PFATAL("Unable to create '%s'", fsrv->out_file);
/* Set sane defaults... */
@ -1041,16 +1023,16 @@ static void usage(u8* argv0) {
/* Find binary. */
static void find_binary(u8* fname) {
static void find_binary(afl_forkserver_t *fsrv, u8* fname) {
u8* env_path = 0;
struct stat st;
if (strchr(fname, '/') || !(env_path = getenv("PATH"))) {
target_path = ck_strdup(fname);
fsrv->target_path = ck_strdup(fname);
if (stat(target_path, &st) || !S_ISREG(st.st_mode) ||
if (stat(fsrv->target_path, &st) || !S_ISREG(st.st_mode) ||
!(st.st_mode & 0111) || st.st_size < 4)
FATAL("Program '%s' not found or not executable", fname);
@ -1073,22 +1055,22 @@ static void find_binary(u8* fname) {
env_path = delim;
if (cur_elem[0])
target_path = alloc_printf("%s/%s", cur_elem, fname);
fsrv->target_path = alloc_printf("%s/%s", cur_elem, fname);
else
target_path = ck_strdup(fname);
fsrv->target_path = ck_strdup(fname);
ck_free(cur_elem);
if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
if (!stat(fsrv->target_path, &st) && S_ISREG(st.st_mode) &&
(st.st_mode & 0111) && st.st_size >= 4)
break;
ck_free(target_path);
target_path = 0;
ck_free(fsrv->target_path);
fsrv->target_path = NULL;
}
if (!target_path) FATAL("Program '%s' not found or not executable", fname);
if (!fsrv->target_path) FATAL("Program '%s' not found or not executable", fname);
}
@ -1116,6 +1098,9 @@ int main(int argc, char** argv, char** envp) {
u8 mem_limit_given = 0, timeout_given = 0, unicorn_mode = 0, use_wine = 0;
char** use_argv;
afl_forkserver_t *fsrv = calloc(1, sizeof(afl_forkserver_t));
afl_fsrv_init(fsrv);
doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
SAYF(cCYA "afl-tmin" VERSION cRST " by Michal Zalewski\n");
@ -1138,9 +1123,9 @@ int main(int argc, char** argv, char** envp) {
case 'f':
if (out_file) FATAL("Multiple -f options not supported");
use_stdin = 0;
out_file = optarg;
if (fsrv->out_file) FATAL("Multiple -f options not supported");
fsrv->use_stdin = 0;
fsrv->out_file = optarg;
break;
case 'e':
@ -1166,29 +1151,29 @@ int main(int argc, char** argv, char** envp) {
if (!strcmp(optarg, "none")) {
mem_limit = 0;
fsrv->mem_limit = 0;
break;
}
if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
if (sscanf(optarg, "%llu%c", &fsrv->mem_limit, &suffix) < 1 ||
optarg[0] == '-')
FATAL("Bad syntax used for -m");
switch (suffix) {
case 'T': mem_limit *= 1024 * 1024; break;
case 'G': mem_limit *= 1024; break;
case 'k': mem_limit /= 1024; break;
case 'T': fsrv->mem_limit *= 1024 * 1024; break;
case 'G': fsrv->mem_limit *= 1024; break;
case 'k': fsrv->mem_limit /= 1024; break;
case 'M': break;
default: FATAL("Unsupported suffix or bad syntax for -m");
}
if (mem_limit < 5) FATAL("Dangerously low value of -m");
if (fsrv->mem_limit < 5) FATAL("Dangerously low value of -m");
if (sizeof(rlim_t) == 4 && mem_limit > 2000)
if (sizeof(rlim_t) == 4 && fsrv->mem_limit > 2000)
FATAL("Value of -m out of range on 32-bit systems");
}
@ -1200,9 +1185,9 @@ int main(int argc, char** argv, char** envp) {
if (timeout_given) FATAL("Multiple -t options not supported");
timeout_given = 1;
exec_tmout = atoi(optarg);
fsrv->exec_tmout = atoi(optarg);
if (exec_tmout < 10 || optarg[0] == '-')
if (fsrv->exec_tmout < 10 || optarg[0] == '-')
FATAL("Dangerously low value of -t");
break;
@ -1210,7 +1195,7 @@ int main(int argc, char** argv, char** envp) {
case 'Q':
if (qemu_mode) FATAL("Multiple -Q options not supported");
if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU;
if (!mem_limit_given) fsrv->mem_limit = MEM_LIMIT_QEMU;
qemu_mode = 1;
break;
@ -1218,7 +1203,7 @@ int main(int argc, char** argv, char** envp) {
case 'U':
if (unicorn_mode) FATAL("Multiple -Q options not supported");
if (!mem_limit_given) mem_limit = MEM_LIMIT_UNICORN;
if (!mem_limit_given) fsrv->mem_limit = MEM_LIMIT_UNICORN;
unicorn_mode = 1;
break;
@ -1229,7 +1214,7 @@ int main(int argc, char** argv, char** envp) {
qemu_mode = 1;
use_wine = 1;
if (!mem_limit_given) mem_limit = 0;
if (!mem_limit_given) fsrv->mem_limit = 0;
break;
@ -1275,21 +1260,24 @@ int main(int argc, char** argv, char** envp) {
if (optind == argc || !in_file || !output_file) usage(argv[0]);
check_environment_vars(envp);
setup_shm(0);
sharedmem_t shm = {0};
fsrv->trace_bits = afl_shm_init(&shm, MAP_SIZE, 0);
atexit(at_exit_handler);
setup_signal_handlers();
set_up_environment();
set_up_environment(fsrv);
find_binary(argv[optind]);
detect_file_args(argv + optind, out_file);
find_binary(fsrv, argv[optind]);
detect_file_args(argv + optind, fsrv->out_file, fsrv->use_stdin);
if (qemu_mode) {
if (use_wine)
use_argv = get_wine_argv(argv[0], argv + optind, argc - optind);
use_argv = get_wine_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
else
use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind);
use_argv = get_qemu_argv(argv[0], &fsrv->target_path, argc - optind, argv + optind);
} else
@ -1308,20 +1296,20 @@ int main(int argc, char** argv, char** envp) {
read_initial_file();
init_forkserver(use_argv);
afl_fsrv_start(fsrv, use_argv);
ACTF("Performing dry run (mem limit = %llu MB, timeout = %u ms%s)...",
mem_limit, exec_tmout, edges_only ? ", edges only" : "");
fsrv->mem_limit, fsrv->exec_tmout, edges_only ? ", edges only" : "");
run_target(use_argv, in_data, in_len, 1);
run_target(fsrv, use_argv, in_data, in_len, 1);
if (hang_mode && !child_timed_out)
if (hang_mode && !fsrv->child_timed_out)
FATAL(
"Target binary did not time out but hang minimization mode "
"(-H) was set (-t %u).",
exec_tmout);
fsrv->exec_tmout);
if (child_timed_out && !hang_mode)
if (fsrv->child_timed_out && !hang_mode)
FATAL(
"Target binary times out (adjusting -t may help). Use -H to minimize a "
"hang.");
@ -1335,7 +1323,7 @@ int main(int argc, char** argv, char** envp) {
OKF("Program terminates normally, minimizing in " cCYA "instrumented" cRST
" mode.");
if (!anything_set()) FATAL("No instrumentation detected.");
if (!anything_set(fsrv)) FATAL("No instrumentation detected.");
} else {
@ -1345,17 +1333,22 @@ int main(int argc, char** argv, char** envp) {
}
minimize(use_argv);
minimize(fsrv, use_argv);
ACTF("Writing output to '%s'...", output_file);
unlink(out_file);
out_file = NULL;
unlink(fsrv->out_file);
fsrv->out_file = NULL;
close(write_to_file(output_file, in_data, in_len));
OKF("We're done here. Have a nice day!\n");
afl_shm_deinit(&shm);
afl_fsrv_deinit(fsrv);
free(fsrv);
exit(0);
}

View File

@ -209,10 +209,10 @@ int main(int argc, char **argv, char **envp) {
// reserve some space for our input data
mem_map_checked(uc, INPUT_LOCATION, INPUT_SIZE_MAX, UC_PROT_READ);
// build a "dummy" argv with lenth 2 at 0x10000:
// build a "dummy" argv with length 2 at 0x10000:
// 0x10000 argv[0] NULL
// 0x10008 argv[1] (char *)0x10016 --. points to the next offset.
// 0x10016 argv[1][0], ... <-^ contains the acutal input data. (INPUT_LOCATION + INPUT_OFFSET)
// 0x10016 argv[1][0], ... <-^ contains the actual input data. (INPUT_LOCATION + INPUT_OFFSET)
uc_mem_write(uc, 0x10008, "\x16\x00\x01", 3); // little endian of 0x10016, see above