AFL without globals (#220)
* moved globals to afl, shm and fsrv
* moved argv to afl state, less bugs
* fixed unicorn docu
* lists everywhere
* merged custom mutators
* fixed leaks in afl-fuzz
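The pattern behind every hunk below is the same: a former file-scope global (out_dir, queued_paths, top_rated[], trace_bits, ...) becomes a field of the afl state object, and each function that touched it now takes that state as its first parameter. A minimal, self-contained C sketch of the pattern, with illustrative stand-in names rather than the real afl_state_t:

#include <stdio.h>

/* Illustrative stand-in for afl_state_t: former globals become struct fields. */
typedef struct example_state {
  const char *out_dir;   /* was: global out_dir   */
  unsigned    cur_depth; /* was: global cur_depth */
} example_state_t;

/* Before the refactor this would have read the globals directly; now the
   caller passes its own state, so several independent fuzzer instances can
   live in one process without clashing. */
static void report_entry(example_state_t *st, const char *name) {
  printf("%s/queue/%s (depth %u)\n", st->out_dir, name, st->cur_depth + 1);
}

int main(void) {
  example_state_t a = { "out-A", 0 };
  example_state_t b = { "out-B", 5 };
  report_entry(&a, "id:000001"); /* each call sees only its own state */
  report_entry(&b, "id:000002");
  return 0;
}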
--- a/src/afl-fuzz-queue.c
+++ b/src/afl-fuzz-queue.c
@@ -28,12 +28,12 @@
    .state file to avoid repeating deterministic fuzzing when resuming aborted
    scans. */
 
-void mark_as_det_done(struct queue_entry* q) {
+void mark_as_det_done(afl_state_t *afl, struct queue_entry* q) {
 
   u8* fn = strrchr(q->fname, '/');
   s32 fd;
 
-  fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
+  fn = alloc_printf("%s/queue/.state/deterministic_done/%s", afl->out_dir, fn + 1);
 
   fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
   if (fd < 0) PFATAL("Unable to create '%s'", fn);
@@ -48,12 +48,12 @@ void mark_as_det_done(struct queue_entry* q) {
 /* Mark as variable. Create symlinks if possible to make it easier to examine
    the files. */
 
-void mark_as_variable(struct queue_entry* q) {
+void mark_as_variable(afl_state_t *afl, struct queue_entry* q) {
 
   u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
 
   ldest = alloc_printf("../../%s", fn);
-  fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
+  fn = alloc_printf("%s/queue/.state/variable_behavior/%s", afl->out_dir, fn);
 
   if (symlink(ldest, fn)) {
 
@@ -73,7 +73,7 @@ void mark_as_variable(struct queue_entry* q) {
 /* Mark / unmark as redundant (edge-only). This is not used for restoring state,
    but may be useful for post-processing datasets. */
 
-void mark_as_redundant(struct queue_entry* q, u8 state) {
+void mark_as_redundant(afl_state_t *afl, struct queue_entry* q, u8 state) {
 
   u8* fn;
 
@@ -82,7 +82,7 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
   q->fs_redundant = state;
 
   fn = strrchr(q->fname, '/');
-  fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
+  fn = alloc_printf("%s/queue/.state/redundant_edges/%s", afl->out_dir, fn + 1);
 
   if (state) {
 
@@ -104,49 +104,49 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
 
 /* Append new test case to the queue. */
 
-void add_to_queue(u8* fname, u32 len, u8 passed_det) {
+void add_to_queue(afl_state_t *afl, u8* fname, u32 len, u8 passed_det) {
 
   struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
 
   q->fname = fname;
   q->len = len;
-  q->depth = cur_depth + 1;
+  q->depth = afl->cur_depth + 1;
   q->passed_det = passed_det;
   q->n_fuzz = 1;
 
-  if (q->depth > max_depth) max_depth = q->depth;
+  if (q->depth > afl->max_depth) afl->max_depth = q->depth;
 
-  if (queue_top) {
+  if (afl->queue_top) {
 
-    queue_top->next = q;
-    queue_top = q;
+    afl->queue_top->next = q;
+    afl->queue_top = q;
 
   } else
 
-    q_prev100 = queue = queue_top = q;
+    afl->q_prev100 = afl->queue = afl->queue_top = q;
 
-  ++queued_paths;
-  ++pending_not_fuzzed;
+  ++afl->queued_paths;
+  ++afl->pending_not_fuzzed;
 
-  cycles_wo_finds = 0;
+  afl->cycles_wo_finds = 0;
 
-  if (!(queued_paths % 100)) {
+  if (!(afl->queued_paths % 100)) {
 
-    q_prev100->next_100 = q;
-    q_prev100 = q;
+    afl->q_prev100->next_100 = q;
+    afl->q_prev100 = q;
 
   }
 
-  last_path_time = get_cur_time();
+  afl->last_path_time = get_cur_time();
 
-  if (mutator && mutator->afl_custom_queue_new_entry) {
+  if (afl->mutator && afl->mutator->afl_custom_queue_new_entry) {
 
     u8* fname_orig = NULL;
 
     /* At the initialization stage, queue_cur is NULL */
-    if (queue_cur) fname_orig = queue_cur->fname;
+    if (afl->queue_cur) fname_orig = afl->queue_cur->fname;
 
-    mutator->afl_custom_queue_new_entry(fname, fname_orig);
+    afl->mutator->afl_custom_queue_new_entry(afl, fname, fname_orig);
 
   }
 
@@ -154,9 +154,9 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
 
 /* Destroy the entire queue. */
 
-void destroy_queue(void) {
+void destroy_queue(afl_state_t *afl) {
 
-  struct queue_entry *q = queue, *n;
+  struct queue_entry *q = afl->queue, *n;
 
   while (q) {
 
@@ -176,28 +176,28 @@ void destroy_queue(void) {
    seen in the bitmap so far, and focus on fuzzing them at the expense of
    the rest.
 
-   The first step of the process is to maintain a list of top_rated[] entries
+   The first step of the process is to maintain a list of afl->top_rated[] entries
    for every byte in the bitmap. We win that slot if there is no previous
    contender, or if the contender has a more favorable speed x size factor. */
 
-void update_bitmap_score(struct queue_entry* q) {
+void update_bitmap_score(afl_state_t *afl, struct queue_entry* q) {
 
   u32 i;
   u64 fav_factor = q->exec_us * q->len;
   u64 fuzz_p2 = next_p2(q->n_fuzz);
 
-  /* For every byte set in trace_bits[], see if there is a previous winner,
+  /* For every byte set in afl->fsrv.trace_bits[], see if there is a previous winner,
      and how it compares to us. */
 
   for (i = 0; i < MAP_SIZE; ++i)
 
-    if (trace_bits[i]) {
+    if (afl->fsrv.trace_bits[i]) {
 
-      if (top_rated[i]) {
+      if (afl->top_rated[i]) {
 
         /* Faster-executing or smaller test cases are favored. */
-        u64 top_rated_fuzz_p2 = next_p2(top_rated[i]->n_fuzz);
-        u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
+        u64 top_rated_fuzz_p2 = next_p2(afl->top_rated[i]->n_fuzz);
+        u64 top_rated_fav_factor = afl->top_rated[i]->exec_us * afl->top_rated[i]->len;
 
         if (fuzz_p2 > top_rated_fuzz_p2) {
 
@@ -209,15 +209,15 @@ void update_bitmap_score(struct queue_entry* q) {
 
         }
 
-        if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
+        if (fav_factor > afl->top_rated[i]->exec_us * afl->top_rated[i]->len) continue;
 
         /* Looks like we're going to win. Decrease ref count for the
-           previous winner, discard its trace_bits[] if necessary. */
+           previous winner, discard its afl->fsrv.trace_bits[] if necessary. */
 
-        if (!--top_rated[i]->tc_ref) {
+        if (!--afl->top_rated[i]->tc_ref) {
 
-          ck_free(top_rated[i]->trace_mini);
-          top_rated[i]->trace_mini = 0;
+          ck_free(afl->top_rated[i]->trace_mini);
+          afl->top_rated[i]->trace_mini = 0;
 
         }
 
@@ -225,44 +225,44 @@ void update_bitmap_score(struct queue_entry* q) {
 
       /* Insert ourselves as the new winner. */
 
-      top_rated[i] = q;
+      afl->top_rated[i] = q;
       ++q->tc_ref;
 
       if (!q->trace_mini) {
 
         q->trace_mini = ck_alloc(MAP_SIZE >> 3);
-        minimize_bits(q->trace_mini, trace_bits);
+        minimize_bits(q->trace_mini, afl->fsrv.trace_bits);
 
       }
 
-      score_changed = 1;
+      afl->score_changed = 1;
 
     }
 
 }
 
 /* The second part of the mechanism discussed above is a routine that
-   goes over top_rated[] entries, and then sequentially grabs winners for
+   goes over afl->top_rated[] entries, and then sequentially grabs winners for
    previously-unseen bytes (temp_v) and marks them as favored, at least
    until the next run. The favored entries are given more air time during
    all fuzzing steps. */
 
-void cull_queue(void) {
+void cull_queue(afl_state_t *afl) {
 
   struct queue_entry* q;
   static u8 temp_v[MAP_SIZE >> 3];
   u32 i;
 
-  if (dumb_mode || !score_changed) return;
+  if (afl->dumb_mode || !afl->score_changed) return;
 
-  score_changed = 0;
+  afl->score_changed = 0;
 
   memset(temp_v, 255, MAP_SIZE >> 3);
 
-  queued_favored = 0;
-  pending_favored = 0;
+  afl->queued_favored = 0;
+  afl->pending_favored = 0;
 
-  q = queue;
+  q = afl->queue;
 
   while (q) {
 
@@ -272,32 +272,32 @@ void cull_queue(void) {
   }
 
   /* Let's see if anything in the bitmap isn't captured in temp_v.
-     If yes, and if it has a top_rated[] contender, let's use it. */
+     If yes, and if it has a afl->top_rated[] contender, let's use it. */
 
   for (i = 0; i < MAP_SIZE; ++i)
-    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
+    if (afl->top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
 
       u32 j = MAP_SIZE >> 3;
 
       /* Remove all bits belonging to the current entry from temp_v. */
 
       while (j--)
-        if (top_rated[i]->trace_mini[j])
-          temp_v[j] &= ~top_rated[i]->trace_mini[j];
+        if (afl->top_rated[i]->trace_mini[j])
+          temp_v[j] &= ~afl->top_rated[i]->trace_mini[j];
 
-      top_rated[i]->favored = 1;
-      ++queued_favored;
+      afl->top_rated[i]->favored = 1;
+      ++afl->queued_favored;
 
-      if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed)
-        ++pending_favored;
+      if (afl->top_rated[i]->fuzz_level == 0 || !afl->top_rated[i]->was_fuzzed)
+        ++afl->pending_favored;
 
     }
 
-  q = queue;
+  q = afl->queue;
 
   while (q) {
 
-    mark_as_redundant(q, !q->favored);
+    mark_as_redundant(afl, q, !q->favored);
     q = q->next;
 
   }
@@ -308,10 +308,10 @@ void cull_queue(void) {
    A helper function for fuzz_one(). Maybe some of these constants should
    go into config.h. */
 
-u32 calculate_score(struct queue_entry* q) {
+u32 calculate_score(afl_state_t *afl, struct queue_entry* q) {
 
-  u32 avg_exec_us = total_cal_us / total_cal_cycles;
-  u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries;
+  u32 avg_exec_us = afl->total_cal_us / afl->total_cal_cycles;
+  u32 avg_bitmap_size = afl->total_bitmap_size / afl->total_bitmap_entries;
   u32 perf_score = 100;
 
   /* Adjust score based on execution speed of this path, compared to the
@@ -391,7 +391,7 @@ u32 calculate_score(struct queue_entry* q) {
   u32 n_paths, fuzz_mu;
   u32 factor = 1;
 
-  switch (schedule) {
+  switch (afl->schedule) {
 
     case EXPLORE: break;
 
@@ -401,7 +401,7 @@ u32 calculate_score(struct queue_entry* q) {
       fuzz_total = 0;
       n_paths = 0;
 
-      struct queue_entry* queue_it = queue;
+      struct queue_entry* queue_it = afl->queue;
       while (queue_it) {
 
         fuzz_total += queue_it->n_fuzz;
@@ -451,7 +451,7 @@ u32 calculate_score(struct queue_entry* q) {
   perf_score *= factor / POWER_BETA;
 
   // MOpt mode
-  if (limit_time_sig != 0 && max_depth - q->depth < 3)
+  if (afl->limit_time_sig != 0 && afl->max_depth - q->depth < 3)
     perf_score *= 2;
   else if (perf_score < 1)
     perf_score =
@@ -459,7 +459,7 @@ u32 calculate_score(struct queue_entry* q) {
 
   /* Make sure that we don't go over limit. */
 
-  if (perf_score > havoc_max_mult * 100) perf_score = havoc_max_mult * 100;
+  if (perf_score > afl->havoc_max_mult * 100) perf_score = afl->havoc_max_mult * 100;
 
   return perf_score;
 
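Besides the mechanical global-to-state renames, one call shape changes in add_to_queue(): the custom-mutator hook afl_custom_queue_new_entry now receives the afl state as its first argument. A hedged sketch of that shape change, using an illustrative hook table rather than AFL++'s real mutator struct:

#include <stdio.h>

/* Illustrative stand-ins only; the real types live in the AFL++ headers. */
typedef struct toy_state toy_state_t;

typedef struct toy_mutator {
  /* old shape: void (*queue_new_entry)(const char *fname, const char *orig); */
  void (*queue_new_entry)(toy_state_t *afl, const char *fname,
                          const char *fname_orig);
} toy_mutator_t;

struct toy_state {
  toy_mutator_t *mutator;
};

static void on_new_entry(toy_state_t *afl, const char *fname,
                         const char *fname_orig) {
  (void)afl; /* the hook can now reach per-instance state instead of globals */
  printf("new entry %s (from %s)\n", fname, fname_orig ? fname_orig : "<seed>");
}

int main(void) {
  toy_mutator_t hooks = { on_new_entry };
  toy_state_t   afl   = { &hooks };

  /* mirrors the guarded call site in add_to_queue() after this commit */
  if (afl.mutator && afl.mutator->queue_new_entry)
    afl.mutator->queue_new_entry(&afl, "queue/id:000003", NULL);

  return 0;
}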