afl-fuzz-src bitmap and queue C files

Andrea Fioraldi 2019-09-01 20:34:20 +02:00
parent 4f3c417753
commit 3b3df4e3cb
6 changed files with 787 additions and 692 deletions

Makefile

@@ -33,7 +33,7 @@ SH_PROGS = afl-plot afl-cmin afl-whatsup afl-system-config
CFLAGS ?= -O3 -funroll-loops
CFLAGS += -Wall -D_FORTIFY_SOURCE=2 -g -Wno-pointer-sign -I include/ \
-DAFL_PATH=\"$(HELPER_PATH)\" -DDOC_PATH=\"$(DOC_PATH)\" \
-  -DBIN_PATH=\"$(BIN_PATH)\"
+  -DBIN_PATH=\"$(BIN_PATH)\" -Wno-unused-function
AFL_FUZZ_FILES = $(wildcard src/afl-fuzz-src/*.c)

include/afl-fuzz.h

@@ -466,6 +466,34 @@ void trim_py(char**, size_t*);
#endif
/* Queue */
void mark_as_det_done(struct queue_entry* q);
void mark_as_variable(struct queue_entry* q);
void mark_as_redundant(struct queue_entry* q, u8 state);
void add_to_queue(u8* fname, u32 len, u8 passed_det);
void destroy_queue(void);
void update_bitmap_score(struct queue_entry* q);
void cull_queue(void);
/* Bitmap */
void write_bitmap(void);
void read_bitmap(u8* fname);
u8 has_new_bits(u8* virgin_map);
u32 count_bits(u8* mem);
u32 count_bytes(u8* mem);
u32 count_non_255_bytes(u8* mem);
#ifdef __x86_64__
void simplify_trace(u64* mem);
void classify_counts(u64* mem);
#else
void simplify_trace(u32* mem);
void classify_counts(u32* mem);
#endif
void init_count_class16(void);
void minimize_bits(u8* dst, u8* src);
/**** Inline routines ****/
/* Generate a random number (from 0 to limit - 1). This may
@@ -493,5 +521,43 @@ static inline u32 UR(u32 limit) {
#endif
}
/* Find the first power of two greater than or equal to val (assuming val
   under 2^63). */
static u64 next_p2(u64 val) {
u64 ret = 1;
while (val > ret) ret <<= 1;
return ret;
}
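/* Examples (editor's note): next_p2(1) == 1, next_p2(5) == 8,
   next_p2(4096) == 4096; val == 0 also returns 1. */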
/* Get unix time in milliseconds */
static u64 get_cur_time(void) {
struct timeval tv;
struct timezone tz;
gettimeofday(&tv, &tz);
return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
}
/* Get unix time in microseconds */
static u64 get_cur_time_us(void) {
struct timeval tv;
struct timezone tz;
gettimeofday(&tv, &tz);
return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
}
#endif

src/afl-fuzz-src/afl-fuzz.c

@@ -45,34 +45,6 @@ int select_algorithm(void) {
}
/* Get unix time in milliseconds */
static u64 get_cur_time(void) {
struct timeval tv;
struct timezone tz;
gettimeofday(&tv, &tz);
return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
}
/* Get unix time in microseconds */
static u64 get_cur_time_us(void) {
struct timeval tv;
struct timezone tz;
gettimeofday(&tv, &tz);
return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
}
/* Shuffle an array of pointers. Might be slightly biased. */
static void shuffle_ptrs(void** ptrs, u32 cnt) {
@@ -393,669 +365,6 @@ static u8* DTD(u64 cur_ms, u64 event_ms) {
}
/* Mark deterministic checks as done for a particular queue entry. We use the
.state file to avoid repeating deterministic fuzzing when resuming aborted
scans. */
static void mark_as_det_done(struct queue_entry* q) {
u8* fn = strrchr(q->fname, '/');
s32 fd;
fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
ck_free(fn);
q->passed_det = 1;
}
/* Mark as variable. Create symlinks if possible to make it easier to examine
the files. */
static void mark_as_variable(struct queue_entry* q) {
u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
ldest = alloc_printf("../../%s", fn);
fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
if (symlink(ldest, fn)) {
s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
}
ck_free(ldest);
ck_free(fn);
q->var_behavior = 1;
}
/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
but may be useful for post-processing datasets. */
static void mark_as_redundant(struct queue_entry* q, u8 state) {
u8* fn;
if (state == q->fs_redundant) return;
q->fs_redundant = state;
fn = strrchr(q->fname, '/');
fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
if (state) {
s32 fd;
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
} else {
if (unlink(fn)) PFATAL("Unable to remove '%s'", fn);
}
ck_free(fn);
}
/* Append new test case to the queue. */
static void add_to_queue(u8* fname, u32 len, u8 passed_det) {
struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
q->fname = fname;
q->len = len;
q->depth = cur_depth + 1;
q->passed_det = passed_det;
q->n_fuzz = 1;
if (q->depth > max_depth) max_depth = q->depth;
if (queue_top) {
queue_top->next = q;
queue_top = q;
} else q_prev100 = queue = queue_top = q;
++queued_paths;
++pending_not_fuzzed;
cycles_wo_finds = 0;
if (!(queued_paths % 100)) {
q_prev100->next_100 = q;
q_prev100 = q;
}
last_path_time = get_cur_time();
}
/* Destroy the entire queue. */
void destroy_queue(void) {
struct queue_entry *q = queue, *n;
while (q) {
n = q->next;
ck_free(q->fname);
ck_free(q->trace_mini);
ck_free(q);
q = n;
}
}
/* Write bitmap to file. The bitmap is useful mostly for the secret
-B option, to focus a separate fuzzing session on a particular
interesting input without rediscovering all the others. */
void write_bitmap(void) {
u8* fname;
s32 fd;
if (!bitmap_changed) return;
bitmap_changed = 0;
fname = alloc_printf("%s/fuzz_bitmap", out_dir);
fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) PFATAL("Unable to open '%s'", fname);
ck_write(fd, virgin_bits, MAP_SIZE, fname);
close(fd);
ck_free(fname);
}
/* Read bitmap from file. This is for the -B option again. */
void read_bitmap(u8* fname) {
s32 fd = open(fname, O_RDONLY);
if (fd < 0) PFATAL("Unable to open '%s'", fname);
ck_read(fd, virgin_bits, MAP_SIZE, fname);
close(fd);
}
/* Check if the current execution path brings anything new to the table.
Update virgin bits to reflect the finds. Returns 1 if the only change is
the hit-count for a particular tuple; 2 if there are new tuples seen.
Updates the map, so subsequent calls will always return 0.
This function is called after every exec() on a fairly large buffer, so
it needs to be fast. We do this in 32-bit and 64-bit flavors. */
static inline u8 has_new_bits(u8* virgin_map) {
#ifdef __x86_64__
u64* current = (u64*)trace_bits;
u64* virgin = (u64*)virgin_map;
u32 i = (MAP_SIZE >> 3);
#else
u32* current = (u32*)trace_bits;
u32* virgin = (u32*)virgin_map;
u32 i = (MAP_SIZE >> 2);
#endif /* ^__x86_64__ */
u8 ret = 0;
while (i--) {
/* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
that have not been already cleared from the virgin map - since this will
almost always be the case. */
if (unlikely(*current) && unlikely(*current & *virgin)) {
if (likely(ret < 2)) {
u8* cur = (u8*)current;
u8* vir = (u8*)virgin;
/* Looks like we have not found any new bytes yet; see if any non-zero
bytes in current[] are pristine in virgin[]. */
#ifdef __x86_64__
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
(cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
(cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
else ret = 1;
#else
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
else ret = 1;
#endif /* ^__x86_64__ */
}
*virgin &= ~*current;
}
++current;
++virgin;
}
if (ret && virgin_map == virgin_bits) bitmap_changed = 1;
return ret;
}
/* Count the number of bits set in the provided bitmap. Used for the status
screen several times every second, does not have to be fast. */
static u32 count_bits(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
/* This gets called on the inverse, virgin bitmap; optimize for sparse
data. */
if (v == 0xffffffff) {
ret += 32;
continue;
}
v -= ((v >> 1) & 0x55555555);
v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
}
return ret;
}
#define FF(_b) (0xff << ((_b) << 3))
/* Count the number of bytes set in the bitmap. Called fairly sporadically,
mostly to update the status screen or calibrate and examine confirmed
new paths. */
static u32 count_bytes(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
if (!v) continue;
if (v & FF(0)) ++ret;
if (v & FF(1)) ++ret;
if (v & FF(2)) ++ret;
if (v & FF(3)) ++ret;
}
return ret;
}
/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
status screen, several calls per second or so. */
static u32 count_non_255_bytes(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
/* This is called on the virgin bitmap, so optimize for the most likely
case. */
if (v == 0xffffffff) continue;
if ((v & FF(0)) != FF(0)) ++ret;
if ((v & FF(1)) != FF(1)) ++ret;
if ((v & FF(2)) != FF(2)) ++ret;
if ((v & FF(3)) != FF(3)) ++ret;
}
return ret;
}
/* Destructively simplify trace by eliminating hit count information
and replacing it with 0x80 or 0x01 depending on whether the tuple
is hit or not. Called on every new crash or timeout, should be
reasonably fast. */
static const u8 simplify_lookup[256] = {
[0] = 1,
[1 ... 255] = 128
};
#ifdef __x86_64__
static void simplify_trace(u64* mem) {
u32 i = MAP_SIZE >> 3;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u8* mem8 = (u8*)mem;
mem8[0] = simplify_lookup[mem8[0]];
mem8[1] = simplify_lookup[mem8[1]];
mem8[2] = simplify_lookup[mem8[2]];
mem8[3] = simplify_lookup[mem8[3]];
mem8[4] = simplify_lookup[mem8[4]];
mem8[5] = simplify_lookup[mem8[5]];
mem8[6] = simplify_lookup[mem8[6]];
mem8[7] = simplify_lookup[mem8[7]];
} else *mem = 0x0101010101010101ULL;
++mem;
}
}
#else
static void simplify_trace(u32* mem) {
u32 i = MAP_SIZE >> 2;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u8* mem8 = (u8*)mem;
mem8[0] = simplify_lookup[mem8[0]];
mem8[1] = simplify_lookup[mem8[1]];
mem8[2] = simplify_lookup[mem8[2]];
mem8[3] = simplify_lookup[mem8[3]];
} else *mem = 0x01010101;
++mem;
}
}
#endif /* ^__x86_64__ */
/* Destructively classify execution counts in a trace. This is used as a
preprocessing step for any newly acquired traces. Called on every exec,
must be fast. */
static const u8 count_class_lookup8[256] = {
[0] = 0,
[1] = 1,
[2] = 2,
[3] = 4,
[4 ... 7] = 8,
[8 ... 15] = 16,
[16 ... 31] = 32,
[32 ... 127] = 64,
[128 ... 255] = 128
};
static u16 count_class_lookup16[65536];
void init_count_class16(void) {
u32 b1, b2;
for (b1 = 0; b1 < 256; b1++)
for (b2 = 0; b2 < 256; b2++)
count_class_lookup16[(b1 << 8) + b2] =
(count_class_lookup8[b1] << 8) |
count_class_lookup8[b2];
}
#ifdef __x86_64__
static inline void classify_counts(u64* mem) {
u32 i = MAP_SIZE >> 3;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u16* mem16 = (u16*)mem;
mem16[0] = count_class_lookup16[mem16[0]];
mem16[1] = count_class_lookup16[mem16[1]];
mem16[2] = count_class_lookup16[mem16[2]];
mem16[3] = count_class_lookup16[mem16[3]];
}
++mem;
}
}
#else
static inline void classify_counts(u32* mem) {
u32 i = MAP_SIZE >> 2;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u16* mem16 = (u16*)mem;
mem16[0] = count_class_lookup16[mem16[0]];
mem16[1] = count_class_lookup16[mem16[1]];
}
++mem;
}
}
#endif /* ^__x86_64__ */
/* Compact trace bytes into a smaller bitmap. We effectively just drop the
count information here. This is called only sporadically, for some
new paths. */
static void minimize_bits(u8* dst, u8* src) {
u32 i = 0;
while (i < MAP_SIZE) {
if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
++i;
}
}
/* Find the first power of two greater than or equal to val (assuming val
   under 2^63). */
static u64 next_p2(u64 val) {
u64 ret = 1;
while (val > ret) ret <<= 1;
return ret;
}
/* When we bump into a new path, we call this to see if the path appears
more "favorable" than any of the existing ones. The purpose of the
"favorables" is to have a minimal set of paths that trigger all the bits
seen in the bitmap so far, and focus on fuzzing them at the expense of
the rest.
The first step of the process is to maintain a list of top_rated[] entries
for every byte in the bitmap. We win that slot if there is no previous
contender, or if the contender has a more favorable speed x size factor. */
static void update_bitmap_score(struct queue_entry* q) {
u32 i;
u64 fav_factor = q->exec_us * q->len;
u64 fuzz_p2 = next_p2 (q->n_fuzz);
/* For every byte set in trace_bits[], see if there is a previous winner,
and how it compares to us. */
for (i = 0; i < MAP_SIZE; ++i)
if (trace_bits[i]) {
if (top_rated[i]) {
/* Faster-executing or smaller test cases are favored. */
u64 top_rated_fuzz_p2 = next_p2 (top_rated[i]->n_fuzz);
u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
if (fuzz_p2 > top_rated_fuzz_p2) {
continue;
} else if (fuzz_p2 == top_rated_fuzz_p2) {
if (fav_factor > top_rated_fav_factor)
continue;
}
if (fav_factor > top_rated_fav_factor) continue;
/* Looks like we're going to win. Decrease ref count for the
previous winner, discard its trace_bits[] if necessary. */
if (!--top_rated[i]->tc_ref) {
ck_free(top_rated[i]->trace_mini);
top_rated[i]->trace_mini = 0;
}
}
/* Insert ourselves as the new winner. */
top_rated[i] = q;
++q->tc_ref;
if (!q->trace_mini) {
q->trace_mini = ck_alloc(MAP_SIZE >> 3);
minimize_bits(q->trace_mini, trace_bits);
}
score_changed = 1;
}
}
/* The second part of the mechanism discussed above is a routine that
goes over top_rated[] entries, and then sequentially grabs winners for
previously-unseen bytes (temp_v) and marks them as favored, at least
until the next run. The favored entries are given more air time during
all fuzzing steps. */
static void cull_queue(void) {
struct queue_entry* q;
static u8 temp_v[MAP_SIZE >> 3];
u32 i;
if (dumb_mode || !score_changed) return;
score_changed = 0;
memset(temp_v, 255, MAP_SIZE >> 3);
queued_favored = 0;
pending_favored = 0;
q = queue;
while (q) {
q->favored = 0;
q = q->next;
}
/* Let's see if anything in the bitmap isn't captured in temp_v.
If yes, and if it has a top_rated[] contender, let's use it. */
for (i = 0; i < MAP_SIZE; ++i)
if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
u32 j = MAP_SIZE >> 3;
/* Remove all bits belonging to the current entry from temp_v. */
while (j--)
if (top_rated[i]->trace_mini[j])
temp_v[j] &= ~top_rated[i]->trace_mini[j];
top_rated[i]->favored = 1;
++queued_favored;
if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored;
}
q = queue;
while (q) {
mark_as_redundant(q, !q->favored);
q = q->next;
}
}
/* Load postprocessor, if available. */
static void setup_post(void) {

src/afl-fuzz-src/bitmap.c (new file, 410 lines)

@@ -0,0 +1,410 @@
/*
american fuzzy lop - fuzzer code
--------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Forkserver design by Jann Horn <jannhorn@googlemail.com>
Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This is the real deal: the program takes an instrumented binary and
attempts a variety of basic fuzzing tricks, paying close attention to
how they affect the execution path.
*/
#include "afl-fuzz.h"
/* Write bitmap to file. The bitmap is useful mostly for the secret
-B option, to focus a separate fuzzing session on a particular
interesting input without rediscovering all the others. */
void write_bitmap(void) {
u8* fname;
s32 fd;
if (!bitmap_changed) return;
bitmap_changed = 0;
fname = alloc_printf("%s/fuzz_bitmap", out_dir);
fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) PFATAL("Unable to open '%s'", fname);
ck_write(fd, virgin_bits, MAP_SIZE, fname);
close(fd);
ck_free(fname);
}
/* Read bitmap from file. This is for the -B option again. */
void read_bitmap(u8* fname) {
s32 fd = open(fname, O_RDONLY);
if (fd < 0) PFATAL("Unable to open '%s'", fname);
ck_read(fd, virgin_bits, MAP_SIZE, fname);
close(fd);
}
/* Check if the current execution path brings anything new to the table.
Update virgin bits to reflect the finds. Returns 1 if the only change is
the hit-count for a particular tuple; 2 if there are new tuples seen.
Updates the map, so subsequent calls will always return 0.
This function is called after every exec() on a fairly large buffer, so
it needs to be fast. We do this in 32-bit and 64-bit flavors. */
u8 has_new_bits(u8* virgin_map) {
#ifdef __x86_64__
u64* current = (u64*)trace_bits;
u64* virgin = (u64*)virgin_map;
u32 i = (MAP_SIZE >> 3);
#else
u32* current = (u32*)trace_bits;
u32* virgin = (u32*)virgin_map;
u32 i = (MAP_SIZE >> 2);
#endif /* ^__x86_64__ */
u8 ret = 0;
while (i--) {
/* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
that have not been already cleared from the virgin map - since this will
almost always be the case. */
if (unlikely(*current) && unlikely(*current & *virgin)) {
if (likely(ret < 2)) {
u8* cur = (u8*)current;
u8* vir = (u8*)virgin;
/* Looks like we have not found any new bytes yet; see if any non-zero
bytes in current[] are pristine in virgin[]. */
#ifdef __x86_64__
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
(cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
(cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
else ret = 1;
#else
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
else ret = 1;
#endif /* ^__x86_64__ */
}
*virgin &= ~*current;
}
++current;
++virgin;
}
if (ret && virgin_map == virgin_bits) bitmap_changed = 1;
return ret;
}
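/* Editor's sketch, not part of this commit: the 0/1/2 return contract
   on a toy eight-byte map. toy_has_new_bits() is a hypothetical
   byte-wise reduction of the word-wise loop above; virgin starts as
   all 0xff, exactly like virgin_bits. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TOY_MAP 8

static uint8_t toy_has_new_bits(uint8_t* cur, uint8_t* virgin) {
  uint8_t ret = 0;
  for (int i = 0; i < TOY_MAP; ++i) {
    if (cur[i] & virgin[i]) {
      if (virgin[i] == 0xff) ret = 2;   /* tuple never seen before      */
      else if (ret < 2) ret = 1;        /* known tuple, new count bits  */
      virgin[i] &= ~cur[i];             /* clear what we just saw       */
    }
  }
  return ret;
}

int main(void) {
  uint8_t virgin[TOY_MAP];
  uint8_t trace[TOY_MAP] = { 0 };
  memset(virgin, 0xff, sizeof virgin);
  trace[3] = 1;                                     /* new tuple        */
  printf("%d\n", toy_has_new_bits(trace, virgin));  /* 2                */
  trace[3] = 2;                                     /* new hit bucket   */
  printf("%d\n", toy_has_new_bits(trace, virgin));  /* 1                */
  printf("%d\n", toy_has_new_bits(trace, virgin));  /* 0, map updated   */
  return 0;
}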
/* Count the number of bits set in the provided bitmap. Used for the status
screen several times every second, does not have to be fast. */
u32 count_bits(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
/* This gets called on the inverse, virgin bitmap; optimize for sparse
data. */
if (v == 0xffffffff) {
ret += 32;
continue;
}
v -= ((v >> 1) & 0x55555555);
v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
}
return ret;
}
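/* Editor's sketch, not part of this commit: the three bit-twiddling
   lines above are a classic SWAR population count -- fold bit pairs
   into 2-bit sums, then nibble sums, then let the multiply accumulate
   the byte sums into the top byte. The same per-word step, standalone: */
#include <stdio.h>
#include <stdint.h>

static uint32_t popcount32(uint32_t v) {
  v -= (v >> 1) & 0x55555555;                     /* 2-bit pair sums   */
  v = (v & 0x33333333) + ((v >> 2) & 0x33333333); /* 4-bit sums        */
  return (((v + (v >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; /* to MSB */
}

int main(void) {
  printf("%u\n", popcount32(0xF0F00003)); /* prints 10 */
  return 0;
}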
#define FF(_b) (0xff << ((_b) << 3))
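/* Editor's note: FF(_b) masks byte _b of a 32-bit word, e.g.
   FF(0) == 0x000000ff and FF(3) == 0xff000000. */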
/* Count the number of bytes set in the bitmap. Called fairly sporadically,
mostly to update the status screen or calibrate and examine confirmed
new paths. */
u32 count_bytes(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
if (!v) continue;
if (v & FF(0)) ++ret;
if (v & FF(1)) ++ret;
if (v & FF(2)) ++ret;
if (v & FF(3)) ++ret;
}
return ret;
}
/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
status screen, several calls per second or so. */
u32 count_non_255_bytes(u8* mem) {
u32* ptr = (u32*)mem;
u32 i = (MAP_SIZE >> 2);
u32 ret = 0;
while (i--) {
u32 v = *(ptr++);
/* This is called on the virgin bitmap, so optimize for the most likely
case. */
if (v == 0xffffffff) continue;
if ((v & FF(0)) != FF(0)) ++ret;
if ((v & FF(1)) != FF(1)) ++ret;
if ((v & FF(2)) != FF(2)) ++ret;
if ((v & FF(3)) != FF(3)) ++ret;
}
return ret;
}
/* Destructively simplify trace by eliminating hit count information
and replacing it with 0x80 or 0x01 depending on whether the tuple
is hit or not. Called on every new crash or timeout, should be
reasonably fast. */
const u8 simplify_lookup[256] = {
[0] = 1,
[1 ... 255] = 128
};
#ifdef __x86_64__
void simplify_trace(u64* mem) {
u32 i = MAP_SIZE >> 3;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u8* mem8 = (u8*)mem;
mem8[0] = simplify_lookup[mem8[0]];
mem8[1] = simplify_lookup[mem8[1]];
mem8[2] = simplify_lookup[mem8[2]];
mem8[3] = simplify_lookup[mem8[3]];
mem8[4] = simplify_lookup[mem8[4]];
mem8[5] = simplify_lookup[mem8[5]];
mem8[6] = simplify_lookup[mem8[6]];
mem8[7] = simplify_lookup[mem8[7]];
} else *mem = 0x0101010101010101ULL;
++mem;
}
}
#else
void simplify_trace(u32* mem) {
u32 i = MAP_SIZE >> 2;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u8* mem8 = (u8*)mem;
mem8[0] = simplify_lookup[mem8[0]];
mem8[1] = simplify_lookup[mem8[1]];
mem8[2] = simplify_lookup[mem8[2]];
mem8[3] = simplify_lookup[mem8[3]];
} else *mem = 0x01010101;
++mem;
}
}
#endif /* ^__x86_64__ */
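/* Editor's note: after simplify_trace(), every map byte is either 0x01
   (tuple not hit) or 0x80 (hit at least once), so two crashes that
   differ only in hit counts hash to the same simplified map and can be
   treated as the same crash site. */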
/* Destructively classify execution counts in a trace. This is used as a
preprocessing step for any newly acquired traces. Called on every exec,
must be fast. */
static const u8 count_class_lookup8[256] = {
[0] = 0,
[1] = 1,
[2] = 2,
[3] = 4,
[4 ... 7] = 8,
[8 ... 15] = 16,
[16 ... 31] = 32,
[32 ... 127] = 64,
[128 ... 255] = 128
};
static u16 count_class_lookup16[65536];
void init_count_class16(void) {
u32 b1, b2;
for (b1 = 0; b1 < 256; b1++)
for (b2 = 0; b2 < 256; b2++)
count_class_lookup16[(b1 << 8) + b2] =
(count_class_lookup8[b1] << 8) |
count_class_lookup8[b2];
}
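/* Editor's sketch, not part of this commit: the classifier in action.
   Raw hit counts collapse into power-of-two buckets, so loop jitter
   (say, 100 vs. 101 hits) no longer looks like new behavior. Same
   ranges as count_class_lookup8; range designators are the GCC/Clang
   extension this file already uses. */
#include <stdio.h>

static const unsigned char demo_class8[256] = {
  [0] = 0, [1] = 1, [2] = 2, [3] = 4,
  [4 ... 7] = 8, [8 ... 15] = 16, [16 ... 31] = 32,
  [32 ... 127] = 64, [128 ... 255] = 128
};

int main(void) {
  printf("%u %u %u\n", demo_class8[5], demo_class8[100], demo_class8[101]);
  /* prints "8 64 64": 100 and 101 hits land in the same bucket */
  return 0;
}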
#ifdef __x86_64__
void classify_counts(u64* mem) {
u32 i = MAP_SIZE >> 3;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u16* mem16 = (u16*)mem;
mem16[0] = count_class_lookup16[mem16[0]];
mem16[1] = count_class_lookup16[mem16[1]];
mem16[2] = count_class_lookup16[mem16[2]];
mem16[3] = count_class_lookup16[mem16[3]];
}
++mem;
}
}
#else
void classify_counts(u32* mem) {
u32 i = MAP_SIZE >> 2;
while (i--) {
/* Optimize for sparse bitmaps. */
if (unlikely(*mem)) {
u16* mem16 = (u16*)mem;
mem16[0] = count_class_lookup16[mem16[0]];
mem16[1] = count_class_lookup16[mem16[1]];
}
++mem;
}
}
#endif /* ^__x86_64__ */
/* Compact trace bytes into a smaller bitmap. We effectively just drop the
count information here. This is called only sporadically, for some
new paths. */
void minimize_bits(u8* dst, u8* src) {
u32 i = 0;
while (i < MAP_SIZE) {
if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
++i;
}
}
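/* Editor's sketch, not part of this commit: the same packing on a toy
   16-byte map. Counts are dropped; one bit per tuple survives, so the
   minimized map is an eighth the size. */
#include <stdio.h>

int main(void) {
  unsigned char src[16] = { 0 }, dst[2] = { 0 };
  src[1] = 3; src[9] = 120;                /* two tuples hit, any count */
  for (unsigned i = 0; i < 16; ++i)
    if (src[i]) dst[i >> 3] |= 1 << (i & 7);
  printf("%02x %02x\n", dst[0], dst[1]);   /* "02 02": bits 1 and 9 set */
  return 0;
}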

src/afl-fuzz-src/misc.c (new file, 24 lines)

@@ -0,0 +1,24 @@
/*
american fuzzy lop - fuzzer code
--------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Forkserver design by Jann Horn <jannhorn@googlemail.com>
Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This is the real deal: the program takes an instrumented binary and
attempts a variety of basic fuzzing tricks, paying close attention to
how they affect the execution path.
*/
#include "afl-fuzz.h"

src/afl-fuzz-src/queue.c (new file, 286 lines)

@@ -0,0 +1,286 @@
/*
american fuzzy lop - fuzzer code
--------------------------------
Written and maintained by Michal Zalewski <lcamtuf@google.com>
Forkserver design by Jann Horn <jannhorn@googlemail.com>
Copyright 2013, 2014, 2015, 2016, 2017 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
This is the real deal: the program takes an instrumented binary and
attempts a variety of basic fuzzing tricks, paying close attention to
how they affect the execution path.
*/
#include "afl-fuzz.h"
/* Mark deterministic checks as done for a particular queue entry. We use the
.state file to avoid repeating deterministic fuzzing when resuming aborted
scans. */
void mark_as_det_done(struct queue_entry* q) {
u8* fn = strrchr(q->fname, '/');
s32 fd;
fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1);
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
ck_free(fn);
q->passed_det = 1;
}
/* Mark as variable. Create symlinks if possible to make it easier to examine
the files. */
void mark_as_variable(struct queue_entry* q) {
u8 *fn = strrchr(q->fname, '/') + 1, *ldest;
ldest = alloc_printf("../../%s", fn);
fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn);
if (symlink(ldest, fn)) {
s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
}
ck_free(ldest);
ck_free(fn);
q->var_behavior = 1;
}
/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
but may be useful for post-processing datasets. */
void mark_as_redundant(struct queue_entry* q, u8 state) {
u8* fn;
if (state == q->fs_redundant) return;
q->fs_redundant = state;
fn = strrchr(q->fname, '/');
fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1);
if (state) {
s32 fd;
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
close(fd);
} else {
if (unlink(fn)) PFATAL("Unable to remove '%s'", fn);
}
ck_free(fn);
}
/* Append new test case to the queue. */
void add_to_queue(u8* fname, u32 len, u8 passed_det) {
struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
q->fname = fname;
q->len = len;
q->depth = cur_depth + 1;
q->passed_det = passed_det;
q->n_fuzz = 1;
if (q->depth > max_depth) max_depth = q->depth;
if (queue_top) {
queue_top->next = q;
queue_top = q;
} else q_prev100 = queue = queue_top = q;
++queued_paths;
++pending_not_fuzzed;
cycles_wo_finds = 0;
if (!(queued_paths % 100)) {
q_prev100->next_100 = q;
q_prev100 = q;
}
last_path_time = get_cur_time();
}
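/* Editor's note: q_prev100 / next_100 form a crude skip list; every
   100th entry keeps a shortcut to the next one, so long queues can be
   traversed in 100-entry strides. Illustrative lookup (hypothetical
   helper, not in this commit):

     static struct queue_entry* nth_entry(u32 n) {
       struct queue_entry* q = queue;
       while (n >= 100 && q->next_100) { q = q->next_100; n -= 100; }
       while (n-- && q) q = q->next;
       return q;
     }
*/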
/* Destroy the entire queue. */
void destroy_queue(void) {
struct queue_entry *q = queue, *n;
while (q) {
n = q->next;
ck_free(q->fname);
ck_free(q->trace_mini);
ck_free(q);
q = n;
}
}
/* When we bump into a new path, we call this to see if the path appears
more "favorable" than any of the existing ones. The purpose of the
"favorables" is to have a minimal set of paths that trigger all the bits
seen in the bitmap so far, and focus on fuzzing them at the expense of
the rest.
The first step of the process is to maintain a list of top_rated[] entries
for every byte in the bitmap. We win that slot if there is no previous
contender, or if the contender has a more favorable speed x size factor. */
void update_bitmap_score(struct queue_entry* q) {
u32 i;
u64 fav_factor = q->exec_us * q->len;
u64 fuzz_p2 = next_p2 (q->n_fuzz);
/* For every byte set in trace_bits[], see if there is a previous winner,
and how it compares to us. */
for (i = 0; i < MAP_SIZE; ++i)
if (trace_bits[i]) {
if (top_rated[i]) {
/* Faster-executing or smaller test cases are favored. */
u64 top_rated_fuzz_p2 = next_p2 (top_rated[i]->n_fuzz);
u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
if (fuzz_p2 > top_rated_fuzz_p2) {
continue;
} else if (fuzz_p2 == top_rated_fuzz_p2) {
if (fav_factor > top_rated_fav_factor)
continue;
}
if (fav_factor > top_rated_fav_factor) continue;
/* Looks like we're going to win. Decrease ref count for the
previous winner, discard its trace_bits[] if necessary. */
if (!--top_rated[i]->tc_ref) {
ck_free(top_rated[i]->trace_mini);
top_rated[i]->trace_mini = 0;
}
}
/* Insert ourselves as the new winner. */
top_rated[i] = q;
++q->tc_ref;
if (!q->trace_mini) {
q->trace_mini = ck_alloc(MAP_SIZE >> 3);
minimize_bits(q->trace_mini, trace_bits);
}
score_changed = 1;
}
}
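/* Editor's worked example: a challenger takes a slot only if it is both
   no more fuzzed (fuzz_p2 <= holder's) and no more expensive
   (exec_us * len <= holder's). E.g. q with fuzz_p2 8 and fav_factor
   4000 evicts a holder with fuzz_p2 16 and fav_factor 10000, but would
   lose with fav_factor 12000 despite being less fuzzed. */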
/* The second part of the mechanism discussed above is a routine that
goes over top_rated[] entries, and then sequentially grabs winners for
previously-unseen bytes (temp_v) and marks them as favored, at least
until the next run. The favored entries are given more air time during
all fuzzing steps. */
void cull_queue(void) {
struct queue_entry* q;
static u8 temp_v[MAP_SIZE >> 3];
u32 i;
if (dumb_mode || !score_changed) return;
score_changed = 0;
memset(temp_v, 255, MAP_SIZE >> 3);
queued_favored = 0;
pending_favored = 0;
q = queue;
while (q) {
q->favored = 0;
q = q->next;
}
/* Let's see if anything in the bitmap isn't captured in temp_v.
If yes, and if it has a top_rated[] contender, let's use it. */
for (i = 0; i < MAP_SIZE; ++i)
if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
u32 j = MAP_SIZE >> 3;
/* Remove all bits belonging to the current entry from temp_v. */
while (j--)
if (top_rated[i]->trace_mini[j])
temp_v[j] &= ~top_rated[i]->trace_mini[j];
top_rated[i]->favored = 1;
++queued_favored;
if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored;
}
q = queue;
while (q) {
mark_as_redundant(q, !q->favored);
q = q->next;
}
}
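/* Editor's sketch, not part of this commit: cull_queue() is a greedy
   set cover over trace_mini bitmaps. Toy version with eight tuples and
   three entries, simplified to walk entries instead of map indices;
   the covering step (clear an entry's bits out of temp_v) is the same. */
#include <stdio.h>

int main(void) {
  unsigned char mini[3] = { 0x0f, 0x3c, 0xc0 }; /* per-entry tuple bits   */
  unsigned char temp_v = 0xff;                  /* tuples not yet covered */
  for (int i = 0; i < 3; ++i)
    if (mini[i] & temp_v) {                     /* covers something new?  */
      temp_v &= (unsigned char)~mini[i];
      printf("entry %d favored\n", i);
    }
  /* All three get favored here: 0 and 1 cover 0x3f, and 0xc0 is only
     in entry 2. Fold 0xc0 into entry 1's map and entry 2 would be
     marked redundant instead. */
  return 0;
}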