configurable testcache with malloc (#581)

* cache item number to cache memory size

* reload testcase if trimming changed the size

* fix splicing selection

* slim splicing

* import sync fix

* write testcache stats to fuzzer_stats

* fix new seed selection algo

* malloc+read instead of mmap

* fix

* testcache is configurable now and no reference counts

* fixes compilation, test script

* fixes

* switch TEST_CC to afl-cc in makefile

* code format

* fix

* fix crash

* fix crash

* fix env help output

* remove unnecessary pointer resets

* fix endless loop bug

* actually use the cache if set

* one more fix

* increase default cache entries, add default cache size value to config.h

Co-authored-by: hexcoder- <heiko@hexco.de>
This commit is contained in:
van Hauser
2020-10-14 15:30:30 +02:00
committed by GitHub
parent c20ba2c2c5
commit 56ac3fcdc5
13 changed files with 276 additions and 163 deletions

View File

@ -223,8 +223,6 @@ ifneq "$(findstring OpenBSD, $(shell uname))" ""
LDFLAGS += -lpthread LDFLAGS += -lpthread
endif endif
TEST_CC = afl-gcc
COMM_HDR = include/alloc-inl.h include/config.h include/debug.h include/types.h COMM_HDR = include/alloc-inl.h include/config.h include/debug.h include/types.h
ifeq "$(shell echo '$(HASH)include <Python.h>@int main() {return 0; }' | tr @ '\n' | $(CC) $(CFLAGS) -x c - -o .test $(PYTHON_INCLUDE) $(LDFLAGS) $(PYTHON_LIB) 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1" ifeq "$(shell echo '$(HASH)include <Python.h>@int main() {return 0; }' | tr @ '\n' | $(CC) $(CFLAGS) -x c - -o .test $(PYTHON_INCLUDE) $(LDFLAGS) $(PYTHON_LIB) 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
@ -488,7 +486,7 @@ code-format:
ifndef AFL_NO_X86 ifndef AFL_NO_X86
test_build: afl-cc afl-as afl-showmap test_build: afl-cc afl-as afl-showmap
@echo "[*] Testing the CC wrapper and instrumentation output..." @echo "[*] Testing the CC wrapper and instrumentation output..."
@unset AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_DEBUG=1 AFL_INST_RATIO=100 AFL_PATH=. ./$(TEST_CC) $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) 2>&1 | grep 'afl-as' >/dev/null || (echo "Oops, afl-as did not get called from "$(TEST_CC)". This is normally achieved by "$(CC)" honoring the -B option."; exit 1 ) @unset AFL_MAP_SIZE AFL_USE_UBSAN AFL_USE_CFISAN AFL_USE_ASAN AFL_USE_MSAN AFL_CC; AFL_INST_RATIO=100 AFL_PATH=. ./afl-cc $(CFLAGS) test-instr.c -o test-instr $(LDFLAGS) 2>&1 || (echo "Oops, afl-cc failed"; exit 1 )
ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
echo 1 | ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr1 ./test-instr echo 1 | ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr1 ./test-instr
@rm -f test-instr @rm -f test-instr

View File

@ -168,6 +168,8 @@ struct queue_entry {
double perf_score; /* performance score */ double perf_score; /* performance score */
u8 *testcase_buf; /* The testcase buffer, if loaded. */
struct queue_entry *next; /* Next element, if any */ struct queue_entry *next; /* Next element, if any */
}; };
@ -363,7 +365,7 @@ typedef struct afl_env_vars {
u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path, u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path,
*afl_hang_tmout, *afl_forksrv_init_tmout, *afl_skip_crashes, *afl_preload, *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_skip_crashes, *afl_preload,
*afl_max_det_extras, *afl_statsd_host, *afl_statsd_port, *afl_max_det_extras, *afl_statsd_host, *afl_statsd_port,
*afl_statsd_tags_flavor; *afl_statsd_tags_flavor, *afl_testcache_size;
} afl_env_vars_t; } afl_env_vars_t;
@ -675,6 +677,9 @@ typedef struct afl_state {
u8 *in_scratch_buf; u8 *in_scratch_buf;
u8 *ex_buf; u8 *ex_buf;
u8 *testcase_buf, *splicecase_buf;
u32 custom_mutators_count; u32 custom_mutators_count;
list_t custom_mutator_list; list_t custom_mutator_list;
@ -686,6 +691,22 @@ typedef struct afl_state {
/* queue entries ready for splicing count (len > 4) */ /* queue entries ready for splicing count (len > 4) */
u32 ready_for_splicing_count; u32 ready_for_splicing_count;
/* This is the user specified maximum size to use for the testcase cache */
u64 q_testcase_max_cache_size;
/* How much of the testcase cache is used so far */
u64 q_testcase_cache_size;
/* highest cache count so far */
u32 q_testcase_max_cache_count;
/* How many queue entries currently have cached testcases */
u32 q_testcase_cache_count;
/* Refs to each queue entry with cached testcase (for eviction, if cache_count
* is too large) */
struct queue_entry *q_testcase_cache[TESTCASE_ENTRIES];
} afl_state_t; } afl_state_t;
struct custom_mutator { struct custom_mutator {
@ -1135,5 +1156,13 @@ static inline u64 next_p2(u64 val) {
} }
/* Returns the testcase buf from the file behind this queue entry.
   Loads and caches the buffer if a testcase cache is configured
   (no reference counting). */
u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q);
/* If trimming changes the testcase size we have to reload it */
void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q,
u32 old_len);
#endif #endif

View File

@ -295,6 +295,15 @@
#define RESEED_RNG 100000 #define RESEED_RNG 100000
/* The maximum number of testcases to cache */
#define TESTCASE_ENTRIES 16384
/* The default maximum testcase cache size in MB, 0 = disable.
A value between 50 and 250 is a good default value. */
#define TESTCASE_CACHE 0
/* Maximum line length passed from GCC to 'as' and used for parsing /* Maximum line length passed from GCC to 'as' and used for parsing
configuration files: */ configuration files: */

View File

@ -139,6 +139,7 @@ static char *afl_environment_variables[] = {
"AFL_STATSD_HOST", "AFL_STATSD_HOST",
"AFL_STATSD_PORT", "AFL_STATSD_PORT",
"AFL_STATSD_TAGS_FLAVOR", "AFL_STATSD_TAGS_FLAVOR",
"AFL_TESTCACHE_SIZE",
"AFL_TMIN_EXACT", "AFL_TMIN_EXACT",
"AFL_TMPDIR", "AFL_TMPDIR",
"AFL_TOKEN_FILE", "AFL_TOKEN_FILE",

View File

@ -1045,7 +1045,7 @@ restart_outer_cull_loop:
while (q) { while (q) {
if (q->cal_failed || !q->exec_cksum) continue; if (q->cal_failed || !q->exec_cksum) { goto next_entry; }
restart_inner_cull_loop: restart_inner_cull_loop:
@ -1090,6 +1090,8 @@ restart_outer_cull_loop:
} }
next_entry:
prev = q; prev = q;
q = q->next; q = q->next;

View File

@ -370,7 +370,7 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) {
u8 fuzz_one_original(afl_state_t *afl) { u8 fuzz_one_original(afl_state_t *afl) {
s32 len, fd, temp_len; s32 len, temp_len;
u32 j; u32 j;
u32 i; u32 i;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@ -453,32 +453,9 @@ u8 fuzz_one_original(afl_state_t *afl) {
} }
/* Map the test case into memory. */ orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
fd = open(afl->queue_cur->fname, O_RDONLY);
if (unlikely(fd < 0)) {
PFATAL("Unable to open '%s'", afl->queue_cur->fname);
}
len = afl->queue_cur->len; len = afl->queue_cur->len;
orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (unlikely(orig_in == MAP_FAILED)) {
PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len);
}
close(fd);
/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
single byte anyway, so it wouldn't give us any performance or memory usage
benefits. */
out_buf = afl_realloc(AFL_BUF_PARAM(out), len); out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); } if (unlikely(!out_buf)) { PFATAL("alloc"); }
@ -526,6 +503,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
!afl->disable_trim)) { !afl->disable_trim)) {
u8 res = trim_case(afl, afl->queue_cur, in_buf); u8 res = trim_case(afl, afl->queue_cur, in_buf);
orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
if (unlikely(res == FSRV_RUN_ERROR)) { if (unlikely(res == FSRV_RUN_ERROR)) {
@ -1720,17 +1698,7 @@ custom_mutator_stage:
afl->splicing_with = tid; afl->splicing_with = tid;
/* Read the additional testcase into a new buffer. */ /* Read the additional testcase into a new buffer. */
fd = open(target->fname, O_RDONLY); new_buf = queue_testcase_get(afl, target);
if (unlikely(fd < 0)) {
PFATAL("Unable to open '%s'", target->fname);
}
new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len);
if (unlikely(!new_buf)) { PFATAL("alloc"); }
ck_read(fd, new_buf, target->len, target->fname);
close(fd);
target_len = target->len; target_len = target->len;
} }
@ -2182,7 +2150,6 @@ havoc_stage:
afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch)); afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch));
out_buf = new_buf; out_buf = new_buf;
new_buf = NULL;
temp_len += clone_len; temp_len += clone_len;
} }
@ -2326,43 +2293,21 @@ havoc_stage:
/* Pick a random queue entry and seek to it. */ /* Pick a random queue entry and seek to it. */
u32 tid; u32 tid;
do do {
tid = rand_below(afl, afl->queued_paths); tid = rand_below(afl, afl->queued_paths);
while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4);
} while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4);
/* Get the testcase for splicing. */
struct queue_entry *target = afl->queue_buf[tid]; struct queue_entry *target = afl->queue_buf[tid];
u32 new_len = target->len;
u8 * new_buf = queue_testcase_get(afl, target);
/* Read the testcase into a new buffer. */ if ((temp_len >= 2 && rand_below(afl, 2)) ||
temp_len + HAVOC_BLK_XL >= MAX_FILE) {
fd = open(target->fname, O_RDONLY); /* overwrite mode */
if (unlikely(fd < 0)) {
PFATAL("Unable to open '%s'", target->fname);
}
u32 new_len = target->len;
u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len);
if (unlikely(!new_buf)) { PFATAL("alloc"); }
ck_read(fd, new_buf, new_len, target->fname);
close(fd);
u8 overwrite = 0;
if (temp_len >= 2 && rand_below(afl, 2))
overwrite = 1;
else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) {
if (temp_len >= 2)
overwrite = 1;
else
break;
}
if (overwrite) {
u32 copy_from, copy_to, copy_len; u32 copy_from, copy_to, copy_len;
@ -2376,15 +2321,16 @@ havoc_stage:
} else { } else {
/* insert mode */
u32 clone_from, clone_to, clone_len; u32 clone_from, clone_to, clone_len;
clone_len = choose_block_len(afl, new_len); clone_len = choose_block_len(afl, new_len);
clone_from = rand_below(afl, new_len - clone_len + 1); clone_from = rand_below(afl, new_len - clone_len + 1);
clone_to = rand_below(afl, temp_len + 1);
clone_to = rand_below(afl, temp_len); u8 *temp_buf = afl_realloc(AFL_BUF_PARAM(out_scratch),
temp_len + clone_len + 1);
u8 *temp_buf =
afl_realloc(AFL_BUF_PARAM(out_scratch), temp_len + clone_len);
if (unlikely(!temp_buf)) { PFATAL("alloc"); } if (unlikely(!temp_buf)) { PFATAL("alloc"); }
/* Head */ /* Head */
@ -2496,21 +2442,10 @@ retry_splicing:
} while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4); } while (tid == afl->current_entry || afl->queue_buf[tid]->len < 4);
/* Get the testcase */
afl->splicing_with = tid; afl->splicing_with = tid;
target = afl->queue_buf[tid]; target = afl->queue_buf[tid];
new_buf = queue_testcase_get(afl, target);
/* Read the testcase into a new buffer. */
fd = open(target->fname, O_RDONLY);
if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); }
new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
if (unlikely(!new_buf)) { PFATAL("alloc"); }
ck_read(fd, new_buf, target->len, target->fname);
close(fd);
/* Find a suitable splicing location, somewhere between the first and /* Find a suitable splicing location, somewhere between the first and
the last differing byte. Bail out if the difference is just a single the last differing byte. Bail out if the difference is just a single
@ -2527,18 +2462,16 @@ retry_splicing:
/* Do the thing. */ /* Do the thing. */
len = target->len; len = target->len;
memcpy(new_buf, in_buf, split_at); afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len);
afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); memcpy(afl->in_scratch_buf, in_buf, split_at);
in_buf = new_buf; memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at);
in_buf = afl->in_scratch_buf;
out_buf = afl_realloc(AFL_BUF_PARAM(out), len); out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); } if (unlikely(!out_buf)) { PFATAL("alloc"); }
memcpy(out_buf, in_buf, len); memcpy(out_buf, in_buf, len);
goto custom_mutator_stage; goto custom_mutator_stage;
/* ???: While integrating Python module, the author decided to jump to
python stage, but the reason behind this is not clear.*/
// goto havoc_stage;
} }
@ -2564,9 +2497,7 @@ abandon_entry:
} }
++afl->queue_cur->fuzz_level; ++afl->queue_cur->fuzz_level;
orig_in = NULL;
munmap(orig_in, afl->queue_cur->len);
return ret_val; return ret_val;
#undef FLIP_BIT #undef FLIP_BIT
@ -2587,7 +2518,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
} }
s32 len, fd, temp_len; s32 len, temp_len;
u32 i; u32 i;
u32 j; u32 j;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@ -2652,32 +2583,11 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
} }
/* Map the test case into memory. */ /* Map the test case into memory. */
orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
fd = open(afl->queue_cur->fname, O_RDONLY);
if (fd < 0) { PFATAL("Unable to open '%s'", afl->queue_cur->fname); }
len = afl->queue_cur->len; len = afl->queue_cur->len;
orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (orig_in == MAP_FAILED) {
PFATAL("Unable to mmap '%s'", afl->queue_cur->fname);
}
close(fd);
/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
single byte anyway, so it wouldn't give us any performance or memory usage
benefits. */
out_buf = afl_realloc(AFL_BUF_PARAM(out), len); out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); } if (unlikely(!out_buf)) { PFATAL("alloc"); }
afl->subseq_tmouts = 0; afl->subseq_tmouts = 0;
afl->cur_depth = afl->queue_cur->depth; afl->cur_depth = afl->queue_cur->depth;
/******************************************* /*******************************************
@ -2721,6 +2631,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
u32 old_len = afl->queue_cur->len; u32 old_len = afl->queue_cur->len;
u8 res = trim_case(afl, afl->queue_cur, in_buf); u8 res = trim_case(afl, afl->queue_cur, in_buf);
orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur);
if (res == FSRV_RUN_ERROR) { if (res == FSRV_RUN_ERROR) {
@ -4497,17 +4408,7 @@ pacemaker_fuzzing:
target = afl->queue_buf[tid]; target = afl->queue_buf[tid];
/* Read the testcase into a new buffer. */ /* Read the testcase into a new buffer. */
new_buf = queue_testcase_get(afl, target);
fd = open(target->fname, O_RDONLY);
if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); }
new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
if (unlikely(!new_buf)) { PFATAL("alloc"); }
ck_read(fd, new_buf, target->len, target->fname);
close(fd);
/* Find a suitable splicin g location, somewhere between the first and /* Find a suitable splicin g location, somewhere between the first and
the last differing byte. Bail out if the difference is just a single the last differing byte. Bail out if the difference is just a single
@ -4529,9 +4430,11 @@ pacemaker_fuzzing:
/* Do the thing. */ /* Do the thing. */
len = target->len; len = target->len;
memcpy(new_buf, in_buf, split_at); afl->in_scratch_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), len);
afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch)); memcpy(afl->in_scratch_buf, in_buf, split_at);
in_buf = new_buf; memcpy(afl->in_scratch_buf + split_at, new_buf, len - split_at);
in_buf = afl->in_scratch_buf;
out_buf = afl_realloc(AFL_BUF_PARAM(out), len); out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); } if (unlikely(!out_buf)) { PFATAL("alloc"); }
memcpy(out_buf, in_buf, len); memcpy(out_buf, in_buf, len);
@ -4569,7 +4472,7 @@ pacemaker_fuzzing:
// if (afl->queue_cur->favored) --afl->pending_favored; // if (afl->queue_cur->favored) --afl->pending_favored;
// } // }
munmap(orig_in, afl->queue_cur->len); orig_in = NULL;
if (afl->key_puppet == 1) { if (afl->key_puppet == 1) {

View File

@ -31,11 +31,12 @@
inline u32 select_next_queue_entry(afl_state_t *afl) { inline u32 select_next_queue_entry(afl_state_t *afl) {
u32 s = rand_below(afl, afl->queued_paths); u32 s = rand_below(afl, afl->queued_paths);
double p = rand_next_percent(afl); double p = rand_next_percent(afl);
/* /*
fprintf(stderr, "select: p=%f s=%u ... p < prob[s]=%f ? s=%u : alias[%u]=%u" fprintf(stderr, "select: p=%f s=%u ... p < prob[s]=%f ? s=%u : alias[%u]=%u"
" ==> %u\n", p, s, afl->alias_probability[s], s, s, afl->alias_table[s], p < afl->alias_probability[s] ? s : afl->alias_table[s]); " ==> %u\n", p, s, afl->alias_probability[s], s, s, afl->alias_table[s], p <
afl->alias_probability[s] ? s : afl->alias_table[s]);
*/ */
return (p < afl->alias_probability[s] ? s : afl->alias_table[s]); return (p < afl->alias_probability[s] ? s : afl->alias_table[s]);
@ -55,7 +56,7 @@ void create_alias_table(afl_state_t *afl) {
int * S = (u32 *)afl_realloc(AFL_BUF_PARAM(out_scratch), n * sizeof(u32)); int * S = (u32 *)afl_realloc(AFL_BUF_PARAM(out_scratch), n * sizeof(u32));
int * L = (u32 *)afl_realloc(AFL_BUF_PARAM(in_scratch), n * sizeof(u32)); int * L = (u32 *)afl_realloc(AFL_BUF_PARAM(in_scratch), n * sizeof(u32));
if (!P || !S || !L) FATAL("could not aquire memory for alias table"); if (!P || !S || !L) { FATAL("could not aquire memory for alias table"); }
memset((void *)afl->alias_table, 0, n * sizeof(u32)); memset((void *)afl->alias_table, 0, n * sizeof(u32));
memset((void *)afl->alias_probability, 0, n * sizeof(double)); memset((void *)afl->alias_probability, 0, n * sizeof(double));
@ -65,7 +66,7 @@ void create_alias_table(afl_state_t *afl) {
struct queue_entry *q = afl->queue_buf[i]; struct queue_entry *q = afl->queue_buf[i];
if (!q->disabled) q->perf_score = calculate_score(afl, q); if (!q->disabled) { q->perf_score = calculate_score(afl, q); }
sum += q->perf_score; sum += q->perf_score;
@ -74,19 +75,23 @@ void create_alias_table(afl_state_t *afl) {
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
struct queue_entry *q = afl->queue_buf[i]; struct queue_entry *q = afl->queue_buf[i];
P[i] = (q->perf_score * n) / sum;
P[i] = q->perf_score * n / sum;
} }
int nS = 0, nL = 0, s; int nS = 0, nL = 0, s;
for (s = (s32)n - 1; s >= 0; --s) { for (s = (s32)n - 1; s >= 0; --s) {
if (P[s] < 1) if (P[s] < 1) {
S[nS++] = s; S[nS++] = s;
else
} else {
L[nL++] = s; L[nL++] = s;
}
} }
while (nS && nL) { while (nS && nL) {
@ -96,11 +101,16 @@ void create_alias_table(afl_state_t *afl) {
afl->alias_probability[a] = P[a]; afl->alias_probability[a] = P[a];
afl->alias_table[a] = g; afl->alias_table[a] = g;
P[g] = P[g] + P[a] - 1; P[g] = P[g] + P[a] - 1;
if (P[g] < 1) if (P[g] < 1) {
S[nS++] = g; S[nS++] = g;
else
} else {
L[nL++] = g; L[nL++] = g;
}
} }
while (nL) while (nL)
@ -110,11 +120,10 @@ void create_alias_table(afl_state_t *afl) {
afl->alias_probability[S[--nS]] = 1; afl->alias_probability[S[--nS]] = 1;
/* /*
fprintf(stderr, " %-3s %-3s %-9s %-9s\n", "entry", "alias", "prob", "perf"); fprintf(stderr, " entry alias probability perf_score\n");
for (u32 i = 0; i < n; ++i) for (u32 i = 0; i < n; ++i)
fprintf(stderr, " %3i %3i %9.7f %9.7f\n", i, afl->alias_table[i], fprintf(stderr, " %5u %5u %11u %0.9f\n", i, afl->alias_table[i],
afl->alias_probability[i], afl->queue_buf[i]->perf_score); afl->alias_probability[i], afl->queue_buf[i]->perf_score);
*/ */
} }
@ -860,3 +869,131 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
} }
/* If trimming changed the testcase size we have to reload it from disk so
   the cached copy matches the file contents again.
   old_len is the length before trimming and is only used to fix up the
   running cache-size accounting (q->len already holds the new length). */
void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q,
                           u32 old_len) {

  if (likely(q->testcase_buf)) {

    free(q->testcase_buf);
    int fd = open(q->fname, O_RDONLY);
    if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); }

    u32 len = q->len;
    q->testcase_buf = malloc(len);
    if (unlikely(!q->testcase_buf)) {

      /* was "Unable to mmap" — this path allocates with malloc */
      PFATAL("Unable to malloc '%s' with len %u", q->fname, len);

    }

    /* BUGFIX: the original opened the file and allocated the buffer but
       never read the data back, leaving uninitialized memory in the cache.
       Read the (trimmed) testcase into the fresh buffer before closing. */
    ck_read(fd, q->testcase_buf, len, q->fname);
    close(fd);

    /* Adjust the global cache accounting by the size delta. */
    afl->q_testcase_cache_size = afl->q_testcase_cache_size + len - old_len;

  }

}
/* Returns the testcase buf from the file behind this queue entry.
   If a testcase cache is configured the buffer is loaded into (and owned
   by) the cache; otherwise it is read into a scratch buffer each call.
   No reference counting is done. */
inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {

  u32 len = q->len;

  /* first handle if no testcase cache is configured */

  if (unlikely(!afl->q_testcase_max_cache_size)) {

    u8 *buf;

    /* Two distinct scratch buffers so that the current entry and a
       splice target can be held in memory at the same time. */
    if (q == afl->queue_cur) {

      buf = afl_realloc((void **)&afl->testcase_buf, len);

    } else {

      buf = afl_realloc((void **)&afl->splicecase_buf, len);

    }

    if (unlikely(!buf)) {

      PFATAL("Unable to malloc '%s' with len %u", q->fname, len);

    }

    int fd = open(q->fname, O_RDONLY);
    if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); }
    ck_read(fd, buf, len, q->fname);
    close(fd);
    /* Caller must NOT free this buffer — it is reused on the next call. */
    return buf;

  }

  /* now handle the testcase cache */

  if (unlikely(!q->testcase_buf)) {

    /* Buf not cached, let's load it */
    u32 tid = 0;

    /* Evict random entries until both the byte budget and the entry-count
       budget have room for this testcase.
       NOTE(review): if len >= q_testcase_max_cache_size this loop can
       never terminate — once every evictable entry is gone the inner
       do/while spins looking for a non-NULL, non-current entry. Presumably
       callers never pass such a large testcase; worth a guard — confirm. */
    while (unlikely(afl->q_testcase_cache_size + len >=
                        afl->q_testcase_max_cache_size ||
                    afl->q_testcase_cache_count >= TESTCASE_ENTRIES - 1)) {

      /* Cache full. We need to evict one entry to load this one.
         Pick a random slot that is occupied and not the current entry. */

      do {

        tid = rand_below(afl, afl->q_testcase_max_cache_count);

      } while (afl->q_testcase_cache[tid] == NULL ||
               afl->q_testcase_cache[tid] == afl->queue_cur);

      struct queue_entry *old_cached = afl->q_testcase_cache[tid];
      free(old_cached->testcase_buf);
      old_cached->testcase_buf = NULL;
      afl->q_testcase_cache_size -= old_cached->len;
      afl->q_testcase_cache[tid] = NULL;
      --afl->q_testcase_cache_count;

    }

    /* Linear scan from tid for the first free slot. If no eviction
       happened tid is 0; after eviction the just-freed slot is found
       immediately. Relies on at least one NULL slot existing below
       TESTCASE_ENTRIES (guaranteed by the count check above). */
    while (likely(afl->q_testcase_cache[tid] != NULL))
      ++tid;

    /* Load the test case into memory (malloc + read, not mmap). */
    int fd = open(q->fname, O_RDONLY);

    if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); }

    q->testcase_buf = malloc(len);

    if (unlikely(!q->testcase_buf)) {

      PFATAL("Unable to malloc '%s' with len %u", q->fname, len);

    }

    ck_read(fd, q->testcase_buf, len, q->fname);
    close(fd);

    /* Register testcase as cached */
    afl->q_testcase_cache[tid] = q;
    afl->q_testcase_cache_size += q->len;
    ++afl->q_testcase_cache_count;
    /* q_testcase_max_cache_count tracks the highest slot ever used so
       the eviction rand_below() above only draws from touched slots. */
    if (tid >= afl->q_testcase_max_cache_count)
      afl->q_testcase_max_cache_count = tid + 1;

  }

  return q->testcase_buf;

}

View File

@ -692,6 +692,8 @@ void sync_fuzzers(afl_state_t *afl) {
u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
u32 orig_len = q->len;
/* Custom mutator trimmer */ /* Custom mutator trimmer */
if (afl->custom_mutators_count) { if (afl->custom_mutators_count) {
@ -709,6 +711,8 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
}); });
if (orig_len != q->len) { queue_testcase_retake(afl, q, orig_len); }
if (custom_trimmed) return trimmed_case; if (custom_trimmed) return trimmed_case;
} }
@ -842,6 +846,8 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
close(fd); close(fd);
if (orig_len != q->len) queue_testcase_retake(afl, q, orig_len);
memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size); memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size);
update_bitmap_score(afl, q); update_bitmap_score(afl, q);

View File

@ -103,6 +103,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) {
afl->stats_avg_exec = -1; afl->stats_avg_exec = -1;
afl->skip_deterministic = 1; afl->skip_deterministic = 1;
afl->use_splicing = 1; afl->use_splicing = 1;
afl->q_testcase_max_cache_size = TESTCASE_CACHE * 1024000;
#ifdef HAVE_AFFINITY #ifdef HAVE_AFFINITY
afl->cpu_aff = -1; /* Selected CPU core */ afl->cpu_aff = -1; /* Selected CPU core */
@ -353,6 +354,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
afl->afl_env.afl_forksrv_init_tmout = afl->afl_env.afl_forksrv_init_tmout =
(u8 *)get_afl_env(afl_environment_variables[i]); (u8 *)get_afl_env(afl_environment_variables[i]);
} else if (!strncmp(env, "AFL_TESTCACHE_SIZE",
afl_environment_variable_len)) {
afl->afl_env.afl_testcache_size =
(u8 *)get_afl_env(afl_environment_variables[i]);
} else if (!strncmp(env, "AFL_STATSD_HOST", } else if (!strncmp(env, "AFL_STATSD_HOST",
afl_environment_variable_len)) { afl_environment_variable_len)) {

View File

@ -165,6 +165,8 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
"edges_found : %u\n" "edges_found : %u\n"
"var_byte_count : %u\n" "var_byte_count : %u\n"
"havoc_expansion : %u\n" "havoc_expansion : %u\n"
"testcache_size : %llu\n"
"testcache_count : %u\n"
"afl_banner : %s\n" "afl_banner : %s\n"
"afl_version : " VERSION "afl_version : " VERSION
"\n" "\n"
@ -198,8 +200,9 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
#else #else
-1, -1,
#endif #endif
t_bytes, afl->var_byte_count, afl->expand_havoc, afl->use_banner, t_bytes, afl->var_byte_count, afl->expand_havoc,
afl->unicorn_mode ? "unicorn" : "", afl->q_testcase_cache_size, afl->q_testcase_cache_count,
afl->use_banner, afl->unicorn_mode ? "unicorn" : "",
afl->fsrv.qemu_mode ? "qemu " : "", afl->fsrv.qemu_mode ? "qemu " : "",
afl->non_instrumented_mode ? " non_instrumented " : "", afl->non_instrumented_mode ? " non_instrumented " : "",
afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "", afl->no_forkserver ? "no_fsrv " : "", afl->crash_mode ? "crash " : "",

View File

@ -196,11 +196,13 @@ static void usage(u8 *argv0, int more_help) {
"AFL_SKIP_BIN_CHECK: skip the check, if the target is an executable\n" "AFL_SKIP_BIN_CHECK: skip the check, if the target is an executable\n"
"AFL_SKIP_CPUFREQ: do not warn about variable cpu clocking\n" "AFL_SKIP_CPUFREQ: do not warn about variable cpu clocking\n"
"AFL_SKIP_CRASHES: during initial dry run do not terminate for crashing inputs\n" "AFL_SKIP_CRASHES: during initial dry run do not terminate for crashing inputs\n"
"AFL_STATSD: enables StatsD metrics collection" "AFL_STATSD: enables StatsD metrics collection\n"
"AFL_STATSD_HOST: change default statsd host (default 127.0.0.1)" "AFL_STATSD_HOST: change default statsd host (default 127.0.0.1)\n"
"AFL_STATSD_PORT: change default statsd port (default: 8125)" "AFL_STATSD_PORT: change default statsd port (default: 8125)\n"
"AFL_STATSD_TAGS_FLAVOR: change default statsd tags format (default will disable tags)." "AFL_STATSD_TAGS_FLAVOR: set statsd tags format (default: disable tags)\n"
" Supported formats are: 'dogstatsd', 'librato', 'signalfx' and 'influxdb'" " Supported formats are: 'dogstatsd', 'librato', 'signalfx'\n"
" and 'influxdb'\n"
"AFL_TESTCACHE_SIZE: use a cache for testcases, improves performance (in MB)\n"
"AFL_TMPDIR: directory to use for input file generation (ramdisk recommended)\n" "AFL_TMPDIR: directory to use for input file generation (ramdisk recommended)\n"
//"AFL_PERSISTENT: not supported anymore -> no effect, just a warning\n" //"AFL_PERSISTENT: not supported anymore -> no effect, just a warning\n"
//"AFL_DEFER_FORKSRV: not supported anymore -> no effect, just a warning\n" //"AFL_DEFER_FORKSRV: not supported anymore -> no effect, just a warning\n"
@ -885,7 +887,7 @@ int main(int argc, char **argv_orig, char **envp) {
auto_sync = 1; auto_sync = 1;
afl->sync_id = ck_strdup("default"); afl->sync_id = ck_strdup("default");
afl->is_secondary_node = 1; afl->is_secondary_node = 1;
OKF("no -M/-S set, autoconfiguring for \"-S %s\"", afl->sync_id); OKF("No -M/-S set, autoconfiguring for \"-S %s\"", afl->sync_id);
} }
@ -1006,6 +1008,21 @@ int main(int argc, char **argv_orig, char **envp) {
} }
if (afl->afl_env.afl_testcache_size) {
afl->q_testcase_max_cache_size =
(u64)atoi(afl->afl_env.afl_testcache_size) * 1024000;
OKF("Enabled testcache with %llu MB",
afl->q_testcase_max_cache_size / 1024000);
} else {
ACTF(
"No testcache was configured. it is recommended to use a testcache, it "
"improves performance: set AFL_TESTCACHE_SIZE=(value in MB)");
}
if (afl->afl_env.afl_forksrv_init_tmout) { if (afl->afl_env.afl_forksrv_init_tmout) {
afl->fsrv.init_tmout = atoi(afl->afl_env.afl_forksrv_init_tmout); afl->fsrv.init_tmout = atoi(afl->afl_env.afl_forksrv_init_tmout);

View File

@ -71,7 +71,7 @@ inline uint64_t rand_next(afl_state_t *afl) {
inline double rand_next_percent(afl_state_t *afl) { inline double rand_next_percent(afl_state_t *afl) {
return (double)(((double)rand_next(afl)) / (double) 0xffffffffffffffff); return (double)(((double)rand_next(afl)) / (double)0xffffffffffffffff);
} }

View File

@ -1,4 +1,4 @@
f#!/bin/sh #!/bin/sh
. ./test-pre.sh . ./test-pre.sh