Merge branch 'memcache_marc' into dev

van Hauser, 2020-10-12 03:18:54 +02:00 (committed by GitHub)
5 changed files with 179 additions and 99 deletions

View File

@@ -168,6 +168,9 @@ struct queue_entry {
double perf_score; /* performance score */
u8 *testcase_buf; /* The testcase buffer, if loaded. */
u32 testcase_refs; /* count of users of testcase buf */
struct queue_entry *next; /* Next element, if any */
};
@@ -686,6 +689,12 @@ typedef struct afl_state {
/* queue entries ready for splicing count (len > 4) */
u32 ready_for_splicing_count;
/* How many queue entries currently have cached testcases */
u32 q_testcase_cache_count;
/* Refs to each queue entry with cached testcase (for eviction, if cache_count
* is too large) */
struct queue_entry *q_testcase_cache[TESTCASE_CACHE_SIZE];
} afl_state_t;
struct custom_mutator {
@@ -1132,5 +1141,12 @@ static inline u64 next_p2(u64 val) {
}
/* Returns the testcase buf from the file behind this queue entry.
Increases the refcount. */
u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q);
/* Tell afl that this testcase may be evicted from the cache */
void queue_testcase_release(afl_state_t *afl, struct queue_entry *q);
#endif
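A minimal usage sketch of the take/release pairing declared above (not part of this commit; it assumes an initialized afl_state_t *afl and a struct queue_entry *q):

  /* Borrow the cached testcase buffer; the cache pins the entry while its
     refcount is non-zero. */
  u8 *buf = queue_testcase_take(afl, q);

  /* ... read buf[0 .. q->len - 1], e.g. copy a chunk into a mutation buffer ... */

  /* Drop the reference so the entry becomes evictable again. */
  queue_testcase_release(afl, q);
  buf = NULL;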

View File

@@ -295,6 +295,15 @@
#define RESEED_RNG 100000
/* The number of entries in the testcase cache, held in memory.
Decrease if RAM usage is high. */
#define TESTCASE_CACHE_SIZE 3072
#if TESTCASE_CACHE_SIZE < 4
#error \
"Dangerously low cache size: Set TESTCASE_CACHE_SIZE to 4 or more in config.h!"
#endif
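For a rough sense of the memory impact (a back-of-the-envelope estimate, not part of the commit): assuming an average cached testcase of about 10 KiB, a full cache of 3072 entries holds roughly 30 MiB. The worst case is bounded by TESTCASE_CACHE_SIZE times the maximum testcase size (MAX_FILE, 1 MiB by default), i.e. about 3 GiB, which real corpora rarely approach.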
/* Maximum line length passed from GCC to 'as' and used for parsing
configuration files: */

View File

@@ -370,7 +370,7 @@ static void locate_diffs(u8 *ptr1, u8 *ptr2, u32 len, s32 *first, s32 *last) {
u8 fuzz_one_original(afl_state_t *afl) {
s32 len, fd, temp_len;
s32 len, temp_len;
u32 j;
u32 i;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@@ -453,28 +453,9 @@ u8 fuzz_one_original(afl_state_t *afl) {
}
/* Map the test case into memory. */
fd = open(afl->queue_cur->fname, O_RDONLY);
if (unlikely(fd < 0)) {
PFATAL("Unable to open '%s'", afl->queue_cur->fname);
}
orig_in = in_buf = queue_testcase_take(afl, afl->queue_cur);
len = afl->queue_cur->len;
orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (unlikely(orig_in == MAP_FAILED)) {
PFATAL("Unable to mmap '%s' with len %d", afl->queue_cur->fname, len);
}
close(fd);
/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
single byte anyway, so it wouldn't give us any performance or memory usage
benefits. */
@@ -1697,7 +1678,7 @@ custom_mutator_stage:
for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max;
++afl->stage_cur) {
struct queue_entry *target;
struct queue_entry *target = NULL;
u32 tid;
u8 * new_buf = NULL;
u32 target_len = 0;
@@ -1720,17 +1701,7 @@ custom_mutator_stage:
afl->splicing_with = tid;
/* Read the additional testcase into a new buffer. */
fd = open(target->fname, O_RDONLY);
if (unlikely(fd < 0)) {
PFATAL("Unable to open '%s'", target->fname);
}
new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), target->len);
if (unlikely(!new_buf)) { PFATAL("alloc"); }
ck_read(fd, new_buf, target->len, target->fname);
close(fd);
new_buf = queue_testcase_take(afl, target);
target_len = target->len;
}
@@ -1741,6 +1712,13 @@ custom_mutator_stage:
el->afl_custom_fuzz(el->data, out_buf, len, &mutated_buf, new_buf,
target_len, max_seed_size);
if (new_buf) {
queue_testcase_release(afl, target);
new_buf = NULL;
}
if (unlikely(!mutated_buf)) {
FATAL("Error in custom_fuzz. Size returned: %zd", mutated_size);
@@ -2323,54 +2301,55 @@ havoc_stage:
/* Overwrite bytes with a randomly selected chunk from another
testcase or insert that chunk. */
if (afl->queued_paths < 4) break;
if (afl->queued_paths < 4) { break; }
/* Pick a random queue entry and seek to it. */
u32 tid;
do
do {
tid = rand_below(afl, afl->queued_paths);
while (tid == afl->current_entry);
} while (tid == afl->current_entry);
struct queue_entry *target = afl->queue_buf[tid];
/* Make sure that the target has a reasonable length. */
while (target && (target->len < 2 || target == afl->queue_cur))
while (target && (target->len < 2 || target == afl->queue_cur)) {
target = target->next;
if (!target) break;
/* Read the testcase into a new buffer. */
fd = open(target->fname, O_RDONLY);
if (unlikely(fd < 0)) {
PFATAL("Unable to open '%s'", target->fname);
}
if (!target) { break; }
u32 new_len = target->len;
u8 *new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), new_len);
if (unlikely(!new_buf)) { PFATAL("alloc"); }
ck_read(fd, new_buf, new_len, target->fname);
close(fd);
/* Get the testcase contents for splicing. */
u8 *new_buf = queue_testcase_take(afl, target);
u8 overwrite = 0;
if (temp_len >= 2 && rand_below(afl, 2))
overwrite = 1;
else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) {
if (temp_len >= 2 && rand_below(afl, 2)) {
if (temp_len >= 2)
overwrite = 1;
else
} else if (temp_len + HAVOC_BLK_XL >= MAX_FILE) {
if (temp_len >= 2) {
overwrite = 1;
} else {
queue_testcase_release(afl, target);
new_buf = NULL;
break;
}
}
if (overwrite) {
u32 copy_from, copy_to, copy_len;
@@ -2414,6 +2393,9 @@ havoc_stage:
}
/* We don't need this splice testcase anymore */
queue_testcase_release(afl, target);
new_buf = NULL;
break;
}
@@ -2519,24 +2501,17 @@ retry_splicing:
if (!target) { goto retry_splicing; }
/* Read the testcase into a new buffer. */
fd = open(target->fname, O_RDONLY);
if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", target->fname); }
/* Get the testcase buffer */
u8 *splice_buf = queue_testcase_take(afl, target);
new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
if (unlikely(!new_buf)) { PFATAL("alloc"); }
ck_read(fd, new_buf, target->len, target->fname);
close(fd);
/* Find a suitable splicing location, somewhere between the first and
the last differing byte. Bail out if the difference is just a single
byte or so. */
locate_diffs(in_buf, new_buf, MIN(len, (s64)target->len), &f_diff, &l_diff);
locate_diffs(in_buf, splice_buf, MIN(len, (s64)target->len), &f_diff,
&l_diff);
if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { goto retry_splicing; }
@@ -2548,6 +2523,7 @@ retry_splicing:
len = target->len;
memcpy(new_buf, in_buf, split_at);
memcpy(new_buf + split_at, splice_buf + split_at, target->len - split_at);
afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch));
in_buf = new_buf;
@@ -2555,6 +2531,9 @@ retry_splicing:
if (unlikely(!out_buf)) { PFATAL("alloc"); }
memcpy(out_buf, in_buf, len);
queue_testcase_release(afl, target);
splice_buf = NULL;
goto custom_mutator_stage;
/* ???: While integrating Python module, the author decided to jump to
python stage, but the reason behind this is not clear.*/
@@ -2585,7 +2564,8 @@ abandon_entry:
++afl->queue_cur->fuzz_level;
munmap(orig_in, afl->queue_cur->len);
queue_testcase_release(afl, afl->queue_cur);
orig_in = NULL;
return ret_val;
@@ -2607,7 +2587,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
}
s32 len, fd, temp_len;
s32 len, temp_len;
u32 i;
u32 j;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
@@ -2672,23 +2652,9 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
}
/* Map the test case into memory. */
fd = open(afl->queue_cur->fname, O_RDONLY);
if (fd < 0) { PFATAL("Unable to open '%s'", afl->queue_cur->fname); }
orig_in = in_buf = queue_testcase_take(afl, afl->queue_cur);
len = afl->queue_cur->len;
orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (orig_in == MAP_FAILED) {
PFATAL("Unable to mmap '%s'", afl->queue_cur->fname);
}
close(fd);
/* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every
single byte anyway, so it wouldn't give us any performance or memory usage
benefits. */
@@ -4528,31 +4494,24 @@ pacemaker_fuzzing:
if (!target) { goto retry_splicing_puppet; }
/* Read the testcase into a new buffer. */
fd = open(target->fname, O_RDONLY);
if (fd < 0) { PFATAL("Unable to open '%s'", target->fname); }
new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
if (unlikely(!new_buf)) { PFATAL("alloc"); }
ck_read(fd, new_buf, target->len, target->fname);
close(fd);
u8 *splicing_buf = queue_testcase_take(afl, target);
/* Find a suitable splicing location, somewhere between the first and
the last differing byte. Bail out if the difference is just a single
byte or so. */
locate_diffs(in_buf, new_buf, MIN(len, (s32)target->len), &f_diff,
locate_diffs(in_buf, splicing_buf, MIN(len, (s32)target->len), &f_diff,
&l_diff);
if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) {
queue_testcase_release(afl, target);
goto retry_splicing_puppet;
}
new_buf = afl_realloc(AFL_BUF_PARAM(in_scratch), target->len);
/* Split somewhere between the first and last differing byte. */
split_at = f_diff + rand_below(afl, l_diff - f_diff);
@@ -4561,12 +4520,17 @@ pacemaker_fuzzing:
len = target->len;
memcpy(new_buf, in_buf, split_at);
memcpy(new_buf + split_at, splicing_buf + split_at,
target->len - split_at);
afl_swap_bufs(AFL_BUF_PARAM(in), AFL_BUF_PARAM(in_scratch));
in_buf = new_buf;
out_buf = afl_realloc(AFL_BUF_PARAM(out), len);
if (unlikely(!out_buf)) { PFATAL("alloc"); }
memcpy(out_buf, in_buf, len);
queue_testcase_release(afl, target);
splicing_buf = NULL;
goto havoc_stage_puppet;
} /* if splice_cycle */
@@ -4600,7 +4564,8 @@ pacemaker_fuzzing:
// if (afl->queue_cur->favored) --afl->pending_favored;
// }
munmap(orig_in, afl->queue_cur->len);
queue_testcase_release(afl, afl->queue_cur);
orig_in = NULL;
if (afl->key_puppet == 1) {

View File

@@ -343,6 +343,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
q->depth = afl->cur_depth + 1;
q->passed_det = passed_det;
q->trace_mini = NULL;
q->testcase_buf = NULL;
if (q->depth > afl->max_depth) { afl->max_depth = q->depth; }
@@ -891,3 +892,89 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
}
/* Tell afl that this testcase may be evicted from the cache */
inline void queue_testcase_release(afl_state_t *afl, struct queue_entry *q) {
(void)afl;
if (unlikely(q->testcase_refs == 0)) {
FATAL("Testcase refcount reduced past 0");
}
q->testcase_refs--;
}
/* Returns the testcase buf from the file behind this queue entry.
Increases the refcount. */
u8 *queue_testcase_take(afl_state_t *afl, struct queue_entry *q) {
if (!q->testcase_buf) {
u32 tid = 0;
/* Buf not cached, let's do that now */
if (likely(afl->q_testcase_cache_count == TESTCASE_CACHE_SIZE)) {
/* Cache full. We need to evict one to map one.
Get a random one which is not in use */
do {
tid = rand_below(afl, afl->q_testcase_cache_count);
} while (afl->q_testcase_cache[tid]->testcase_refs > 0);
struct queue_entry *old_cached = afl->q_testcase_cache[tid];
/* free the current buf from cache */
munmap(old_cached->testcase_buf, old_cached->len);
old_cached->testcase_buf = NULL;
} else {
tid = afl->q_testcase_cache_count;
afl->q_testcase_cache_count++;
}
/* Map the test case into memory. */
int fd = open(q->fname, O_RDONLY);
if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", q->fname); }
u32 len = q->len;
q->testcase_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
if (unlikely(q->testcase_buf == MAP_FAILED)) {
PFATAL("Unable to mmap '%s' with len %d", q->fname, len);
}
close(fd);
/* Register us as cached */
afl->q_testcase_cache[tid] = q;
}
q->testcase_refs++;
if (unlikely(!q->testcase_buf || !q->testcase_refs)) {
if (!q->testcase_buf) {
FATAL("Testcase buf is NULL, this should never happen");
}
if (!q->testcase_refs) {
FATAL("Testcase ref overflow. Missing a testcase release somwhere?");
}
}
return q->testcase_buf;
}
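A note on the eviction strategy above (an observation, not text from the commit): replacement is random among cache slots whose refcount is zero, which avoids any LRU bookkeeping. The do/while search only terminates if at least one slot is unreferenced; since callers hold at most a couple of buffers at a time (the current queue entry plus a splice target), the TESTCASE_CACHE_SIZE >= 4 check in config.h appears to be what guarantees that.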

View File

@@ -1174,9 +1174,12 @@ int main(int argc, char **argv_orig, char **envp) {
if (extras_dir_cnt) {
for (i = 0; i < extras_dir_cnt; i++)
for (i = 0; i < extras_dir_cnt; i++) {
load_extras(afl, extras_dir[i]);
}
dedup_extras(afl);
OKF("Loaded a total of %u extras.", afl->extras_cnt);