switch to faster and better hash + random

Author: van Hauser
Date:   2020-06-12 16:08:49 +02:00
parent  db2e04361d
commit  a632c00b0d
18 changed files with 5850 additions and 62 deletions
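A quick orientation (my summary, not part of the commit page): the commit swaps libc's random()/srandom() for the xoshiro256++ generator and rebases the coverage checksums on xxHash's XXH64. The interfaces involved, with prototypes inferred from the definitions in src/afl-performance.c below:

uint64_t rand_next(afl_state_t *afl);  /* next xoshiro256++ output, sub-ns per call  */
void     jump(afl_state_t *afl);       /* skip ahead 2^128 outputs (parallel streams) */
void     long_jump(afl_state_t *afl);  /* skip ahead 2^192 outputs                    */
u32      hash32(const void *key, u32 len, u32 seed);  /* XXH64, low 32 bits kept      */
u64      hash64(const void *key, u32 len, u64 seed);  /* full 64-bit XXH64            */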

src/afl-analyze.c

@@ -222,7 +222,7 @@ static u32 analyze_run_target(char **argv, u8 *mem, u32 len, u8 first_run) {
   int status = 0;
   s32 prog_in_fd;
-  u32 cksum;
+  u64 cksum;
   memset(trace_bits, 0, map_size);
   MEM_BARRIER();
@@ -321,7 +321,7 @@ static u32 analyze_run_target(char **argv, u8 *mem, u32 len, u8 first_run) {
   }
-  cksum = hash32(trace_bits, map_size, HASH_CONST);
+  cksum = hash64(trace_bits, map_size, HASH_CONST);
   /* We don't actually care if the target is crashing or not,
      except that when it does, the checksum should be different. */
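Why widen the checksum (back-of-envelope arithmetic of mine, not from the commit): hashing n distinct traces into 32 bits yields roughly n(n-1)/2 / 2^32 expected colliding pairs, so a queue of ~100,000 entries already expects about one collision (5e9 / 4.3e9 ≈ 1.2) that would silently merge two different paths; at 64 bits the same queue expects ~2.7e-10 collisions, i.e. effectively none.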

src/afl-forkserver.c

@@ -839,8 +839,9 @@ void afl_fsrv_write_to_testcase(afl_forkserver_t *fsrv, u8 *buf, size_t len) {
   *fsrv->shmem_fuzz_len = len;
   memcpy(fsrv->shmem_fuzz, buf, len);
 #ifdef _DEBUG
-  fprintf(stderr, "FS crc: %08x len: %u\n", hash32(fsrv->shmem_fuzz,
-          *fsrv->shmem_fuzz_len, 0xa5b35705), *fsrv->shmem_fuzz_len);
+  fprintf(stderr, "FS crc: %08x len: %u\n",
+          hash64(fsrv->shmem_fuzz, *fsrv->shmem_fuzz_len, 0xa5b35705),
+          *fsrv->shmem_fuzz_len);
   fprintf(stderr, "SHM :");
   for (int i = 0; i < *fsrv->shmem_fuzz_len; i++)
     fprintf(stderr, "%02x", fsrv->shmem_fuzz[i]);

src/afl-fuzz-bitmap.c

@@ -546,7 +546,7 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
   u8 fn[PATH_MAX];
   /* Update path frequency. */
-  u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+  u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
   struct queue_entry *q = afl->queue;
   while (q) {

src/afl-fuzz-mutators.c

@@ -272,7 +272,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
   sprintf(afl->stage_name_buf, "ptrim %s",
           u_stringify_int(val_buf, trim_exec));
-  u32 cksum;
+  u64 cksum;
   size_t retlen = mutator->afl_custom_trim(mutator->data, &retbuf);
@@ -295,7 +295,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
   if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }
-  cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+  cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
   if (cksum == q->exec_cksum) {

src/afl-fuzz-one.c

@@ -364,8 +364,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
   s32 len, fd, temp_len, i, j;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0;
-  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, prev_cksum;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
   u8 ret_val = 1, doing_det = 0;
@@ -653,7 +653,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
   if (!afl->non_instrumented_mode && (afl->stage_cur & 7) == 7) {
-    u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+    u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
     if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
@@ -821,14 +821,14 @@ u8 fuzz_one_original(afl_state_t *afl) {
   if (!eff_map[EFF_APOS(afl->stage_cur)]) {
-    u32 cksum;
+    u64 cksum;
     /* If in non-instrumented mode or if the file is very short, just flag
       everything without wasting time on checksums. */
     if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {
-      cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+      cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
     } else {
@@ -2539,8 +2539,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
   s32 len, fd, temp_len, i, j;
   u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
-  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, cur_ms_lv;
-  u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
+  u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, cur_ms_lv, prev_cksum;
+  u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
   u8 ret_val = 1, doing_det = 0;
@@ -2806,7 +2806,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
   if (!afl->non_instrumented_mode && (afl->stage_cur & 7) == 7) {
-    u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+    u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
     if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
@@ -2974,14 +2974,14 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
   if (!eff_map[EFF_APOS(afl->stage_cur)]) {
-    u32 cksum;
+    u64 cksum;
     /* If in non-instrumented mode or if the file is very short, just flag
       everything without wasting time on checksums. */
     if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {
-      cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+      cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
     } else {
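For context on eff_map and EFF_APOS above: these come from upstream afl-fuzz and are not touched by this commit. A sketch of the relevant definitions as I recall them from AFL's config.h (verify against the actual header):

#define EFF_MAP_SCALE2 3                       /* one eff_map slot per 8 input bytes */
#define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2)  /* input byte pos -> eff_map slot     */
#define EFF_MIN_LEN 128                        /* below this, skip the checksums and
                                                  flag every slot as effective       */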

src/afl-fuzz-redqueen.c

@@ -89,11 +89,11 @@ static struct range *pop_biggest_range(struct range **ranges) {
   }
-static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u32 *cksum) {
+static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) {
   if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; }
-  *cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+  *cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
   return 0;
 }
@@ -109,7 +109,7 @@ static void rand_replace(afl_state_t *afl, u8 *buf, u32 len) {
   }
-static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u32 exec_cksum) {
+static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) {
   struct range *ranges = add_range(NULL, 0, len);
   u8 * backup = ck_alloc_nozero(len);
@@ -137,7 +137,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u32 exec_cksum) {
   memcpy(backup, buf + rng->start, s);
   rand_replace(afl, buf + rng->start, s);
-  u32 cksum;
+  u64 cksum;
   u64 start_us = get_cur_time_us();
   if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) {
@@ -695,7 +695,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
   // afl->queue_cur->exec_cksum
 u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
-                        u32 exec_cksum) {
+                        u64 exec_cksum) {
   u8 r = 1;
   if (afl->orig_cmp_map == NULL) {

src/afl-fuzz-run.c

@@ -256,7 +256,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
   for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
-    u32 cksum;
+    u64 cksum;
     if (!first_run && !(afl->stage_cur % afl->stats_update_freq)) {
@@ -281,7 +281,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
   }
-  cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+  cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
   if (q->exec_cksum != cksum) {
     hnb = has_new_bits(afl, afl->virgin_bits);
@@ -646,7 +646,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
   while (remove_pos < q->len) {
     u32 trim_avail = MIN(remove_len, q->len - remove_pos);
-    u32 cksum;
+    u64 cksum;
     write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);
@@ -658,7 +658,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
   /* Note that we don't keep track of crashes or hangs here; maybe TODO?
    */
-  cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+  cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
   /* If the deletion had no impact on the trace, make it permanent. This
      isn't perfect for variable-path inputs, but we're just making a

src/afl-fuzz.c

@@ -819,8 +819,14 @@ int main(int argc, char **argv_orig, char **envp) {
   }
-  srandom((u32)afl->init_seed);
-  srand((u32)afl->init_seed);  // in case it is a different implementation
+  if (afl->init_seed) {
+    afl->rand_seed[0] = afl->init_seed;
+    afl->rand_seed[1] = afl->init_seed ^ 0x1234567890abcdef;
+    afl->rand_seed[2] = afl->init_seed & 0x0123456789abcdef;
+    afl->rand_seed[3] = afl->init_seed | 0x01abcde43f567908;
+  }
+  //srandom((u32)afl->init_seed);
+  //srand((u32)afl->init_seed);  // in case it is a different implementation
   if (afl->use_radamsa) {
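Side note, not part of the commit: the xoshiro authors (see the header comment in src/afl-performance.c below) recommend expanding a single 64-bit seed through splitmix64 rather than fixed xor/and/or masks, since the masked words above remain correlated. A minimal sketch of that alternative seeding, using the standard splitmix64 constants (seed_rand is a hypothetical helper name):

#include <stdint.h>

static uint64_t splitmix64_next(uint64_t *s) {
  uint64_t z = (*s += 0x9e3779b97f4a7c15ULL);
  z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
  z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
  return z ^ (z >> 31);
}

/* Fill the four xoshiro256++ state words from one 64-bit seed. */
static void seed_rand(afl_state_t *afl, uint64_t seed) {
  for (int i = 0; i < 4; i++)
    afl->rand_seed[i] = splitmix64_next(&seed);
}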

src/afl-performance.c (new file, 135 lines)

@@ -0,0 +1,135 @@
+/* Written in 2019 by David Blackman and Sebastiano Vigna (vigna@acm.org)
+
+   To the extent possible under law, the author has dedicated all copyright
+   and related and neighboring rights to this software to the public domain
+   worldwide. This software is distributed without any warranty.
+   See <http://creativecommons.org/publicdomain/zero/1.0/>.
+
+   This is xoshiro256++ 1.0, one of our all-purpose, rock-solid generators.
+   It has excellent (sub-ns) speed, a state (256 bits) that is large
+   enough for any parallel application, and it passes all tests we are
+   aware of.
+
+   For generating just floating-point numbers, xoshiro256+ is even faster.
+
+   The state must be seeded so that it is not everywhere zero. If you have
+   a 64-bit seed, we suggest to seed a splitmix64 generator and use its
+   output to fill s. */
+
+#include <stdint.h>
+#include "afl-fuzz.h"
+#include "types.h"
+#include "xxh3.h"
+
+static inline uint64_t rotl(const uint64_t x, int k) {
+  return (x << k) | (x >> (64 - k));
+}
+
+uint64_t rand_next(afl_state_t *afl) {
+  const uint64_t result =
+      rotl(afl->rand_seed[0] + afl->rand_seed[3], 23) + afl->rand_seed[0];
+  const uint64_t t = afl->rand_seed[1] << 17;
+
+  afl->rand_seed[2] ^= afl->rand_seed[0];
+  afl->rand_seed[3] ^= afl->rand_seed[1];
+  afl->rand_seed[1] ^= afl->rand_seed[2];
+  afl->rand_seed[0] ^= afl->rand_seed[3];
+  afl->rand_seed[2] ^= t;
+  afl->rand_seed[3] = rotl(afl->rand_seed[3], 45);
+
+  return result;
+}
+
+/* This is the jump function for the generator. It is equivalent
+   to 2^128 calls to rand_next(); it can be used to generate 2^128
+   non-overlapping subsequences for parallel computations. */
+
+void jump(afl_state_t *afl) {
+  static const uint64_t JUMP[] = {0x180ec6d33cfd0aba, 0xd5a61266f0c9392c,
+                                  0xa9582618e03fc9aa, 0x39abdc4529b1661c};
+  uint64_t s0 = 0;
+  uint64_t s1 = 0;
+  uint64_t s2 = 0;
+  uint64_t s3 = 0;
+
+  for (int i = 0; i < sizeof JUMP / sizeof *JUMP; i++)
+    for (int b = 0; b < 64; b++) {
+      if (JUMP[i] & UINT64_C(1) << b) {
+        s0 ^= afl->rand_seed[0];
+        s1 ^= afl->rand_seed[1];
+        s2 ^= afl->rand_seed[2];
+        s3 ^= afl->rand_seed[3];
+      }
+      rand_next(afl);
+    }
+
+  afl->rand_seed[0] = s0;
+  afl->rand_seed[1] = s1;
+  afl->rand_seed[2] = s2;
+  afl->rand_seed[3] = s3;
+}
+
+/* This is the long-jump function for the generator. It is equivalent to
+   2^192 calls to rand_next(); it can be used to generate 2^64 starting points,
+   from each of which jump() will generate 2^64 non-overlapping
+   subsequences for parallel distributed computations. */
+
+void long_jump(afl_state_t *afl) {
+  static const uint64_t LONG_JUMP[] = {0x76e15d3efefdcbbf, 0xc5004e441c522fb3,
+                                       0x77710069854ee241, 0x39109bb02acbe635};
+  uint64_t s0 = 0;
+  uint64_t s1 = 0;
+  uint64_t s2 = 0;
+  uint64_t s3 = 0;
+
+  for (int i = 0; i < sizeof LONG_JUMP / sizeof *LONG_JUMP; i++)
+    for (int b = 0; b < 64; b++) {
+      if (LONG_JUMP[i] & UINT64_C(1) << b) {
+        s0 ^= afl->rand_seed[0];
+        s1 ^= afl->rand_seed[1];
+        s2 ^= afl->rand_seed[2];
+        s3 ^= afl->rand_seed[3];
+      }
+      rand_next(afl);
+    }
+
+  afl->rand_seed[0] = s0;
+  afl->rand_seed[1] = s1;
+  afl->rand_seed[2] = s2;
+  afl->rand_seed[3] = s3;
+}
+
+u32 hash32(const void *key, u32 len, u32 seed) {
+  return XXH64(key, len, seed) % 0x100000000;
+}
+
+u64 hash64(const void *key, u32 len, u64 seed) {
+  return XXH64(key, len, seed);
+}
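Three usage notes on the file above (sketches and observations of mine, not diff content). Bounded random values are drawn from rand_next() through a rand_below() helper defined in a header not shown in this excerpt; in essence:

static inline u64 rand_below(afl_state_t *afl, u64 limit) {
  return rand_next(afl) % limit;  /* slight modulo bias; harmless for fuzzing */
}

jump()/long_jump() exist so that parallel instances can each take a provably non-overlapping slice of the output stream: seed all instances identically, then have instance k call jump() k times. Finally, hash32() keeps the old 32-bit signature for remaining callers; XXH64(...) % 0x100000000 just keeps the low 32 bits of the 64-bit hash, equivalent to (u32)XXH64(key, len, seed).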

src/afl-tmin.c

@@ -300,7 +300,7 @@ static u8 tmin_run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
   if (ret == FSRV_RUN_NOINST) { FATAL("Binary not instrumented?"); }
-  u32 cksum = hash32(fsrv->trace_bits, fsrv->map_size, HASH_CONST);
+  u64 cksum = hash64(fsrv->trace_bits, fsrv->map_size, HASH_CONST);
   if (first_run) { orig_cksum = cksum; }