switch to faster and better hash + random

van Hauser
2020-06-12 16:08:49 +02:00
parent db2e04361d
commit a632c00b0d
18 changed files with 5850 additions and 62 deletions

View File

@ -50,10 +50,15 @@ else
endif
endif
ifeq "$(shell echo 'int main() {return 0; }' | $(CC) -fno-move-loop-invariants -fdisable-tree-cunrolli -x c - -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
SPECIAL_PERFORMANCE += -fno-move-loop-invariants -fdisable-tree-cunrolli
endif
ifneq "$(shell uname)" "Darwin"
#ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
# CFLAGS_OPT += -march=native
#endif
ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
#CFLAGS_OPT += -march=native
SPECIAL_PERFORMANCE += -march=native
endif
# OS X does not like _FORTIFY_SOURCE=2
CFLAGS_OPT += -D_FORTIFY_SOURCE=2
endif
@ -350,6 +355,9 @@ afl-as: src/afl-as.c include/afl-as.h $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) src/$@.c -o $@ $(LDFLAGS)
ln -sf afl-as as
src/afl-performance.o : $(COMM_HDR) src/afl-performance.c include/hash.h
$(CC) -Iinclude $(SPECIAL_PERFORMANCE) -O3 -fno-unroll-loops -c src/afl-performance.c -o src/afl-performance.o
src/afl-common.o : $(COMM_HDR) src/afl-common.c include/common.h
$(CC) $(CFLAGS) $(CFLAGS_FLTO) -c src/afl-common.c -o src/afl-common.o
@ -365,25 +373,25 @@ radamsa: src/third_party/libradamsa/libradamsa.so
src/third_party/libradamsa/libradamsa.so: src/third_party/libradamsa/libradamsa.c src/third_party/libradamsa/radamsa.h
$(MAKE) -C src/third_party/libradamsa/ CFLAGS="$(CFLAGS)"
afl-fuzz: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o -o $@ $(PYFLAGS) $(LDFLAGS)
afl-fuzz: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS)
afl-showmap: src/afl-showmap.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o -o $@ $(LDFLAGS)
afl-tmin: src/afl-tmin.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o -o $@ $(LDFLAGS)
afl-tmin: src/afl-tmin.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(LDFLAGS)
afl-analyze: src/afl-analyze.c src/afl-common.o src/afl-sharedmem.o $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o -o $@ $(LDFLAGS)
afl-analyze: src/afl-analyze.c src/afl-common.o src/afl-sharedmem.o src/afl-performance.o $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-performance.o -o $@ $(LDFLAGS)
afl-gotcpu: src/afl-gotcpu.c src/afl-common.o $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o -o $@ $(LDFLAGS)
# document all mutations and only do one run (use with only one input file!)
document: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o | test_x86
$(CC) -D_AFL_DOCUMENT_MUTATIONS $(CFLAGS) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o -o afl-fuzz-document $(PYFLAGS) $(LDFLAGS)
document: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o | test_x86
$(CC) -D_AFL_DOCUMENT_MUTATIONS $(CFLAGS) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o afl-fuzz-document $(PYFLAGS) $(LDFLAGS)
test/unittests/unit_maybe_alloc.o : $(COMM_HDR) include/alloc-inl.h test/unittests/unit_maybe_alloc.c $(AFL_FUZZ_FILES)
@$(CC) $(CFLAGS) $(ASAN_CFLAGS) -c test/unittests/unit_maybe_alloc.c -o test/unittests/unit_maybe_alloc.o

View File

@ -16,6 +16,8 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
exactly one main node is present and warn otherwise
- If no main node is present at a sync, one secondary node automatically
  becomes a temporary main node until a real main node shows up
- switched from murmur2 hashing and libc random() to xxh3 and xoshiro256++,
  giving up to a 5.5% speed increase
- fix/update to MOpt (thanks to arnow117)
- added MOpt dictionary support from repo
- llvm_mode:

View File

@ -139,13 +139,13 @@ struct queue_entry {
fully_colorized; /* Do not run redqueen stage again */
u32 bitmap_size, /* Number of bits set in bitmap */
fuzz_level, /* Number of fuzzing iterations */
exec_cksum; /* Checksum of the execution trace */
fuzz_level; /* Number of fuzzing iterations */
u64 exec_us, /* Execution time (us) */
handicap, /* Number of queue cycles behind */
n_fuzz, /* Number of fuzz, does not overflow */
depth; /* Path depth */
n_fuzz, /* Number of fuzz, does not overflow */
depth, /* Path depth */
exec_cksum; /* Checksum of the execution trace */
u8 *trace_mini; /* Trace bytes, if kept */
u32 tc_ref; /* Trace bytes ref count */
@ -520,11 +520,11 @@ typedef struct afl_state {
u64 stage_finds[32], /* Patterns found per fuzz stage */
stage_cycles[32]; /* Execs per fuzz stage */
#ifndef HAVE_ARC4RANDOM
//#ifndef HAVE_ARC4RANDOM
u32 rand_cnt; /* Random number counter */
#endif
//#endif
u32 rand_seed[2];
u64 rand_seed[4];
s64 init_seed;
u64 total_cal_us, /* Total calibration time (us) */
@ -942,7 +942,10 @@ u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8 *out_buf, u32 len);
/* RedQueen */
u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
u32 exec_cksum);
u64 exec_cksum);
/* xoshiro256++ */
uint64_t rand_next(afl_state_t *afl);
/**** Inline routines ****/
@ -951,24 +954,25 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
static inline u32 rand_below(afl_state_t *afl, u32 limit) {
#ifdef HAVE_ARC4RANDOM
if (unlikely(afl->fixed_seed)) { return random() % limit; }
//#ifdef HAVE_ARC4RANDOM
// if (unlikely(afl->fixed_seed)) { return random() % limit; }
/* The boundary is not necessarily a power of 2,
   so we need to ensure result uniformity. */
return arc4random_uniform(limit);
#else
// return arc4random_uniform(limit);
//#else
if (unlikely(!afl->rand_cnt--) && likely(!afl->fixed_seed)) {
ck_read(afl->fsrv.dev_urandom_fd, &afl->rand_seed, sizeof(afl->rand_seed),
"/dev/urandom");
srandom(afl->rand_seed[0]);
// srandom(afl->rand_seed[0]);
afl->rand_cnt = (RESEED_RNG / 2) + (afl->rand_seed[1] % RESEED_RNG);
}
return random() % limit;
#endif
// return random() % limit;
return rand_next(afl) % limit;
//#endif
}
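
For readers tracing the change, here is a minimal self-contained sketch of the new PRNG path: the xoshiro256++ step from src/afl-performance.c plus the plain-modulo reduction that rand_below() now performs. The file-scope rand_seed[] stands in for the afl->rand_seed[] fields, and note that the modulo keeps a small bias for limits that do not divide 2^64 (a bias the old arc4random_uniform() path avoided).

#include <stdint.h>

static uint64_t rand_seed[4]; /* stands in for afl->rand_seed[] */

static inline uint64_t rotl64(const uint64_t x, int k) {
  return (x << k) | (x >> (64 - k));
}

/* one xoshiro256++ step, mirroring rand_next() */
static uint64_t rand_next_sketch(void) {
  const uint64_t result =
      rotl64(rand_seed[0] + rand_seed[3], 23) + rand_seed[0];
  const uint64_t t = rand_seed[1] << 17;
  rand_seed[2] ^= rand_seed[0];
  rand_seed[3] ^= rand_seed[1];
  rand_seed[1] ^= rand_seed[2];
  rand_seed[0] ^= rand_seed[3];
  rand_seed[2] ^= t;
  rand_seed[3] = rotl64(rand_seed[3], 45);
  return result;
}

/* the reduction used by rand_below(), minus the periodic reseeding */
static uint32_t rand_below_sketch(uint32_t limit) {
  return (uint32_t)(rand_next_sketch() % limit);
}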

View File

@ -293,7 +293,7 @@
/* Call count interval between reseeding the libc PRNG from /dev/urandom: */
#define RESEED_RNG 10000
#define RESEED_RNG 100000
/* Maximum line length passed from GCC to 'as' and used for parsing
configuration files: */
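
Taken together with the reseed branch in rand_below() above, the tenfold larger constant stretches the interval between /dev/urandom reseeds. A small sketch of the arithmetic; the bounds follow directly from the code:

/* countdown set in rand_below():
     afl->rand_cnt = (RESEED_RNG / 2) + (afl->rand_seed[1] % RESEED_RNG);
   with RESEED_RNG = 100000 this yields
     minimum:  50000 +     0 =  50000 calls between reseeds
     maximum:  50000 + 99999 = 149999 calls between reseeds */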

View File

@ -30,9 +30,16 @@
#include "types.h"
#ifdef __x86_64__
u32 hash32(const void *key, u32 len, u32 seed);
u64 hash64(const void *key, u32 len, u64 seed);
#define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r))))
#if 0
The following code is disabled because xxh3 with a 32 bit result is 30% faster
#ifdef __x86_64__
#define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r))))
static inline u32 hash32(const void *key, u32 len, u32 seed) {
@ -65,9 +72,9 @@ static inline u32 hash32(const void *key, u32 len, u32 seed) {
}
#else
#else
#define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r))))
#define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r))))
static inline u32 hash32(const void *key, u32 len, u32 seed) {
@ -100,7 +107,8 @@ static inline u32 hash32(const void *key, u32 len, u32 seed) {
}
#endif /* ^__x86_64__ */
#endif /* ^__x86_64__ */
#endif
#endif /* !_HAVE_HASH_H */
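
A minimal usage sketch of the new interface declared above, assuming the u32/u64 typedefs from types.h; the buffer and the seed value are illustrative (callers in the tree pass HASH_CONST):

#include <stdio.h>

#include "types.h"
#include "hash.h"

int main(void) {

  u8  map[64] = {1, 2, 3}; /* stand-in for trace_bits */
  u64 wide    = hash64(map, sizeof(map), 0xa5b35705);
  /* hash32() is the low 32 bits of the same 64-bit hash */
  u32 narrow  = hash32(map, sizeof(map), 0xa5b35705);

  printf("hash64: %016llx  hash32: %08x\n", (unsigned long long)wide, narrow);
  return 0;

}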

include/xxh3.h (new file, 3185 lines)

File diff suppressed because it is too large

include/xxhash.h (new file, 2438 lines)

File diff suppressed because it is too large

View File

@ -139,7 +139,8 @@ static void __afl_map_shm_fuzz() {
}
map = (u8 *)mmap(0, MAX_FILE + sizeof(u32), PROT_READ, MAP_SHARED, shm_fd, 0);
map =
(u8 *)mmap(0, MAX_FILE + sizeof(u32), PROT_READ, MAP_SHARED, shm_fd, 0);
#else
u32 shm_id = atoi(id_str);

View File

@ -222,7 +222,7 @@ static u32 analyze_run_target(char **argv, u8 *mem, u32 len, u8 first_run) {
int status = 0;
s32 prog_in_fd;
u32 cksum;
u64 cksum;
memset(trace_bits, 0, map_size);
MEM_BARRIER();
@ -321,7 +321,7 @@ static u32 analyze_run_target(char **argv, u8 *mem, u32 len, u8 first_run) {
}
cksum = hash32(trace_bits, map_size, HASH_CONST);
cksum = hash64(trace_bits, map_size, HASH_CONST);
/* We don't actually care if the target is crashing or not,
except that when it does, the checksum should be different. */

View File

@ -839,8 +839,9 @@ void afl_fsrv_write_to_testcase(afl_forkserver_t *fsrv, u8 *buf, size_t len) {
*fsrv->shmem_fuzz_len = len;
memcpy(fsrv->shmem_fuzz, buf, len);
#ifdef _DEBUG
fprintf(stderr, "FS crc: %08x len: %u\n", hash32(fsrv->shmem_fuzz,
*fsrv->shmem_fuzz_len, 0xa5b35705), *fsrv->shmem_fuzz_len);
fprintf(stderr, "FS crc: %08x len: %u\n",
hash64(fsrv->shmem_fuzz, *fsrv->shmem_fuzz_len, 0xa5b35705),
*fsrv->shmem_fuzz_len);
fprintf(stderr, "SHM :");
for (int i = 0; i < *fsrv->shmem_fuzz_len; i++)
fprintf(stderr, "%02x", fsrv->shmem_fuzz[i]);

View File

@ -546,7 +546,7 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
u8 fn[PATH_MAX];
/* Update path frequency. */
u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
struct queue_entry *q = afl->queue;
while (q) {

View File

@ -272,7 +272,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
sprintf(afl->stage_name_buf, "ptrim %s",
u_stringify_int(val_buf, trim_exec));
u32 cksum;
u64 cksum;
size_t retlen = mutator->afl_custom_trim(mutator->data, &retbuf);
@ -295,7 +295,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf,
if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (cksum == q->exec_cksum) {

View File

@ -364,8 +364,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
s32 len, fd, temp_len, i, j;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0;
u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, prev_cksum;
u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
u8 ret_val = 1, doing_det = 0;
@ -653,7 +653,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (!afl->non_instrumented_mode && (afl->stage_cur & 7) == 7) {
u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
@ -821,14 +821,14 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (!eff_map[EFF_APOS(afl->stage_cur)]) {
u32 cksum;
u64 cksum;
/* If in non-instrumented mode or if the file is very short, just flag
everything without wasting time on checksums. */
if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
} else {
@ -2539,8 +2539,8 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
s32 len, fd, temp_len, i, j;
u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0;
u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, cur_ms_lv;
u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1;
u64 havoc_queued = 0, orig_hit_cnt, new_hit_cnt = 0, cur_ms_lv, prev_cksum;
u32 splice_cycle = 0, perf_score = 100, orig_perf, eff_cnt = 1;
u8 ret_val = 1, doing_det = 0;
@ -2806,7 +2806,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
if (!afl->non_instrumented_mode && (afl->stage_cur & 7) == 7) {
u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
@ -2974,14 +2974,14 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
if (!eff_map[EFF_APOS(afl->stage_cur)]) {
u32 cksum;
u64 cksum;
/* If in non-instrumented mode or if the file is very short, just flag
everything without wasting time on checksums. */
if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
} else {
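
The surrounding logic (elided in the hunk) uses these checksums to build the effector map: after flipping a byte, a changed 64-bit trace checksum marks that byte as worth deeper mutation. A condensed sketch of that gating with names taken from the code above; the final comparison is reconstructed from context, not shown in this diff:

u64 cksum;

if (!afl->non_instrumented_mode && len >= EFF_MIN_LEN) {

  /* real checksum of the trace after flipping byte stage_cur */
  cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

} else {

  /* tiny or uninstrumented input: force a mismatch so every byte
     is flagged without paying for checksums */
  cksum = ~afl->queue_cur->exec_cksum;

}

if (cksum != afl->queue_cur->exec_cksum)
  eff_map[EFF_APOS(afl->stage_cur)] = 1;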

View File

@ -89,11 +89,11 @@ static struct range *pop_biggest_range(struct range **ranges) {
}
static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u32 *cksum) {
static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u64 *cksum) {
if (unlikely(common_fuzz_stuff(afl, buf, len))) { return 1; }
*cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
*cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
return 0;
}
@ -109,7 +109,7 @@ static void rand_replace(afl_state_t *afl, u8 *buf, u32 len) {
}
static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u32 exec_cksum) {
static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u64 exec_cksum) {
struct range *ranges = add_range(NULL, 0, len);
u8 * backup = ck_alloc_nozero(len);
@ -137,7 +137,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len, u32 exec_cksum) {
memcpy(backup, buf + rng->start, s);
rand_replace(afl, buf + rng->start, s);
u32 cksum;
u64 cksum;
u64 start_us = get_cur_time_us();
if (unlikely(get_exec_checksum(afl, buf, len, &cksum))) {
@ -695,7 +695,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
// afl->queue_cur->exec_cksum
u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
u32 exec_cksum) {
u64 exec_cksum) {
u8 r = 1;
if (afl->orig_cmp_map == NULL) {

View File

@ -256,7 +256,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {
u32 cksum;
u64 cksum;
if (!first_run && !(afl->stage_cur % afl->stats_update_freq)) {
@ -281,7 +281,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
}
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (q->exec_cksum != cksum) {
hnb = has_new_bits(afl, afl->virgin_bits);
@ -646,7 +646,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
while (remove_pos < q->len) {
u32 trim_avail = MIN(remove_len, q->len - remove_pos);
u32 cksum;
u64 cksum;
write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);
@ -658,7 +658,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
/* Note that we don't keep track of crashes or hangs here; maybe TODO?
*/
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
/* If the deletion had no impact on the trace, make it permanent. This
isn't perfect for variable-path inputs, but we're just making a

View File

@ -819,8 +819,14 @@ int main(int argc, char **argv_orig, char **envp) {
}
srandom((u32)afl->init_seed);
srand((u32)afl->init_seed); // in case it is a different implementation
if (afl->init_seed) {
afl->rand_seed[0] = afl->init_seed;
afl->rand_seed[1] = afl->init_seed ^ 0x1234567890abcdef;
afl->rand_seed[2] = afl->init_seed & 0x0123456789abcdef;
afl->rand_seed[3] = afl->init_seed | 0x01abcde43f567908;
}
//srandom((u32)afl->init_seed);
//srand((u32)afl->init_seed); // in case it is a different implementation
if (afl->use_radamsa) {

src/afl-performance.c (new file, 135 lines)
View File

@ -0,0 +1,135 @@
/* Written in 2019 by David Blackman and Sebastiano Vigna (vigna@acm.org)
To the extent possible under law, the author has dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
See <http://creativecommons.org/publicdomain/zero/1.0/>.
This is xoshiro256++ 1.0, one of our all-purpose, rock-solid generators.
It has excellent (sub-ns) speed, a state (256 bits) that is large
enough for any parallel application, and it passes all tests we are
aware of.
For generating just floating-point numbers, xoshiro256+ is even faster.
The state must be seeded so that it is not everywhere zero. If you have
a 64-bit seed, we suggest to seed a splitmix64 generator and use its
output to fill s. */
#include <stdint.h>
#include "afl-fuzz.h"
#include "types.h"
#include "xxh3.h"
static inline uint64_t rotl(const uint64_t x, int k) {
return (x << k) | (x >> (64 - k));
}
uint64_t rand_next(afl_state_t *afl) {
const uint64_t result =
rotl(afl->rand_seed[0] + afl->rand_seed[3], 23) + afl->rand_seed[0];
const uint64_t t = afl->rand_seed[1] << 17;
afl->rand_seed[2] ^= afl->rand_seed[0];
afl->rand_seed[3] ^= afl->rand_seed[1];
afl->rand_seed[1] ^= afl->rand_seed[2];
afl->rand_seed[0] ^= afl->rand_seed[3];
afl->rand_seed[2] ^= t;
afl->rand_seed[3] = rotl(afl->rand_seed[3], 45);
return result;
}
/* This is the jump function for the generator. It is equivalent
to 2^128 calls to rand_next(); it can be used to generate 2^128
non-overlapping subsequences for parallel computations. */
void jump(afl_state_t *afl) {
static const uint64_t JUMP[] = {0x180ec6d33cfd0aba, 0xd5a61266f0c9392c,
0xa9582618e03fc9aa, 0x39abdc4529b1661c};
uint64_t s0 = 0;
uint64_t s1 = 0;
uint64_t s2 = 0;
uint64_t s3 = 0;
for (int i = 0; i < sizeof JUMP / sizeof *JUMP; i++)
for (int b = 0; b < 64; b++) {
if (JUMP[i] & UINT64_C(1) << b) {
s0 ^= afl->rand_seed[0];
s1 ^= afl->rand_seed[1];
s2 ^= afl->rand_seed[2];
s3 ^= afl->rand_seed[3];
}
rand_next(afl);
}
afl->rand_seed[0] = s0;
afl->rand_seed[1] = s1;
afl->rand_seed[2] = s2;
afl->rand_seed[3] = s3;
}
/* This is the long-jump function for the generator. It is equivalent to
2^192 calls to rand_next(); it can be used to generate 2^64 starting points,
from each of which jump() will generate 2^64 non-overlapping
subsequences for parallel distributed computations. */
void long_jump(afl_state_t *afl) {
static const uint64_t LONG_JUMP[] = {0x76e15d3efefdcbbf, 0xc5004e441c522fb3,
0x77710069854ee241, 0x39109bb02acbe635};
uint64_t s0 = 0;
uint64_t s1 = 0;
uint64_t s2 = 0;
uint64_t s3 = 0;
for (int i = 0; i < sizeof LONG_JUMP / sizeof *LONG_JUMP; i++)
for (int b = 0; b < 64; b++) {
if (LONG_JUMP[i] & UINT64_C(1) << b) {
s0 ^= afl->rand_seed[0];
s1 ^= afl->rand_seed[1];
s2 ^= afl->rand_seed[2];
s3 ^= afl->rand_seed[3];
}
rand_next(afl);
}
afl->rand_seed[0] = s0;
afl->rand_seed[1] = s1;
afl->rand_seed[2] = s2;
afl->rand_seed[3] = s3;
}
u32 hash32(const void *key, u32 len, u32 seed) {
return XXH64(key, len, seed) % 0x100000000;
}
u64 hash64(const void *key, u32 len, u64 seed) {
return XXH64(key, len, seed);
}
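
A usage sketch for the stream machinery above, assuming jump() is made visible to the caller and that seeding details are elided; each jump() advances the state by 2^128 steps, so the two sequences can never overlap:

afl_state_t primary, secondary;

/* ... seed primary.rand_seed[0..3], then clone the state ... */
memcpy(secondary.rand_seed, primary.rand_seed, sizeof(primary.rand_seed));

jump(&secondary); /* secondary now draws from a disjoint subsequence */

u64 a = rand_next(&primary);
u64 b = rand_next(&secondary); /* independent of the primary stream */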

View File

@ -300,7 +300,7 @@ static u8 tmin_run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
if (ret == FSRV_RUN_NOINST) { FATAL("Binary not instrumented?"); }
u32 cksum = hash32(fsrv->trace_bits, fsrv->map_size, HASH_CONST);
u64 cksum = hash64(fsrv->trace_bits, fsrv->map_size, HASH_CONST);
if (first_run) { orig_cksum = cksum; }