Merge pull request #649 from AFLplusplus/skim_romu

switch to romu and skim

commit 47e04548d4
GNUmakefile (11 lines changed)

@@ -77,13 +77,17 @@ ifeq "$(shell echo 'int main() {return 0; }' | $(CC) -fno-move-loop-invariants -
   SPECIAL_PERFORMANCE += -fno-move-loop-invariants -fdisable-tree-cunrolli
 endif
 
-ifneq "$(shell uname)" "Darwin"
 ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
   ifndef SOURCE_DATE_EPOCH
-    #CFLAGS_OPT += -march=native
-    SPECIAL_PERFORMANCE += -march=native
+    HAVE_MARCHNATIVE = 1
+    CFLAGS_OPT += -march=native
   endif
 endif
 
+ifneq "$(shell uname)" "Darwin"
+  ifeq "$(HAVE_MARCHNATIVE)" "1"
+    SPECIAL_PERFORMANCE += -march=native
+  endif
   # OS X does not like _FORTIFY_SOURCE=2
   ifndef DEBUG
     CFLAGS_OPT += -D_FORTIFY_SOURCE=2
@@ -117,6 +121,7 @@ ifdef INTROSPECTION
   CFLAGS_OPT += -DINTROSPECTION=1
 endif
 
+
 ifneq "$(shell uname -m)" "x86_64"
 ifneq "$(patsubst i%86,i386,$(shell uname -m))" "i386"
 ifneq "$(shell uname -m)" "amd64"
docs/Changelog.md

@@ -10,8 +10,10 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
 
 
 ### Version ++3.01a (release)
-  - fix crash for very, very fast targets+systems (thanks to mhlakhani
-    for reporting)
+  - afl-fuzz
+    - fix crash for very, very fast targets+systems, thanks for reporting @mhlakhani
+    - switched to a faster RNG
+    - added hghwng's patch for faster trace map analysis
   - added dummy Makefile to instrumentation/
   - afl-cc
     - allow instrumenting LLVMFuzzerTestOneInput
include/afl-fuzz.h

@@ -134,6 +134,12 @@
 // Little helper to access the ptr to afl->##name_buf - for use in afl_realloc.
 #define AFL_BUF_PARAM(name) ((void **)&afl->name##_buf)
 
+#ifdef WORD_SIZE_64
+  #define AFL_RAND_RETURN u64
+#else
+  #define AFL_RAND_RETURN u32
+#endif
+
 extern s8  interesting_8[INTERESTING_8_LEN];
 extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
 extern s32
@@ -580,7 +586,7 @@ typedef struct afl_state {
 
   u32 rand_cnt;                         /* Random number counter            */
 
-  u64 rand_seed[4];
+  u64 rand_seed[3];
   s64 init_seed;
 
   u64 total_cal_us,                     /* Total calibration time (us)      */
@@ -1014,13 +1020,9 @@ void write_bitmap(afl_state_t *);
 u32 count_bits(afl_state_t *, u8 *);
 u32 count_bytes(afl_state_t *, u8 *);
 u32 count_non_255_bytes(afl_state_t *, u8 *);
-#ifdef WORD_SIZE_64
-void simplify_trace(afl_state_t *, u64 *);
-void classify_counts(afl_forkserver_t *);
-#else
-void simplify_trace(afl_state_t *, u32 *);
-void classify_counts(afl_forkserver_t *);
-#endif
+void simplify_trace(afl_state_t *, u8 *);
+void classify_counts(afl_forkserver_t *);
+void discover_word(u8 *ret, u64 *current, u64 *virgin);
 void init_count_class16(void);
 void minimize_bits(afl_state_t *, u8 *, u8 *);
 #ifndef SIMPLE_FILES
@@ -1028,6 +1030,7 @@ u8 *describe_op(afl_state_t *, u8, size_t);
 #endif
 u8 save_if_interesting(afl_state_t *, void *, u32, u8);
 u8 has_new_bits(afl_state_t *, u8 *);
+u8 has_new_bits_unclassified(afl_state_t *, u8 *);
 
 /* Extras */
 
@@ -1111,8 +1114,7 @@ u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8 *out_buf, u32 len);
 u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
                         u64 exec_cksum);
 
-/* xoshiro256** */
-uint64_t rand_next(afl_state_t *afl);
+AFL_RAND_RETURN rand_next(afl_state_t *afl);
 
 /* probability between 0.0 and 1.0 */
 double rand_next_percent(afl_state_t *afl);
include/coverage-32.h (new file, 112 lines)

#include "config.h"
#include "types.h"

u32 skim(const u32 *virgin, const u32 *current, const u32 *current_end);
u32 classify_word(u32 word);

inline u32 classify_word(u32 word) {

  u16 mem16[2];
  memcpy(mem16, &word, sizeof(mem16));

  mem16[0] = count_class_lookup16[mem16[0]];
  mem16[1] = count_class_lookup16[mem16[1]];

  memcpy(&word, mem16, sizeof(mem16));
  return word;

}

void simplify_trace(afl_state_t *afl, u8 *bytes) {

  u32 *mem = (u32 *)bytes;
  u32  i = (afl->fsrv.map_size >> 2);

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u8 *mem8 = (u8 *)mem;

      mem8[0] = simplify_lookup[mem8[0]];
      mem8[1] = simplify_lookup[mem8[1]];
      mem8[2] = simplify_lookup[mem8[2]];
      mem8[3] = simplify_lookup[mem8[3]];

    } else

      *mem = 0x01010101;

    mem++;

  }

}

inline void classify_counts(afl_forkserver_t *fsrv) {

  u32 *mem = (u32 *)fsrv->trace_bits;
  u32  i = (fsrv->map_size >> 2);

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) { *mem = classify_word(*mem); }

    mem++;

  }

}

/* Updates the virgin bits, then reflects whether a new count or a new tuple is
 * seen in ret. */
inline void discover_word(u8 *ret, u32 *current, u32 *virgin) {

  /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
     that have not been already cleared from the virgin map - since this will
     almost always be the case. */

  if (*current & *virgin) {

    if (likely(*ret < 2)) {

      u8 *cur = (u8 *)current;
      u8 *vir = (u8 *)virgin;

      /* Looks like we have not found any new bytes yet; see if any non-zero
         bytes in current[] are pristine in virgin[]. */

      if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
          (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff))
        *ret = 2;
      else
        *ret = 1;

    }

    *virgin &= ~*current;

  }

}

#define PACK_SIZE 16
inline u32 skim(const u32 *virgin, const u32 *current, const u32 *current_end) {

  for (; current != current_end; virgin += 4, current += 4) {

    if (current[0] && classify_word(current[0]) & virgin[0]) return 1;
    if (current[1] && classify_word(current[1]) & virgin[1]) return 1;
    if (current[2] && classify_word(current[2]) & virgin[2]) return 1;
    if (current[3] && classify_word(current[3]) & virgin[3]) return 1;

  }

  return 0;

}
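Editor's note: the bucketing that classify_word() applies is easiest to see outside the fuzzer. Below is a standalone sketch that rebuilds the two lookup tables the way init_count_class16() derives the 16-bit table from count_class_lookup8 (bucket boundaries as in afl-fuzz-bitmap.c: 0, 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+). The table-building helper and main() driver are illustrative only, not AFL++ code.

/* Standalone sketch of AFL++'s hit-count bucketing (not the shipped code).
   Two 8-bit counters are classified at once via a 65536-entry table. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t  lookup8[256];
static uint16_t lookup16[65536];

static void init_tables(void) {
  for (int i = 0; i < 256; i++) {
    uint8_t v;
    if (i == 0) v = 0;
    else if (i == 1) v = 1;
    else if (i == 2) v = 2;
    else if (i == 3) v = 4;
    else if (i <= 7) v = 8;
    else if (i <= 15) v = 16;
    else if (i <= 31) v = 32;
    else if (i <= 127) v = 64;
    else v = 128;
    lookup8[i] = v;
  }
  /* Same derivation as init_count_class16(): compose per-byte buckets. */
  for (int b1 = 0; b1 < 256; b1++)
    for (int b2 = 0; b2 < 256; b2++)
      lookup16[(b1 << 8) + b2] = (uint16_t)((lookup8[b1] << 8) | lookup8[b2]);
}

/* Mirrors the u32 classify_word() from coverage-32.h above. */
static uint32_t classify_word32(uint32_t word) {
  uint16_t mem16[2];
  memcpy(mem16, &word, sizeof(mem16));
  mem16[0] = lookup16[mem16[0]];
  mem16[1] = lookup16[mem16[1]];
  memcpy(&word, mem16, sizeof(mem16));
  return word;
}

int main(void) {
  init_tables();
  /* Four counters: 0, 3, 9 and 200 hits map to buckets 0, 4, 16, 128. */
  uint8_t  counters[4] = {0, 3, 9, 200};
  uint32_t w;
  memcpy(&w, counters, 4);
  w = classify_word32(w);
  memcpy(counters, &w, 4);
  printf("%u %u %u %u\n", counters[0], counters[1], counters[2], counters[3]);
  return 0;
}

Because the 16-bit table is built byte-wise, the mapping is endian-agnostic: each counter byte goes through the same 8-bit bucket function regardless of word order.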
include/coverage-64.h (new file, 189 lines)

#include "config.h"
#include "types.h"

#if (defined(__AVX512F__) && defined(__AVX512DQ__)) || defined(__AVX2__)
  #include <immintrin.h>
#endif

u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end);
u64 classify_word(u64 word);

inline u64 classify_word(u64 word) {

  u16 mem16[4];
  memcpy(mem16, &word, sizeof(mem16));

  mem16[0] = count_class_lookup16[mem16[0]];
  mem16[1] = count_class_lookup16[mem16[1]];
  mem16[2] = count_class_lookup16[mem16[2]];
  mem16[3] = count_class_lookup16[mem16[3]];

  memcpy(&word, mem16, sizeof(mem16));
  return word;

}

void simplify_trace(afl_state_t *afl, u8 *bytes) {

  u64 *mem = (u64 *)bytes;
  u32  i = (afl->fsrv.map_size >> 3);

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) {

      u8 *mem8 = (u8 *)mem;

      mem8[0] = simplify_lookup[mem8[0]];
      mem8[1] = simplify_lookup[mem8[1]];
      mem8[2] = simplify_lookup[mem8[2]];
      mem8[3] = simplify_lookup[mem8[3]];
      mem8[4] = simplify_lookup[mem8[4]];
      mem8[5] = simplify_lookup[mem8[5]];
      mem8[6] = simplify_lookup[mem8[6]];
      mem8[7] = simplify_lookup[mem8[7]];

    } else

      *mem = 0x0101010101010101ULL;

    mem++;

  }

}

inline void classify_counts(afl_forkserver_t *fsrv) {

  u64 *mem = (u64 *)fsrv->trace_bits;
  u32  i = (fsrv->map_size >> 3);

  while (i--) {

    /* Optimize for sparse bitmaps. */

    if (unlikely(*mem)) { *mem = classify_word(*mem); }

    mem++;

  }

}

/* Updates the virgin bits, then reflects whether a new count or a new tuple is
 * seen in ret. */
inline void discover_word(u8 *ret, u64 *current, u64 *virgin) {

  /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
     that have not been already cleared from the virgin map - since this will
     almost always be the case. */

  if (*current & *virgin) {

    if (likely(*ret < 2)) {

      u8 *cur = (u8 *)current;
      u8 *vir = (u8 *)virgin;

      /* Looks like we have not found any new bytes yet; see if any non-zero
         bytes in current[] are pristine in virgin[]. */

      if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
          (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
          (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
          (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff))
        *ret = 2;
      else
        *ret = 1;

    }

    *virgin &= ~*current;

  }

}

#if defined(__AVX512F__) && defined(__AVX512DQ__)
  #define PACK_SIZE 64
inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {

  for (; current != current_end; virgin += 8, current += 8) {

    __m512i  value = *(__m512i *)current;
    __mmask8 mask = _mm512_testn_epi64_mask(value, value);

    /* All bytes are zero. */
    if (mask == 0xff) continue;

    /* Look for nonzero bytes and check for new bits. */
  #define UNROLL(x) \
    if (!(mask & (1 << x)) && classify_word(current[x]) & virgin[x]) return 1
    UNROLL(0);
    UNROLL(1);
    UNROLL(2);
    UNROLL(3);
    UNROLL(4);
    UNROLL(5);
    UNROLL(6);
    UNROLL(7);
  #undef UNROLL

  }

  return 0;

}
#endif

#if !defined(PACK_SIZE) && defined(__AVX2__)
  #define PACK_SIZE 32
inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {

  __m256i zeroes = _mm256_setzero_si256();

  for (; current != current_end; virgin += 4, current += 4) {

    __m256i value = *(__m256i *)current;
    __m256i cmp = _mm256_cmpeq_epi64(value, zeroes);
    u32     mask = _mm256_movemask_epi8(cmp);

    /* All bytes are zero. */
    if (mask == (u32)-1) continue;

    /* Look for nonzero bytes and check for new bits. */
    if (!(mask & 0xff) && classify_word(current[0]) & virgin[0]) return 1;
    if (!(mask & 0xff00) && classify_word(current[1]) & virgin[1]) return 1;
    if (!(mask & 0xff0000) && classify_word(current[2]) & virgin[2]) return 1;
    if (!(mask & 0xff000000) && classify_word(current[3]) & virgin[3]) return 1;

  }

  return 0;

}
#endif

#if !defined(PACK_SIZE)
  #define PACK_SIZE 32
inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {

  for (; current != current_end; virgin += 4, current += 4) {

    if (current[0] && classify_word(current[0]) & virgin[0]) return 1;
    if (current[1] && classify_word(current[1]) & virgin[1]) return 1;
    if (current[2] && classify_word(current[2]) & virgin[2]) return 1;
    if (current[3] && classify_word(current[3]) & virgin[3]) return 1;

  }

  return 0;

}
#endif
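Editor's note: skim() is a read-only early-exit scan; it answers "could this trace reveal anything new?" without modifying the trace or the virgin map. The following minimal sketch mirrors the scalar fallback above under simplifying assumptions: classify8() reimplements the bucket boundaries in plain code instead of the table lookup, and skim64/classify_word64 are my names for the demo, not AFL++ identifiers.

/* Minimal, self-contained model of the scalar skim() fast path. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t classify8(uint8_t c) {
  if (c == 0) return 0;
  if (c <= 2) return c;   /* 1 -> 1, 2 -> 2 */
  if (c == 3) return 4;
  if (c <= 7) return 8;
  if (c <= 15) return 16;
  if (c <= 31) return 32;
  if (c <= 127) return 64;
  return 128;
}

static uint64_t classify_word64(uint64_t word) {
  uint8_t b[8];
  memcpy(b, &word, 8);
  for (int i = 0; i < 8; i++) b[i] = classify8(b[i]);
  memcpy(&word, b, 8);
  return word;
}

/* Returns 1 as soon as a classified trace word would clear bits still set
   in the virgin map; neither buffer is modified. The loop steps 4 words at
   a time, which is why map sizes must be padded to the pack size. */
static uint32_t skim64(const uint64_t *virgin, const uint64_t *cur,
                       const uint64_t *end) {
  for (; cur != end; virgin += 4, cur += 4) {
    if (cur[0] && (classify_word64(cur[0]) & virgin[0])) return 1;
    if (cur[1] && (classify_word64(cur[1]) & virgin[1])) return 1;
    if (cur[2] && (classify_word64(cur[2]) & virgin[2])) return 1;
    if (cur[3] && (classify_word64(cur[3]) & virgin[3])) return 1;
  }
  return 0;
}

int main(void) {
  uint64_t trace[4] = {0}, virgin[4];
  memset(virgin, 0xff, sizeof(virgin));             /* nothing seen yet */

  printf("%u\n", skim64(virgin, trace, trace + 4)); /* 0: empty trace */

  ((uint8_t *)trace)[5] = 3;                        /* one counter hit */
  printf("%u\n", skim64(virgin, trace, trace + 4)); /* 1: new coverage */
  return 0;
}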
instrumentation/afl-compiler-rt.o.c

@@ -236,8 +236,8 @@ static void __afl_map_shm(void) {
 
   if (__afl_final_loc) {
 
-    if (__afl_final_loc % 8)
-      __afl_final_loc = (((__afl_final_loc + 7) >> 3) << 3);
+    if (__afl_final_loc % 32)
+      __afl_final_loc = (((__afl_final_loc + 31) >> 5) << 5);
     __afl_map_size = __afl_final_loc;
 
     if (__afl_final_loc > MAP_SIZE) {
src/afl-forkserver.c

@@ -641,11 +641,11 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
 
   if (!fsrv->map_size) { fsrv->map_size = MAP_SIZE; }
 
-  if (unlikely(tmp_map_size % 8)) {
+  if (unlikely(tmp_map_size % 32)) {
 
     // should not happen
     WARNF("Target reported non-aligned map size of %u", tmp_map_size);
-    tmp_map_size = (((tmp_map_size + 8) >> 3) << 3);
+    tmp_map_size = (((tmp_map_size + 31) >> 5) << 5);
 
   }
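Editor's note: both alignment changes above exist because skim() walks the map in PACK_SIZE chunks (32 bytes on the AVX2 and scalar paths, 64 with AVX-512), so map sizes are now padded to a 32-byte boundary instead of 8. The shift expression rounds up by adding 31 and then clearing the low five bits; a quick check:

/* The rounding used above: (((x + 31) >> 5) << 5) rounds x up to the
   next multiple of 32. */
#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_32(uint32_t x) { return ((x + 31) >> 5) << 5; }

int main(void) {
  /* Prints: 32 32 64 65536 */
  printf("%u %u %u %u\n", round_up_32(1), round_up_32(32), round_up_32(33),
         round_up_32(65535));
  return 0;
}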
src/afl-fuzz-bitmap.c

@@ -49,101 +49,6 @@ void write_bitmap(afl_state_t *afl) {
 
 }
 
-/* Check if the current execution path brings anything new to the table.
-   Update virgin bits to reflect the finds. Returns 1 if the only change is
-   the hit-count for a particular tuple; 2 if there are new tuples seen.
-   Updates the map, so subsequent calls will always return 0.
-
-   This function is called after every exec() on a fairly large buffer, so
-   it needs to be fast. We do this in 32-bit and 64-bit flavors. */
-
-u8 __attribute__((hot)) has_new_bits(afl_state_t *afl, u8 *virgin_map) {
-
-#ifdef WORD_SIZE_64
-
-  u64 *current = (u64 *)afl->fsrv.trace_bits;
-  u64 *virgin = (u64 *)virgin_map;
-
-  u32 i = (afl->fsrv.map_size >> 3);
-
-#else
-
-  u32 *current = (u32 *)afl->fsrv.trace_bits;
-  u32 *virgin = (u32 *)virgin_map;
-
-  u32 i = (afl->fsrv.map_size >> 2);
-
-#endif /* ^WORD_SIZE_64 */
-  // the map size must be a minimum of 8 bytes.
-  // for variable/dynamic map sizes this is ensured in the forkserver
-
-  u8 ret = 0;
-
-  while (i--) {
-
-    /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
-       that have not been already cleared from the virgin map - since this will
-       almost always be the case. */
-
-    // the (*current) is unnecessary but speeds up the overall comparison
-    if (unlikely(*current) && unlikely(*current & *virgin)) {
-
-      if (likely(ret < 2)) {
-
-        u8 *cur = (u8 *)current;
-        u8 *vir = (u8 *)virgin;
-
-        /* Looks like we have not found any new bytes yet; see if any non-zero
-           bytes in current[] are pristine in virgin[]. */
-
-#ifdef WORD_SIZE_64
-
-        if (*virgin == 0xffffffffffffffff || (cur[0] && vir[0] == 0xff) ||
-            (cur[1] && vir[1] == 0xff) || (cur[2] && vir[2] == 0xff) ||
-            (cur[3] && vir[3] == 0xff) || (cur[4] && vir[4] == 0xff) ||
-            (cur[5] && vir[5] == 0xff) || (cur[6] && vir[6] == 0xff) ||
-            (cur[7] && vir[7] == 0xff)) {
-
-          ret = 2;
-
-        } else {
-
-          ret = 1;
-
-        }
-
-#else
-
-        if (*virgin == 0xffffffff || (cur[0] && vir[0] == 0xff) ||
-            (cur[1] && vir[1] == 0xff) || (cur[2] && vir[2] == 0xff) ||
-            (cur[3] && vir[3] == 0xff))
-          ret = 2;
-        else
-          ret = 1;
-
-#endif /* ^WORD_SIZE_64 */
-
-      }
-
-      *virgin &= ~*current;
-
-    }
-
-    ++current;
-    ++virgin;
-
-  }
-
-  if (unlikely(ret) && likely(virgin_map == afl->virgin_bits)) {
-
-    afl->bitmap_changed = 1;
-
-  }
-
-  return ret;
-
-}
-
 /* Count the number of bits set in the provided bitmap. Used for the status
    screen several times every second, does not have to be fast. */
 
@@ -242,77 +147,11 @@ const u8 simplify_lookup[256] = {
 
 };
 
-#ifdef WORD_SIZE_64
-
-void simplify_trace(afl_state_t *afl, u64 *mem) {
-
-  u32 i = (afl->fsrv.map_size >> 3);
-
-  while (i--) {
-
-    /* Optimize for sparse bitmaps. */
-
-    if (unlikely(*mem)) {
-
-      u8 *mem8 = (u8 *)mem;
-
-      mem8[0] = simplify_lookup[mem8[0]];
-      mem8[1] = simplify_lookup[mem8[1]];
-      mem8[2] = simplify_lookup[mem8[2]];
-      mem8[3] = simplify_lookup[mem8[3]];
-      mem8[4] = simplify_lookup[mem8[4]];
-      mem8[5] = simplify_lookup[mem8[5]];
-      mem8[6] = simplify_lookup[mem8[6]];
-      mem8[7] = simplify_lookup[mem8[7]];
-
-    } else {
-
-      *mem = 0x0101010101010101ULL;
-
-    }
-
-    ++mem;
-
-  }
-
-}
-
-#else
-
-void simplify_trace(afl_state_t *afl, u32 *mem) {
-
-  u32 i = (afl->fsrv.map_size >> 2);
-
-  while (i--) {
-
-    /* Optimize for sparse bitmaps. */
-
-    if (unlikely(*mem)) {
-
-      u8 *mem8 = (u8 *)mem;
-
-      mem8[0] = simplify_lookup[mem8[0]];
-      mem8[1] = simplify_lookup[mem8[1]];
-      mem8[2] = simplify_lookup[mem8[2]];
-      mem8[3] = simplify_lookup[mem8[3]];
-
-    } else
-
-      *mem = 0x01010101;
-
-    ++mem;
-
-  }
-
-}
-
-#endif /* ^WORD_SIZE_64 */
-
 /* Destructively classify execution counts in a trace. This is used as a
    preprocessing step for any newly acquired traces. Called on every exec,
    must be fast. */
 
-static const u8 count_class_lookup8[256] = {
+const u8 count_class_lookup8[256] = {
 
   [0] = 0,
   [1] = 1,
@@ -326,7 +165,7 @@ static const u8 count_class_lookup8[256] = {
 
 };
 
-static u16 count_class_lookup16[65536];
+u16 count_class_lookup16[65536];
 
 void init_count_class16(void) {
 
@@ -345,64 +184,88 @@ void init_count_class16(void) {
 
 }
 
-#ifdef WORD_SIZE_64
-
-void __attribute__((hot)) classify_counts(afl_forkserver_t *fsrv) {
-
-  u64 *mem = (u64 *)fsrv->trace_bits;
-
-  u32 i = (fsrv->map_size >> 3);
-
-  while (i--) {
-
-    /* Optimize for sparse bitmaps. */
-
-    if (unlikely(*mem)) {
-
-      u16 *mem16 = (u16 *)mem;
-
-      mem16[0] = count_class_lookup16[mem16[0]];
-      mem16[1] = count_class_lookup16[mem16[1]];
-      mem16[2] = count_class_lookup16[mem16[2]];
-      mem16[3] = count_class_lookup16[mem16[3]];
-
-    }
-
-    ++mem;
-
-  }
-
-}
-
-#else
-
-void __attribute__((hot)) classify_counts(afl_forkserver_t *fsrv) {
-
-  u32 *mem = (u32 *)fsrv->trace_bits;
-
-  u32 i = (fsrv->map_size >> 2);
-
-  while (i--) {
-
-    /* Optimize for sparse bitmaps. */
-
-    if (unlikely(*mem)) {
-
-      u16 *mem16 = (u16 *)mem;
-
-      mem16[0] = count_class_lookup16[mem16[0]];
-      mem16[1] = count_class_lookup16[mem16[1]];
-
-    }
-
-    ++mem;
-
-  }
-
-}
-
-#endif /* ^WORD_SIZE_64 */
+/* Import coverage processing routines. */
+
+#ifdef WORD_SIZE_64
+  #include "coverage-64.h"
+#else
+  #include "coverage-32.h"
+#endif
+
+/* Check if the current execution path brings anything new to the table.
+   Update virgin bits to reflect the finds. Returns 1 if the only change is
+   the hit-count for a particular tuple; 2 if there are new tuples seen.
+   Updates the map, so subsequent calls will always return 0.
+
+   This function is called after every exec() on a fairly large buffer, so
+   it needs to be fast. We do this in 32-bit and 64-bit flavors. */
+
+inline u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {
+
+#ifdef WORD_SIZE_64
+
+  u64 *current = (u64 *)afl->fsrv.trace_bits;
+  u64 *virgin = (u64 *)virgin_map;
+
+  u32 i = (afl->fsrv.map_size >> 3);
+
+#else
+
+  u32 *current = (u32 *)afl->fsrv.trace_bits;
+  u32 *virgin = (u32 *)virgin_map;
+
+  u32 i = (afl->fsrv.map_size >> 2);
+
+#endif /* ^WORD_SIZE_64 */
+
+  u8 ret = 0;
+  while (i--) {
+
+    if (unlikely(*current)) discover_word(&ret, current, virgin);
+
+    current++;
+    virgin++;
+
+  }
+
+  if (unlikely(ret) && likely(virgin_map == afl->virgin_bits))
+    afl->bitmap_changed = 1;
+
+  return ret;
+
+}
+
+/* A combination of classify_counts and has_new_bits. If 0 is returned, then
+ * the trace bits are kept as-is. Otherwise, the trace bits are overwritten
+ * with classified values.
+ *
+ * This accelerates the processing: in most cases, no interesting behavior
+ * happens, and the trace bits will be discarded soon. This function
+ * optimizes for such cases: a one-pass scan of the trace bits without
+ * modifying anything. Only in rare cases does it fall back to the slow
+ * path: classify_counts() first, then return has_new_bits(). */
+
+inline u8 has_new_bits_unclassified(afl_state_t *afl, u8 *virgin_map) {
+
+  /* Handle the hot path first: no new coverage */
+  u8 *end = afl->fsrv.trace_bits + afl->fsrv.map_size;
+
+#ifdef WORD_SIZE_64
+
+  if (!skim((u64 *)virgin_map, (u64 *)afl->fsrv.trace_bits, (u64 *)end))
+    return 0;
+
+#else
+
+  if (!skim((u32 *)virgin_map, (u32 *)afl->fsrv.trace_bits, (u32 *)end))
+    return 0;
+
+#endif /* ^WORD_SIZE_64 */
+
+  classify_counts(&afl->fsrv);
+  return has_new_bits(afl, virgin_map);
+
+}
 
 /* Compact trace bytes into a smaller bitmap. We effectively just drop the
    count information here. This is called only sporadically, for some
    new paths. */
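Editor's note: the ret encoding in discover_word() distinguishes a brand-new tuple (2) from a mere hit-count change (1): a byte that is still 0xff in the virgin map has never been touched, while a byte with some bits cleared has been seen at other counts. The sketch below reduces the word-wise logic to a single byte so the two outcomes are easy to trace; discover8() is a hypothetical name for this simplified version, not an AFL++ function.

/* Worked example of discover_word()'s ret encoding on one byte. */
#include <stdint.h>
#include <stdio.h>

static void discover8(uint8_t *ret, uint8_t cur, uint8_t *vir) {
  if (cur & *vir) {                       /* something new in this byte */
    if (*ret < 2) *ret = (*vir == 0xff) ? 2 : 1;
    *vir &= (uint8_t)~cur;                /* clear the bits we just saw */
  }
}

int main(void) {
  uint8_t vir = 0xff, ret = 0;

  discover8(&ret, 0x08, &vir);                /* bucket "4-7 hits", fresh */
  printf("ret=%u vir=0x%02x\n", ret, vir);    /* ret=2 vir=0xf7: new tuple */

  ret = 0;
  discover8(&ret, 0x20, &vir);                /* same edge, higher bucket */
  printf("ret=%u vir=0x%02x\n", ret, vir);    /* ret=1 vir=0xd7: new count */
  return 0;
}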
@@ -581,7 +444,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
   u8 *queue_fn = "";
   u8  new_bits = '\0';
   s32 fd;
-  u8  keeping = 0, res;
+  u8  keeping = 0, res, classified = 0;
   u64 cksum = 0;
 
   u8 fn[PATH_MAX];
@@ -605,13 +468,17 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
     /* Keep only if there are new bits in the map, add to queue for
        future fuzzing, etc. */
 
-    if (!(new_bits = has_new_bits(afl, afl->virgin_bits))) {
+    new_bits = has_new_bits_unclassified(afl, afl->virgin_bits);
+
+    if (likely(!new_bits)) {
 
       if (unlikely(afl->crash_mode)) { ++afl->total_crashes; }
       return 0;
 
     }
 
+    classified = new_bits;
+
 #ifndef SIMPLE_FILES
 
     queue_fn = alloc_printf(
@@ -715,11 +582,14 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
       if (likely(!afl->non_instrumented_mode)) {
 
-#ifdef WORD_SIZE_64
-        simplify_trace(afl, (u64 *)afl->fsrv.trace_bits);
-#else
-        simplify_trace(afl, (u32 *)afl->fsrv.trace_bits);
-#endif /* ^WORD_SIZE_64 */
+        if (!classified) {
+
+          classify_counts(&afl->fsrv);
+          classified = 1;
+
+        }
+
+        simplify_trace(afl, afl->fsrv.trace_bits);
 
         if (!has_new_bits(afl, afl->virgin_tmout)) { return keeping; }
 
@@ -764,6 +634,7 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
       u8 new_fault;
       write_to_testcase(afl, mem, len);
       new_fault = fuzz_run_target(afl, &afl->fsrv, afl->hang_tmout);
+      classify_counts(&afl->fsrv);
 
       /* A corner case that one user reported bumping into: increasing the
          timeout actually uncovers a crash. Make sure we don't discard it if
@@ -812,11 +683,14 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
 
      if (likely(!afl->non_instrumented_mode)) {
 
-#ifdef WORD_SIZE_64
-        simplify_trace(afl, (u64 *)afl->fsrv.trace_bits);
-#else
-        simplify_trace(afl, (u32 *)afl->fsrv.trace_bits);
-#endif /* ^WORD_SIZE_64 */
+        if (!classified) {
+
+          classify_counts(&afl->fsrv);
+          classified = 1;
+
+        }
+
+        simplify_trace(afl, afl->fsrv.trace_bits);
 
         if (!has_new_bits(afl, afl->virgin_crash)) { return keeping; }
 
src/afl-fuzz-run.c

@@ -62,8 +62,6 @@ fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv, u32 timeout) {
   time_spent_start = (spec.tv_sec * 1000000000) + spec.tv_nsec;
 #endif
 
-  // TODO: Don't classify for faults?
-  classify_counts(fsrv);
   return res;
 
 }
@@ -379,6 +377,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
 
   }
 
+  classify_counts(&afl->fsrv);
   cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
   if (q->exec_cksum != cksum) {
 
@@ -767,13 +766,14 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
     write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);
 
     fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
-    ++afl->trim_execs;
 
     if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }
 
     /* Note that we don't keep track of crashes or hangs here; maybe TODO?
      */
 
+    ++afl->trim_execs;
+    classify_counts(&afl->fsrv);
     cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
 
     /* If the deletion had no impact on the trace, make it permanent. This
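Editor's note: the net effect of these hunks is that classification is no longer paid on every execution inside fuzz_run_target(); callers that actually consume classified counts (calibration, trimming, the save_if_interesting() paths) invoke classify_counts() themselves, and the `classified` flag guarantees at most one pass per execution. A tiny self-contained model of that lazy, run-at-most-once pattern; all names here are illustrative, not AFL++ APIs:

/* Model of the `classified` flag: bucket counts lazily, at most once. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint8_t trace[8];
  int     classified;  /* have the counts been bucketed yet? */
} exec_state;

static uint8_t classify8(uint8_t c) {  /* simplified bucket map, demo only */
  if (c == 0) return 0;
  if (c <= 2) return c;
  if (c == 3) return 4;
  return 8;  /* demo inputs stay below 8 hits */
}

static void classify_once(exec_state *s) {
  if (s->classified) return;  /* second consumer reuses the buffer */
  for (int i = 0; i < 8; i++) s->trace[i] = classify8(s->trace[i]);
  s->classified = 1;
}

int main(void) {
  exec_state s = {.trace = {0, 1, 3, 3, 0, 0, 2, 0}, .classified = 0};
  classify_once(&s);  /* first consumer pays the cost */
  classify_once(&s);  /* no-op */
  for (int i = 0; i < 8; i++) printf("%u ", s.trace[i]);
  printf("\n");       /* prints: 0 1 4 4 0 0 2 0 */
  return 0;
}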
src/afl-performance.c

@@ -27,46 +27,50 @@
 #include "xxhash.h"
 #undef XXH_INLINE_ALL
 
-/* we use xoshiro256** instead of rand/random because it is 10x faster and has
-   better randomness properties. */
-
-static inline uint64_t rotl(const uint64_t x, int k) {
-
-  return (x << k) | (x >> (64 - k));
-
-}
-
 void rand_set_seed(afl_state_t *afl, s64 init_seed) {
 
   afl->init_seed = init_seed;
   afl->rand_seed[0] =
       hash64((u8 *)&afl->init_seed, sizeof(afl->init_seed), HASH_CONST);
   afl->rand_seed[1] = afl->rand_seed[0] ^ 0x1234567890abcdef;
-  afl->rand_seed[2] = afl->rand_seed[0] & 0x0123456789abcdef;
-  afl->rand_seed[3] = afl->rand_seed[0] | 0x01abcde43f567908;
+  afl->rand_seed[2] = (afl->rand_seed[0] & 0x1234567890abcdef) ^
+                      (afl->rand_seed[1] | 0xfedcba9876543210);
 
 }
 
-inline uint64_t rand_next(afl_state_t *afl) {
-
-  const uint64_t result =
-      rotl(afl->rand_seed[0] + afl->rand_seed[3], 23) + afl->rand_seed[0];
-
-  const uint64_t t = afl->rand_seed[1] << 17;
-
-  afl->rand_seed[2] ^= afl->rand_seed[0];
-  afl->rand_seed[3] ^= afl->rand_seed[1];
-  afl->rand_seed[1] ^= afl->rand_seed[2];
-  afl->rand_seed[0] ^= afl->rand_seed[3];
-
-  afl->rand_seed[2] ^= t;
-
-  afl->rand_seed[3] = rotl(afl->rand_seed[3], 45);
-
-  return result;
-
-}
+#define ROTL(d, lrot) ((d << (lrot)) | (d >> (8 * sizeof(d) - (lrot))))
+
+#ifdef WORD_SIZE_64
+// romuDuoJr
+inline AFL_RAND_RETURN rand_next(afl_state_t *afl) {
+
+  AFL_RAND_RETURN xp = afl->rand_seed[0];
+  afl->rand_seed[0] = 15241094284759029579u * afl->rand_seed[1];
+  afl->rand_seed[1] = afl->rand_seed[1] - xp;
+  afl->rand_seed[1] = ROTL(afl->rand_seed[1], 27);
+  return xp;
+
+}
+
+#else
+// RomuTrio32
+inline AFL_RAND_RETURN rand_next(afl_state_t *afl) {
+
+  AFL_RAND_RETURN xp = afl->rand_seed[0], yp = afl->rand_seed[1],
+                  zp = afl->rand_seed[2];
+  afl->rand_seed[0] = 3323815723u * zp;
+  afl->rand_seed[1] = yp - xp;
+  afl->rand_seed[1] = ROTL(afl->rand_seed[1], 6);
+  afl->rand_seed[2] = zp - yp;
+  afl->rand_seed[2] = ROTL(afl->rand_seed[2], 22);
+  return xp;
+
+}
+
+#endif
+
+#undef ROTL
 
 /* returns a double between 0.000000000 and 1.000000000 */
 
 inline double rand_next_percent(afl_state_t *afl) {
@@ -75,80 +79,6 @@ inline double rand_next_percent(afl_state_t *afl) {
 
 }
 
-/* This is the jump function for the generator. It is equivalent
-   to 2^128 calls to rand_next(); it can be used to generate 2^128
-   non-overlapping subsequences for parallel computations. */
-
-void jump(afl_state_t *afl) {
-
-  static const uint64_t JUMP[] = {0x180ec6d33cfd0aba, 0xd5a61266f0c9392c,
-                                  0xa9582618e03fc9aa, 0x39abdc4529b1661c};
-  size_t   i, b;
-  uint64_t s0 = 0;
-  uint64_t s1 = 0;
-  uint64_t s2 = 0;
-  uint64_t s3 = 0;
-  for (i = 0; i < (sizeof(JUMP) / sizeof(*JUMP)); i++)
-    for (b = 0; b < 64; b++) {
-
-      if (JUMP[i] & UINT64_C(1) << b) {
-
-        s0 ^= afl->rand_seed[0];
-        s1 ^= afl->rand_seed[1];
-        s2 ^= afl->rand_seed[2];
-        s3 ^= afl->rand_seed[3];
-
-      }
-
-      rand_next(afl);
-
-    }
-
-  afl->rand_seed[0] = s0;
-  afl->rand_seed[1] = s1;
-  afl->rand_seed[2] = s2;
-  afl->rand_seed[3] = s3;
-
-}
-
-/* This is the long-jump function for the generator. It is equivalent to
-   2^192 calls to rand_next(); it can be used to generate 2^64 starting
-   points, from each of which jump() will generate 2^64 non-overlapping
-   subsequences for parallel distributed computations. */
-
-void long_jump(afl_state_t *afl) {
-
-  static const uint64_t LONG_JUMP[] = {0x76e15d3efefdcbbf, 0xc5004e441c522fb3,
-                                       0x77710069854ee241, 0x39109bb02acbe635};
-
-  size_t   i, b;
-  uint64_t s0 = 0;
-  uint64_t s1 = 0;
-  uint64_t s2 = 0;
-  uint64_t s3 = 0;
-  for (i = 0; i < (sizeof(LONG_JUMP) / sizeof(*LONG_JUMP)); i++)
-    for (b = 0; b < 64; b++) {
-
-      if (LONG_JUMP[i] & UINT64_C(1) << b) {
-
-        s0 ^= afl->rand_seed[0];
-        s1 ^= afl->rand_seed[1];
-        s2 ^= afl->rand_seed[2];
-        s3 ^= afl->rand_seed[3];
-
-      }
-
-      rand_next(afl);
-
-    }
-
-  afl->rand_seed[0] = s0;
-  afl->rand_seed[1] = s1;
-  afl->rand_seed[2] = s2;
-  afl->rand_seed[3] = s3;
-
-}
-
 /* we switch from afl's murmur implementation to xxh3 as it is 30% faster -
    and get 64 bit hashes instead of just 32 bit. Less collisions! :-) */
 
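Editor's note: the new 64-bit generator is RomuDuoJr: two words of state and one multiply, one subtract, one rotate per draw, versus four words and far more mixing in the xoshiro256** it replaces (which is also why rand_seed shrinks from 4 to 3 words and the jump functions go away). Below is a standalone sketch using the multiplier and rotation from the diff; the splitmix64-style seeding is my stand-in for AFL++'s hash64(), and the zero-state guard is a precaution, not part of the shipped code.

/* Standalone RomuDuoJr sketch (constants as in rand_next() above). */
#include <stdint.h>
#include <stdio.h>

#define ROTL(d, lrot) (((d) << (lrot)) | ((d) >> (8 * sizeof(d) - (lrot))))

static uint64_t s0, s1;

static uint64_t mix64(uint64_t x) {  /* splitmix64 finalizer, stand-in seeder */
  x += 0x9e3779b97f4a7c15ULL;
  x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ULL;
  x = (x ^ (x >> 27)) * 0x94d049bb133111ebULL;
  return x ^ (x >> 31);
}

static void seed(uint64_t v) {
  s0 = mix64(v);
  s1 = mix64(s0);
  if (!s0 && !s1) s1 = 1;  /* keep state away from the all-zero fixed point */
}

static uint64_t romu_duo_jr(void) {
  uint64_t xp = s0;
  s0 = 15241094284759029579u * s1;  /* same multiplier as the diff */
  s1 = s1 - xp;
  s1 = ROTL(s1, 27);                /* same rotation as the diff */
  return xp;
}

int main(void) {
  seed(42);
  for (int i = 0; i < 4; i++)
    printf("%016llx\n", (unsigned long long)romu_duo_jr());
  return 0;
}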