improve performance for default power schedule

van Hauser 2020-06-15 20:02:28 +02:00
parent af8d4d49ed
commit ada59feda8
5 changed files with 52 additions and 29 deletions

docs/Changelog.md

@@ -16,6 +16,7 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
     that ensure exactly one main node is present and warn otherwise
   - If no main node is present at a sync, one secondary node automatically
     becomes a temporary main node until a real main node shows up
+  - Fixed a major performance issue we inherited from AFLfast
   - switched murmur2 hashing and random() for xxh3 and xoshiro256**,
     resulting in an up to 5.5% speed increase
   - Resizing the window does not crash afl-fuzz anymore
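
For context on the hashing entry: xoshiro256** is the public-domain generator by Blackman and Vigna that replaced random(). Its output step is a handful of ALU operations, which is consistent with the speed increase the entry reports. A minimal sketch (seeding omitted; reference implementation at prng.di.unimi.it):

    #include <stdint.h>

    static uint64_t s[4];  /* PRNG state; must be seeded nonzero */

    static inline uint64_t rotl(const uint64_t x, int k) {
      return (x << k) | (x >> (64 - k));
    }

    /* One output step of xoshiro256** */
    uint64_t xoshiro256starstar(void) {
      const uint64_t result = rotl(s[1] * 5, 7) * 9;
      const uint64_t t = s[1] << 17;
      s[2] ^= s[0];
      s[3] ^= s[1];
      s[1] ^= s[2];
      s[0] ^= s[3];
      s[2] ^= t;
      s[3] ^= rotl(s[3], 45);
      return result;
    }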

include/afl-fuzz.h

@@ -223,13 +223,13 @@ enum {
 enum {

   /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */
-  /* 01 */ FAST,    /* Exponential schedule             */
-  /* 02 */ COE,     /* Cut-Off Exponential schedule     */
-  /* 03 */ LIN,     /* Linear schedule                  */
-  /* 04 */ QUAD,    /* Quadratic schedule               */
-  /* 05 */ EXPLOIT, /* AFL's exploitation-based const.  */
-  /* 06 */ MMOPT,   /* Modified MOPT schedule           */
-  /* 07 */ RARE,    /* Rare edges                       */
+  /* 01 */ EXPLOIT, /* AFL's exploitation-based const.  */
+  /* 02 */ FAST,    /* Exponential schedule             */
+  /* 03 */ COE,     /* Cut-Off Exponential schedule     */
+  /* 04 */ LIN,     /* Linear schedule                  */
+  /* 05 */ QUAD,    /* Quadratic schedule               */
+  /* 06 */ RARE,    /* Rare edges                       */
+  /* 07 */ MMOPT,   /* Modified MOPT schedule           */

   POWER_SCHEDULES_NUM
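
The point of the reorder is that the two constant schedules, EXPLORE and EXPLOIT, now sit in front of the AFLfast-derived ones, so the hot paths in the hunks below can exclude them with a single range check instead of comparing against each schedule individually. A sketch of the pattern:

    /* After the reorder, one comparison selects every AFLfast-style
       schedule; save_if_interesting() narrows it further with
       "&& afl->schedule <= RARE" to leave out MMOPT. */
    if (afl->schedule >= FAST) { /* FAST, COE, LIN, QUAD, RARE, MMOPT */ }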

src/afl-fuzz-bitmap.c

@@ -542,24 +542,32 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {

   u8  hnb = '\0';
   s32 fd;
   u8  keeping = 0, res;
+  u64 cksum = 0;

   u8 fn[PATH_MAX];

-  /* Update path frequency. */
-  u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
-
-  struct queue_entry *q = afl->queue;
-  while (q) {
-
-    if (q->exec_cksum == cksum) {
-
-      q->n_fuzz = q->n_fuzz + 1;
-      break;
-
-    }
-
-    q = q->next;
-
-  }
+  /* Generating a hash on every input is super expensive. Bad idea and should
+     only be used for special schedules */
+  if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {
+
+    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
+
+    struct queue_entry *q = afl->queue;
+    while (q) {
+
+      if (q->exec_cksum == cksum) {
+
+        q->n_fuzz = q->n_fuzz + 1;
+        break;
+
+      }
+
+      q = q->next;
+
+    }
+
+  }

   if (unlikely(fault == afl->crash_mode)) {
@@ -595,7 +603,11 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {

   }

-  afl->queue_top->exec_cksum = cksum;
+  if (cksum)
+    afl->queue_top->exec_cksum = cksum;
+  else
+    afl->queue_top->exec_cksum =
+        hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

   /* Try to calibrate inline; this also calls update_bitmap_score() when
      successful. */
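
Read together, the two hunks make the checksum lazy: under the default EXPLORE schedule, cksum stays 0 through save_if_interesting(), and hash64() over the whole coverage map only runs for the rare inputs that actually get queued. A minimal sketch of the idea, using a hypothetical helper name (not part of AFL++):

    /* Hypothetical illustration of the lazy pattern: reuse the checksum
       if the schedule already computed one, otherwise hash on demand. */
    static u64 queued_cksum(afl_state_t *afl, u64 cached) {
      if (cached) return cached;
      return hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
    }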

src/afl-fuzz-one.c

@@ -566,12 +566,10 @@ u8 fuzz_one_original(afl_state_t *afl) {
      if it has gone through deterministic testing in earlier, resumed runs
      (passed_det). */

-  if (afl->skip_deterministic ||
-      ((!afl->queue_cur->passed_det) &&
-       perf_score < (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
-                         ? afl->queue_cur->depth * 30
-                         : afl->havoc_max_mult * 100)) ||
-      afl->queue_cur->passed_det) {
+  if (likely(afl->queue_cur->passed_det) || likely(afl->skip_deterministic) ||
+      likely(perf_score <
+             (afl->queue_cur->depth * 30 <= afl->havoc_max_mult * 100
+                  ? afl->queue_cur->depth * 30
+                  : afl->havoc_max_mult * 100))) {

     goto custom_mutator_stage;
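
The condition is reshuffled so the common outcomes (the entry already passed deterministic fuzzing, or deterministic fuzzing is disabled) are tested first, and each clause carries a branch hint. likely()/unlikely() are the usual GCC/Clang expectation macros; AFL++ defines its own variants, but the conventional form is:

    /* Conventional branch-prediction hint macros (illustrative; see the
       AFL++ headers for the project's own definitions). */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)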

src/afl-fuzz-queue.c

@@ -194,9 +194,14 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {

   u32 i;
   u64 fav_factor;
-  u64 fuzz_p2 = next_pow2(q->n_fuzz);
+  u64 fuzz_p2;
+
+  if (unlikely(afl->schedule >= FAST))
+    fuzz_p2 = next_pow2(q->n_fuzz);
+  else
+    fuzz_p2 = q->fuzz_level;

-  if (afl->schedule == MMOPT || afl->schedule == RARE ||
+  if (unlikely(afl->schedule == MMOPT || afl->schedule == RARE) ||
       unlikely(afl->fixed_seed)) {

     fav_factor = q->len << 2;
@@ -217,9 +222,13 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {

       /* Faster-executing or smaller test cases are favored. */

       u64 top_rated_fav_factor;
-      u64 top_rated_fuzz_p2 = next_pow2(afl->top_rated[i]->n_fuzz);
+      u64 top_rated_fuzz_p2;
+
+      if (unlikely(afl->schedule >= FAST))
+        top_rated_fuzz_p2 = next_pow2(afl->top_rated[i]->n_fuzz);
+      else
+        top_rated_fuzz_p2 = afl->top_rated[i]->fuzz_level;

-      if (afl->schedule == MMOPT || afl->schedule == RARE ||
+      if (unlikely(afl->schedule == MMOPT || afl->schedule == RARE) ||
           unlikely(afl->fixed_seed)) {

         top_rated_fav_factor = afl->top_rated[i]->len << 2;
@@ -241,7 +250,7 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {

   }

-  if (afl->schedule == MMOPT || afl->schedule == RARE ||
+  if (unlikely(afl->schedule == MMOPT || afl->schedule == RARE) ||
       unlikely(afl->fixed_seed)) {

     if (fav_factor > afl->top_rated[i]->len << 2) { continue; }
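
For the default schedule, these hunks use the entry's fuzz_level (how often it has been fuzzed) directly, so next_pow2() over n_fuzz only runs for the AFLfast-family schedules. For reference, next_pow2() rounds up to the nearest power of two; a sketch of a helper with that behavior, assuming inputs below 2^63:

    /* Illustration only (AFL++ ships its own helper): smallest power of
       two >= val, e.g. 5 -> 8 and 8 -> 8. */
    static u64 next_pow2_sketch(u64 val) {
      u64 ret = 1;
      while (val > ret) ret <<= 1;
      return ret;
    }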
@@ -593,9 +602,12 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {

   }

-  if (factor > MAX_FACTOR) { factor = MAX_FACTOR; }
+  if (unlikely(afl->schedule >= FAST)) {

-  perf_score *= factor / POWER_BETA;
+    if (factor > MAX_FACTOR) { factor = MAX_FACTOR; }
+    perf_score *= factor / POWER_BETA;
+
+  }

   // MOpt mode
   if (afl->limit_time_sig != 0 && afl->max_depth - q->depth < 3) {