Mirror of https://github.com/AFLplusplus/AFLplusplus.git (synced 2025-06-13 10:38:07 +00:00)

run code formatter
@@ -43,7 +43,6 @@ void mark_as_det_done(struct queue_entry* q) {
 
 }
 
-
 /* Mark as variable. Create symlinks if possible to make it easier to examine
    the files. */
@@ -69,7 +68,6 @@ void mark_as_variable(struct queue_entry* q) {
 
 }
 
-
 /* Mark / unmark as redundant (edge-only). This is not used for restoring state,
    but may be useful for post-processing datasets. */
@@ -102,18 +100,17 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
 
 }
 
-
 /* Append new test case to the queue. */
 
 void add_to_queue(u8* fname, u32 len, u8 passed_det) {
 
   struct queue_entry* q = ck_alloc(sizeof(struct queue_entry));
 
-  q->fname        = fname;
-  q->len          = len;
-  q->depth        = cur_depth + 1;
-  q->passed_det   = passed_det;
-  q->n_fuzz       = 1;
+  q->fname      = fname;
+  q->len        = len;
+  q->depth      = cur_depth + 1;
+  q->passed_det = passed_det;
+  q->n_fuzz     = 1;
 
   if (q->depth > max_depth) max_depth = q->depth;
@@ -122,7 +119,9 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
 
     queue_top->next = q;
     queue_top = q;
 
-  } else q_prev100 = queue = queue_top = q;
+  } else
+
+    q_prev100 = queue = queue_top = q;
 
   ++queued_paths;
   ++pending_not_fuzzed;
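The reflowed else-branch above is the empty-list case of a plain singly linked tail append: queue_top tracks the tail so insertion stays O(1), and q_prev100 seeds a shortcut chain that upstream AFL appears to advance every 100 entries to speed up seeking through the queue. A minimal standalone sketch of the same append pattern, using hypothetical names (node, head, tail) in place of the fuzzer's globals:

#include <stdlib.h>

struct node {

  struct node* next;
  int payload;

};

static struct node *head, *tail; /* stand-ins for queue / queue_top */

/* O(1) tail append; the empty-list branch mirrors the
   "q_prev100 = queue = queue_top = q" initialization above. */
static void append(struct node* n) {

  n->next = NULL;

  if (tail) {

    tail->next = n;
    tail = n;

  } else {

    head = tail = n;

  }

}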
@@ -140,7 +139,6 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
 
 }
 
-
 /* Destroy the entire queue. */
 
 void destroy_queue(void) {
@@ -159,7 +157,6 @@ void destroy_queue(void) {
 
 }
 
-
 /* When we bump into a new path, we call this to see if the path appears
    more "favorable" than any of the existing ones. The purpose of the
    "favorables" is to have a minimal set of paths that trigger all the bits
@@ -170,12 +167,11 @@ void destroy_queue(void) {
    for every byte in the bitmap. We win that slot if there is no previous
    contender, or if the contender has a more favorable speed x size factor. */
 
-
 void update_bitmap_score(struct queue_entry* q) {
 
   u32 i;
   u64 fav_factor = q->exec_us * q->len;
-  u64 fuzz_p2 = next_p2 (q->n_fuzz);
+  u64 fuzz_p2 = next_p2(q->n_fuzz);
 
   /* For every byte set in trace_bits[], see if there is a previous winner,
      and how it compares to us. */
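next_p2() itself is not part of this diff's context; judging from its use here and in AFLFast-derived code, it rounds a value up to the nearest power of two so that n_fuzz counts are compared by order of magnitude rather than exactly. A sketch consistent with that use (an assumed implementation, not taken from this commit):

#include <stdint.h>

/* Smallest power of two >= val, so e.g. 5, 6, 7 and 8 all bucket to 8;
   update_bitmap_score() compares these buckets, not raw counts. */
static uint64_t next_p2(uint64_t val) {

  uint64_t ret = 1;
  while (val > ret)
    ret <<= 1;
  return ret;

}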
@@ -184,48 +180,54 @@ void update_bitmap_score(struct queue_entry* q) {
 
     if (trace_bits[i]) {
 
-      if (top_rated[i]) {
-
-        /* Faster-executing or smaller test cases are favored. */
-        u64 top_rated_fuzz_p2 = next_p2 (top_rated[i]->n_fuzz);
-        u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
-
-        if (fuzz_p2 > top_rated_fuzz_p2) {
-          continue;
-        } else if (fuzz_p2 == top_rated_fuzz_p2) {
-          if (fav_factor > top_rated_fav_factor)
-            continue;
-        }
-
-        if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
-
-        /* Looks like we're going to win. Decrease ref count for the
-           previous winner, discard its trace_bits[] if necessary. */
-
-        if (!--top_rated[i]->tc_ref) {
-          ck_free(top_rated[i]->trace_mini);
-          top_rated[i]->trace_mini = 0;
-        }
-
-      }
-
-      /* Insert ourselves as the new winner. */
-
-      top_rated[i] = q;
-      ++q->tc_ref;
-
-      if (!q->trace_mini) {
-        q->trace_mini = ck_alloc(MAP_SIZE >> 3);
-        minimize_bits(q->trace_mini, trace_bits);
-      }
-
-      score_changed = 1;
-
-    }
-
-  }
+      if (top_rated[i]) {
+
+        /* Faster-executing or smaller test cases are favored. */
+        u64 top_rated_fuzz_p2 = next_p2(top_rated[i]->n_fuzz);
+        u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
+
+        if (fuzz_p2 > top_rated_fuzz_p2) {
+
+          continue;
+
+        } else if (fuzz_p2 == top_rated_fuzz_p2) {
+
+          if (fav_factor > top_rated_fav_factor) continue;
+
+        }
+
+        if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
+
+        /* Looks like we're going to win. Decrease ref count for the
+           previous winner, discard its trace_bits[] if necessary. */
+
+        if (!--top_rated[i]->tc_ref) {
+
+          ck_free(top_rated[i]->trace_mini);
+          top_rated[i]->trace_mini = 0;
+
+        }
+
+      }
+
+      /* Insert ourselves as the new winner. */
+
+      top_rated[i] = q;
+      ++q->tc_ref;
+
+      if (!q->trace_mini) {
+
+        q->trace_mini = ck_alloc(MAP_SIZE >> 3);
+        minimize_bits(q->trace_mini, trace_bits);
+
+      }
+
+      score_changed = 1;
+
+    }
+
+  }
 
 }
 
 /* The second part of the mechanism discussed above is a routine that
    goes over top_rated[] entries, and then sequentially grabs winners for
    previously-unseen bytes (temp_v) and marks them as favored, at least
@@ -235,8 +237,8 @@ void update_bitmap_score(struct queue_entry* q) {
 
 void cull_queue(void) {
 
   struct queue_entry* q;
-  static u8 temp_v[MAP_SIZE >> 3];
-  u32 i;
+  static u8           temp_v[MAP_SIZE >> 3];
+  u32                 i;
 
   if (dumb_mode || !score_changed) return;
@@ -244,14 +246,16 @@
 
   memset(temp_v, 255, MAP_SIZE >> 3);
 
-  queued_favored = 0;
+  queued_favored  = 0;
   pending_favored = 0;
 
   q = queue;
 
   while (q) {
+
     q->favored = 0;
     q = q->next;
+
   }
 
   /* Let's see if anything in the bitmap isn't captured in temp_v.
@@ -264,27 +268,29 @@ void cull_queue(void) {
 
     /* Remove all bits belonging to the current entry from temp_v. */
 
-    while (j--) 
+    while (j--)
       if (top_rated[i]->trace_mini[j])
        temp_v[j] &= ~top_rated[i]->trace_mini[j];
 
     top_rated[i]->favored = 1;
     ++queued_favored;
 
-    if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored;
+    if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed)
+      ++pending_favored;
 
   }
 
   q = queue;
 
   while (q) {
+
     mark_as_redundant(q, !q->favored);
     q = q->next;
+
   }
 
 }
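The culling pass above is a greedy set cover: temp_v starts with every bitmap bit unclaimed, each top_rated[] winner strikes out the bytes it covers, and an entry is marked favored only if it still contributes something unclaimed. A stripped-down sketch of that idea with hypothetical standalone names; the enclosing loop condition is reconstructed from stock AFL and lies partly outside this hunk's context:

#include <string.h>

#define MAP_BYTES 8 /* stand-in for MAP_SIZE >> 3 */
#define MAP_BITS (MAP_BYTES * 8)

struct entry {

  unsigned char trace_mini[MAP_BYTES]; /* compact coverage bitmap */
  int favored;

};

/* Greedy cover over bitmap bits: for each still-unclaimed bit that has a
   winner, favor that winner and strike out everything it covers. */
static void cull(struct entry* top_rated[MAP_BITS]) {

  unsigned char temp_v[MAP_BYTES];
  memset(temp_v, 255, MAP_BYTES); /* all bits start unclaimed */

  for (int i = 0; i < MAP_BITS; i++)
    if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {

      int j = MAP_BYTES;
      while (j--)
        temp_v[j] &= ~top_rated[i]->trace_mini[j];

      top_rated[i]->favored = 1;

    }

}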
 /* Calculate case desirability score to adjust the length of havoc fuzzing.
    A helper function for fuzz_one(). Maybe some of these constants should
    go into config.h. */
 
@@ -305,34 +311,51 @@ u32 calculate_score(struct queue_entry* q) {
 
   // Longer execution time means longer work on the input, the deeper in
   // coverage, the better the fuzzing, right? -mh
 
-  if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10;
-  else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25;
-  else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50;
-  else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75;
-  else if (q->exec_us * 4 < avg_exec_us) perf_score = 300;
-  else if (q->exec_us * 3 < avg_exec_us) perf_score = 200;
-  else if (q->exec_us * 2 < avg_exec_us) perf_score = 150;
+  if (q->exec_us * 0.1 > avg_exec_us)
+    perf_score = 10;
+  else if (q->exec_us * 0.25 > avg_exec_us)
+    perf_score = 25;
+  else if (q->exec_us * 0.5 > avg_exec_us)
+    perf_score = 50;
+  else if (q->exec_us * 0.75 > avg_exec_us)
+    perf_score = 75;
+  else if (q->exec_us * 4 < avg_exec_us)
+    perf_score = 300;
+  else if (q->exec_us * 3 < avg_exec_us)
+    perf_score = 200;
+  else if (q->exec_us * 2 < avg_exec_us)
+    perf_score = 150;
 
   /* Adjust score based on bitmap size. The working theory is that better
      coverage translates to better targets. Multiplier from 0.25x to 3x. */
 
-  if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3;
-  else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2;
-  else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5;
-  else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25;
-  else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5;
-  else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75;
+  if (q->bitmap_size * 0.3 > avg_bitmap_size)
+    perf_score *= 3;
+  else if (q->bitmap_size * 0.5 > avg_bitmap_size)
+    perf_score *= 2;
+  else if (q->bitmap_size * 0.75 > avg_bitmap_size)
+    perf_score *= 1.5;
+  else if (q->bitmap_size * 3 < avg_bitmap_size)
+    perf_score *= 0.25;
+  else if (q->bitmap_size * 2 < avg_bitmap_size)
+    perf_score *= 0.5;
+  else if (q->bitmap_size * 1.5 < avg_bitmap_size)
+    perf_score *= 0.75;
 
   /* Adjust score based on handicap. Handicap is proportional to how late
      in the game we learned about this path. Latecomers are allowed to run
      for a bit longer until they catch up with the rest. */
 
   if (q->handicap >= 4) {
+
     perf_score *= 4;
     q->handicap -= 4;
+
   } else if (q->handicap) {
+
     perf_score *= 2;
     --q->handicap;
+
   }
 
   /* Final adjustment based on input depth, under the assumption that fuzzing
@@ -341,11 +364,11 @@ u32 calculate_score(struct queue_entry* q) {
 
   switch (q->depth) {
 
-    case 0 ... 3:   break;
-    case 4 ... 7:   perf_score *= 2; break;
-    case 8 ... 13:  perf_score *= 3; break;
+    case 0 ... 3: break;
+    case 4 ... 7: perf_score *= 2; break;
+    case 8 ... 13: perf_score *= 3; break;
     case 14 ... 25: perf_score *= 4; break;
-    default:        perf_score *= 5;
+    default: perf_score *= 5;
 
   }
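For a concrete sense of how the adjustments above compound, here is a hedged worked example as a tiny standalone program. It assumes perf_score starts at 100 as in stock AFL (the initialization sits outside this diff), and note that the speed tier assigns a value rather than multiplying:

#include <stdio.h>

typedef unsigned int u32;

/* Hedged worked example of the multiplier chain above; each comment names
   the branch from the hunks that would fire for this hypothetical entry. */
int main(void) {

  u32 perf_score = 100;

  perf_score = 150; /* q->exec_us * 2 < avg_exec_us: twice as fast, assigns */
  perf_score *= 2;  /* q->bitmap_size * 0.5 > avg_bitmap_size: broad coverage */
  perf_score *= 2;  /* q->handicap == 1: late-found path gets extra time */
  perf_score *= 2;  /* q->depth == 5: falls into "case 4 ... 7" */

  printf("%u\n", perf_score); /* 1200, before the power-schedule factor */
  return 0;

}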
@@ -357,61 +380,69 @@ u32 calculate_score(struct queue_entry* q) {
 
   switch (schedule) {
 
-    case EXPLORE:
-      break;
+    case EXPLORE: break;
 
-    case EXPLOIT:
-      factor = MAX_FACTOR;
-      break;
+    case EXPLOIT: factor = MAX_FACTOR; break;
 
     case COE:
       fuzz_total = 0;
       n_paths = 0;
 
-      struct queue_entry *queue_it = queue;
+      struct queue_entry* queue_it = queue;
       while (queue_it) {
+
         fuzz_total += queue_it->n_fuzz;
-        n_paths ++;
+        n_paths++;
         queue_it = queue_it->next;
+
       }
 
       fuzz_mu = fuzz_total / n_paths;
       if (fuzz <= fuzz_mu) {
+
         if (q->fuzz_level < 16)
-          factor = ((u32) (1 << q->fuzz_level));
+          factor = ((u32)(1 << q->fuzz_level));
         else
           factor = MAX_FACTOR;
 
       } else {
 
         factor = 0;
 
       }
 
       break;
 
     case FAST:
       if (q->fuzz_level < 16) {
 
-        factor = ((u32) (1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
+        factor = ((u32)(1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
 
       } else
 
-        factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2 (fuzz));
+        factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2(fuzz));
 
       break;
 
-    case LIN:
-      factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
-      break;
+    case LIN: factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); break;
 
     case QUAD:
       factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
       break;
 
-    default:
-      PFATAL ("Unknown Power Schedule");
+    default: PFATAL("Unknown Power Schedule");
 
   }
 
-  if (factor > MAX_FACTOR)
-    factor = MAX_FACTOR;
+  if (factor > MAX_FACTOR) factor = MAX_FACTOR;
 
   perf_score *= factor / POWER_BETA;
 
   // MOpt mode
-  if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2;
-  else if (perf_score < 1) perf_score = 1; // Add a lower bound to AFLFast's energy assignment strategies
+  if (limit_time_sig != 0 && max_depth - q->depth < 3)
+    perf_score *= 2;
+  else if (perf_score < 1)
+    perf_score =
+        1;  // Add a lower bound to AFLFast's energy assignment strategies
 
   /* Make sure that we don't go over limit. */
@@ -420,3 +451,4 @@ u32 calculate_score(struct queue_entry* q) {
 
   return perf_score;
 
 }
+
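The schedules in the switch above assign energy per AFLFast: factor grows exponentially with how many rounds a path has already been fuzzed (fuzz_level) and shrinks with how often its bitmap signature has been seen (fuzz, i.e. n_fuzz). A standalone sketch of the FAST rule as reflowed in this commit; the MAX_FACTOR value is an assumed placeholder (the real constant comes from config.h), and next_p2 is re-declared as in the earlier sketch:

#include <stdint.h>

#define MAX_FACTOR 32 /* assumed placeholder; the real value lives in config.h */

static uint64_t next_p2(uint64_t val) {

  uint64_t ret = 1;
  while (val > ret)
    ret <<= 1;
  return ret;

}

/* FAST schedule as in the switch above: exponential in fuzz_level,
   inversely proportional to how often this path was exercised (fuzz). */
static uint32_t fast_factor(uint32_t fuzz_level, uint64_t fuzz) {

  uint32_t factor;

  if (fuzz_level < 16)
    factor = ((uint32_t)(1 << fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
  else
    factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2(fuzz));

  if (factor > MAX_FACTOR) factor = MAX_FACTOR;
  return factor;

}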