fix -Z, remove q->next

aflpp
2021-01-31 17:29:37 +01:00
parent 9d08f0d098
commit e5116c6d55
12 changed files with 100 additions and 143 deletions

View File

@@ -154,6 +154,7 @@ struct queue_entry {

   u8 *fname;                          /* File name for the test case      */
   u32 len;                            /* Input length                     */
+  u32 id;                             /* entry number in queue_buf        */

   u8 colorized,                      /* Do not run redqueen stage again  */
      cal_failed;                     /* Calibration failed?              */
@@ -191,8 +192,7 @@ struct queue_entry {

   u8 *               cmplog_colorinput; /* the result buf of colorization */
   struct tainted *   taint;             /* Taint information from CmpLog  */
-  struct queue_entry *mother,           /* queue entry this based on      */
-      *next;                            /* Next element, if any           */
+  struct queue_entry *mother;           /* queue entry this based on      */

 };
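
The heart of the change is visible in this struct: the intrusive next pointer is gone, and the new id field records each entry's slot in afl->queue_buf, so every former list walk becomes an index loop that skips disabled entries. A minimal standalone sketch of the new traversal shape (simplified stand-in types here, not the real afl_state_t):

#include <stddef.h>

struct queue_entry_sketch {
  unsigned int  len;
  unsigned int  id;       /* slot in queue_buf; buf[e->id] == e */
  unsigned char disabled; /* culled entries stay allocated, just skipped */
};

struct state_sketch {
  struct queue_entry_sketch **queue_buf; /* flat array of all entries */
  unsigned int                queued_paths;
};

/* every former "while (q) { ...; q = q->next; }" becomes this pattern */
static unsigned long total_len(struct state_sketch *afl) {
  unsigned long sum = 0;
  for (unsigned int i = 0; i < afl->queued_paths; i++) {
    struct queue_entry_sketch *q = afl->queue_buf[i];
    if (q->disabled) continue;
    sum += q->len;
  }
  return sum;
}

Disabled entries keep their memory (the removed comment below notes splicing may still want the data); they are only excluded from iteration, which is why nearly every converted loop in this commit gains a disabled check.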

View File

@@ -287,7 +287,7 @@ typedef uint32_t XXH32_hash_t;
 #else
 #include <limits.h>
 #if UINT_MAX == 0xFFFFFFFFUL
-typedef unsigned int XXH32_hash_t;
+typedef unsigned int XXH32_hash_t;
 #else
 #if ULONG_MAX == 0xFFFFFFFFUL
 typedef unsigned long XXH32_hash_t;

View File

@@ -68,7 +68,7 @@ class CompareTransform : public ModulePass {
   const char *getPassName() const override {
 #else
-  StringRef getPassName() const override {
+  StringRef getPassName() const override {
 #endif
   return "transforms compare functions";

View File

@@ -817,12 +817,15 @@ void read_testcases(afl_state_t *afl, u8 *directory) {

 void perform_dry_run(afl_state_t *afl) {

-  struct queue_entry *q = afl->queue;
-  u32 cal_failures = 0;
+  struct queue_entry *q;
+  u32 cal_failures = 0, idx;
   u8 * skip_crashes = afl->afl_env.afl_skip_crashes;
   u8 * use_mem;

-  while (q) {
+  for (idx = 0; idx < afl->queued_paths; idx++) {
+
+    q = afl->queue_buf[idx];
+    if (unlikely(q->disabled)) { continue; }

     u8 res;
     s32 fd;
@@ -1052,20 +1055,22 @@ void perform_dry_run(afl_state_t *afl) {

           p->disabled = 1;
           p->perf_score = 0;
-          while (p && p->next != q)
-            p = p->next;
-
-          if (p)
-            p->next = q->next;
-          else
-            afl->queue = q->next;
+
+          u32 i = 0;
+          while (unlikely(afl->queue_buf[i]->disabled)) {
+            ++i;
+          }
+          afl->queue = afl->queue_buf[i];

           afl->max_depth = 0;
-          p = afl->queue;
-          while (p) {
-            if (p->depth > afl->max_depth) afl->max_depth = p->depth;
-            p = p->next;
+          for (i = 0; i < afl->queued_paths; i++) {
+            if (!afl->queue_buf[i]->disabled &&
+                afl->queue_buf[i]->depth > afl->max_depth)
+              afl->max_depth = afl->queue_buf[i]->depth;
           }
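
When a crashing seed is disabled here, nothing is unlinked anymore: afl->queue is simply re-pointed at the first still-enabled slot (it apparently survives only as a valid-head convenience pointer), and max_depth is recomputed by a full scan. A sketch of that head-repair idiom, with a bounds check added for the standalone version (the diff relies on at least one enabled entry existing):

#include <stddef.h>

struct qe_head { unsigned char disabled; };

/* first enabled slot becomes the queue "head" after a disable */
static struct qe_head *repair_head(struct qe_head **buf, unsigned int n) {
  unsigned int i = 0;
  while (i < n && buf[i]->disabled) ++i;
  return i < n ? buf[i] : NULL;
}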
@@ -1098,8 +1103,6 @@ void perform_dry_run(afl_state_t *afl) {

   }

-    q = q->next;
-
 }

 if (cal_failures) {
@@ -1125,31 +1128,23 @@ void perform_dry_run(afl_state_t *afl) {

   /* Now we remove all entries from the queue that have a duplicate trace map */

-  q = afl->queue;
-  struct queue_entry *p, *prev = NULL;
-  int duplicates = 0;
+  u32 duplicates = 0, i;

-restart_outer_cull_loop:
-
-  while (q) {
+  for (idx = 0; idx < afl->queued_paths; idx++) {

-    if (q->cal_failed || !q->exec_cksum) { goto next_entry; }
+    q = afl->queue_buf[idx];
+    if (q->disabled || q->cal_failed || !q->exec_cksum) { continue; }

-  restart_inner_cull_loop:
-
-    p = q->next;
-
-    while (p) {
+    u32 done = 0;
+    for (i = idx + 1; i < afl->queued_paths && !done; i++) {

-      if (!p->cal_failed && p->exec_cksum == q->exec_cksum) {
+      struct queue_entry *p = afl->queue_buf[i];
+      if (p->disabled || p->cal_failed || !p->exec_cksum) { continue; }
+
+      if (p->exec_cksum == q->exec_cksum) {

         duplicates = 1;

-        // We do not remove any of the memory allocated because for
-        // splicing the data might still be interesting.
-        // We only decouple them from the linked list.
-        // This will result in some leaks at exit, but who cares.

         // we keep the shorter file
         if (p->len >= q->len) {
@@ -1163,8 +1158,6 @@ restart_outer_cull_loop:

           p->disabled = 1;
           p->perf_score = 0;
-          q->next = p->next;
-          goto restart_inner_cull_loop;

         } else {
@@ -1178,35 +1171,26 @@ restart_outer_cull_loop:

           q->disabled = 1;
           q->perf_score = 0;
-          if (prev)
-            prev->next = q = p;
-          else
-            afl->queue = q = p;
-          goto restart_outer_cull_loop;
+          done = 1;

         }

       }

-      p = p->next;
-
     }

-  next_entry:
-
-    prev = q;
-    q = q->next;
-
   }

   if (duplicates) {

     afl->max_depth = 0;

-    q = afl->queue;
-    while (q) {
-      if (q->depth > afl->max_depth) afl->max_depth = q->depth;
-      q = q->next;
+    for (idx = 0; idx < afl->queued_paths; idx++) {
+      if (!afl->queue_buf[idx]->disabled &&
+          afl->queue_buf[idx]->depth > afl->max_depth)
+        afl->max_depth = afl->queue_buf[idx]->depth;
     }
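
The goto-driven unlink dance above is replaced by two nested index loops: each live entry is compared against every later entry with a known checksum, the longer duplicate is disabled, and done ends the inner loop once the current entry itself loses. A self-contained sketch of that shape (simplified fields, not the real AFL++ state):

#include <stdint.h>

struct ent {
  uint64_t exec_cksum; /* trace-map checksum, 0 = not yet computed */
  uint32_t len;
  uint8_t  disabled, cal_failed;
};

/* disable duplicate-trace entries in place, keeping the shorter file */
static void cull_duplicates(struct ent **buf, uint32_t n) {
  for (uint32_t idx = 0; idx < n; idx++) {
    struct ent *q = buf[idx];
    if (q->disabled || q->cal_failed || !q->exec_cksum) continue;
    uint32_t done = 0;
    for (uint32_t i = idx + 1; i < n && !done; i++) {
      struct ent *p = buf[i];
      if (p->disabled || p->cal_failed || !p->exec_cksum) continue;
      if (p->exec_cksum != q->exec_cksum) continue;
      if (p->len >= q->len) {
        p->disabled = 1; /* the later, longer duplicate loses */
      } else {
        q->disabled = 1; /* current entry loses; move to the next idx */
        done = 1;
      }
    }
  }
}

No restart labels are needed anymore: disabling an entry never invalidates the indices, whereas unlinking a node invalidated the old list walk.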
@@ -1256,11 +1240,15 @@ static void link_or_copy(u8 *old_path, u8 *new_path) {

 void pivot_inputs(afl_state_t *afl) {

-  struct queue_entry *q = afl->queue;
-  u32 id = 0;
+  struct queue_entry *q;
+  u32 id = 0, i;

   ACTF("Creating hard links for all input files...");

-  while (q) {
+  for (i = 0; i < afl->queued_paths; i++) {
+
+    q = afl->queue_buf[i];
+    if (unlikely(q->disabled)) { continue; }

     u8 *nfn, *rsl = strrchr(q->fname, '/');
     u32 orig_id;
@@ -1288,19 +1276,14 @@ void pivot_inputs(afl_state_t *afl) {

       afl->resuming_fuzz = 1;
       nfn = alloc_printf("%s/queue/%s", afl->out_dir, rsl);

-      /* Since we're at it, let's also try to find parent and figure out the
+      /* Since we're at it, let's also get the parent and figure out the
          appropriate depth for this entry. */

       src_str = strchr(rsl + 3, ':');

       if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {

-        struct queue_entry *s = afl->queue;
-        while (src_id-- && s) {
-          s = s->next;
-        }
+        struct queue_entry *s = afl->queue_buf[src_id];

         if (s) { q->depth = s->depth + 1; }
@@ -1348,7 +1331,6 @@ void pivot_inputs(afl_state_t *afl) {

     if (q->passed_det) { mark_as_det_done(afl, q); }

-    q = q->next;
     ++id;

   }
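
Because a resumed testcase's filename encodes its parent id (the field matched by sscanf above), the parent lookup collapses from an O(src_id) list walk to a single array access. A hedged sketch; the bounds check is added here for the standalone version, while the diff indexes directly:

struct node { unsigned int depth; };

/* depth of a resumed entry = parent's depth + 1 */
static void set_depth(struct node **queue_buf, unsigned int queued_paths,
                      struct node *q, unsigned int src_id) {
  if (src_id < queued_paths && queue_buf[src_id]) {
    q->depth = queue_buf[src_id]->depth + 1;
  }
}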

View File

@@ -544,7 +544,8 @@ u8 fuzz_one_original(afl_state_t *afl) {

   if (likely(!afl->old_seed_selection))
     orig_perf = perf_score = afl->queue_cur->perf_score;
   else
-    orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
+    afl->queue_cur->perf_score = orig_perf = perf_score =
+        calculate_score(afl, afl->queue_cur);

   if (unlikely(perf_score <= 0)) { goto abandon_entry; }

View File

@@ -143,7 +143,7 @@ void create_alias_table(afl_state_t *afl) {

     struct queue_entry *q = afl->queue_buf[i];

-    if (!q->disabled) { q->perf_score = calculate_score(afl, q); }
+    if (likely(!q->disabled)) { q->perf_score = calculate_score(afl, q); }

     sum += q->perf_score;
@@ -444,7 +444,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {

   if (afl->queue_top) {

-    afl->queue_top->next = q;
     afl->queue_top = q;

   } else {
@@ -465,6 +464,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {

       AFL_BUF_PARAM(queue), afl->queued_paths * sizeof(struct queue_entry *));
   if (unlikely(!queue_buf)) { PFATAL("alloc"); }
   queue_buf[afl->queued_paths - 1] = q;
+  q->id = afl->queued_paths - 1;

   afl->last_path_time = get_cur_time();
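
add_to_queue now pins down the invariant the rest of the commit relies on: after every append, queue_buf[q->id] == q. A minimal sketch of the append path, with plain realloc standing in for AFL++'s afl_realloc/AFL_BUF_PARAM machinery:

#include <stdlib.h>

struct qent { unsigned int id; };

struct queue_state {
  struct qent **buf;
  unsigned int  count; /* == queued_paths */
};

/* append e and record its slot; returns -1 on allocation failure */
static int queue_push(struct queue_state *s, struct qent *e) {
  struct qent **nb = realloc(s->buf, (s->count + 1) * sizeof(*nb));
  if (!nb) return -1; /* the real code PFATALs instead */
  s->buf = nb;
  nb[s->count] = e;
  e->id = s->count++;
  return 0;
}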
@@ -641,10 +641,9 @@ void cull_queue(afl_state_t *afl) {

   if (likely(!afl->score_changed || afl->non_instrumented_mode)) { return; }

-  struct queue_entry *q;
-  u32 len = (afl->fsrv.map_size >> 3);
-  u32 i;
-  u8 * temp_v = afl->map_tmp_buf;
+  u32 len = (afl->fsrv.map_size >> 3);
+  u32 i;
+  u8 *temp_v = afl->map_tmp_buf;

   afl->score_changed = 0;
@@ -653,12 +652,9 @@ void cull_queue(afl_state_t *afl) {

   afl->queued_favored = 0;
   afl->pending_favored = 0;

-  q = afl->queue;
-
-  while (q) {
-    q->favored = 0;
-    q = q->next;
+  for (i = 0; i < afl->queued_paths; i++) {
+    afl->queue_buf[i]->favored = 0;
   }
@@ -697,12 +693,13 @@ void cull_queue(afl_state_t *afl) {

   }

-  q = afl->queue;
-
-  while (q) {
-    mark_as_redundant(afl, q, !q->favored);
-    q = q->next;
+  for (i = 0; i < afl->queued_paths; i++) {
+
+    if (likely(!afl->queue_buf[i]->disabled)) {
+      mark_as_redundant(afl, afl->queue_buf[i], !afl->queue_buf[i]->favored);
+    }

   }
@@ -852,13 +849,15 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {

       // Don't modify perf_score for unfuzzed seeds
       if (q->fuzz_level == 0) break;

-      struct queue_entry *queue_it = afl->queue;
-      while (queue_it) {
-
-        fuzz_mu += log2(afl->n_fuzz[q->n_fuzz_entry]);
-        n_paths++;
-
-        queue_it = queue_it->next;
+      u32 i;
+      for (i = 0; i < afl->queued_paths; i++) {
+
+        if (likely(!afl->queue_buf[i]->disabled)) {
+
+          fuzz_mu += log2(afl->n_fuzz[afl->queue_buf[i]->n_fuzz_entry]);
+          n_paths++;
+
+        }

       }
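
Note what the rewrite fixes in passing: the old loop iterated with queue_it but indexed afl->n_fuzz with q->n_fuzz_entry, so it summed the same seed's value on every iteration; the new loop reads each entry's own n_fuzz_entry, making fuzz_mu an actual mean of log2 fuzz counts. A sketch of the corrected computation, with parallel arrays standing in for the real structures:

#include <math.h>

/* mean of log2(fuzz count) over enabled seeds, as in the new loop */
static double mean_log2_fuzz(const unsigned int *n_fuzz,
                             const unsigned int *n_fuzz_entry,
                             const unsigned char *disabled,
                             unsigned int queued_paths) {
  double       fuzz_mu = 0.0;
  unsigned int n_paths = 0;
  for (unsigned int i = 0; i < queued_paths; i++) {
    if (disabled[i]) continue;
    fuzz_mu += log2((double)n_fuzz[n_fuzz_entry[i]]);
    n_paths++;
  }
  return n_paths ? fuzz_mu / n_paths : 0.0;
}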

View File

@@ -1014,8 +1014,8 @@ void show_stats(afl_state_t *afl) {

 void show_init_stats(afl_state_t *afl) {

-  struct queue_entry *q = afl->queue;
-  u32 min_bits = 0, max_bits = 0, max_len = 0, count = 0;
+  struct queue_entry *q;
+  u32 min_bits = 0, max_bits = 0, max_len = 0, count = 0, i;

   u64 min_us = 0, max_us = 0;
   u64 avg_us = 0;
@@ -1028,7 +1028,10 @@ void show_init_stats(afl_state_t *afl) {

   }

-  while (q) {
+  for (i = 0; i < afl->queued_paths; i++) {
+
+    q = afl->queue_buf[i];
+    if (unlikely(q->disabled)) { continue; }

     if (!min_us || q->exec_us < min_us) { min_us = q->exec_us; }
     if (q->exec_us > max_us) { max_us = q->exec_us; }
@@ -1039,7 +1042,6 @@ void show_init_stats(afl_state_t *afl) {

     if (q->len > max_len) { max_len = q->len; }

     ++count;
-    q = q->next;

   }

View File

@@ -1558,45 +1558,6 @@ int main(int argc, char **argv_orig, char **envp) {

   perform_dry_run(afl);

-  /*
-  if (!user_set_cache && afl->q_testcase_max_cache_size) {
-
-    / * The user defined not a fixed number of entries for the cache.
-        Hence we autodetect a good value. After the dry run inputs are
-        trimmed and we know the average and max size of the input seeds.
-        We use this information to set a fitting size to max entries
-        based on the cache size. * /
-
-    struct queue_entry *q = afl->queue;
-    u64 size = 0, count = 0, avg = 0, max = 0;
-
-    while (q) {
-
-      ++count;
-      size += q->len;
-      if (max < q->len) { max = q->len; }
-      q = q->next;
-
-    }
-
-    if (count) {
-
-      avg = size / count;
-      avg = ((avg + max) / 2) + 1;
-
-    }
-
-    if (avg < 10240) { avg = 10240; }
-
-    afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_size / avg;
-
-    if (afl->q_testcase_max_cache_entries > 32768)
-      afl->q_testcase_max_cache_entries = 32768;
-
-  }
-  */

   if (afl->q_testcase_max_cache_entries) {

     afl->q_testcase_cache =
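
The deleted block was the last q->next consumer in main(): an already commented-out heuristic that sized the testcase cache from the seeds seen during the dry run. Should it ever return, the same arithmetic over a flat seed array might look like this sketch (constants and formula taken from the removed code; this is not current AFL++ code):

/* max entries = cache_size / avg, avg = ((mean + max) / 2) + 1, >= 10240 */
static unsigned long long autodetect_cache_entries(
    const unsigned long long *seed_len, /* length of each queued seed */
    unsigned long long n, unsigned long long cache_size) {
  unsigned long long size = 0, max = 0, avg = 0;
  for (unsigned long long i = 0; i < n; i++) {
    size += seed_len[i];
    if (seed_len[i] > max) max = seed_len[i];
  }
  if (n) avg = ((size / n + max) / 2) + 1;
  if (avg < 10240) avg = 10240;          /* floor from the removed code */
  unsigned long long entries = cache_size / avg;
  return entries > 32768 ? 32768 : entries; /* hard cap, as before */
}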
@@ -1668,7 +1629,10 @@ int main(int argc, char **argv_orig, char **envp) {

   if (unlikely(afl->old_seed_selection)) {

     afl->current_entry = 0;
-    afl->queue_cur = afl->queue;
+    while (unlikely(afl->queue_buf[afl->current_entry]->disabled)) {
+      ++afl->current_entry;
+    }
+    afl->queue_cur = afl->queue_buf[afl->current_entry];

     if (unlikely(seek_to)) {
@@ -1800,12 +1764,14 @@ int main(int argc, char **argv_orig, char **envp) {

   }

-  struct queue_entry *q = afl->queue;
   // we must recalculate the scores of all queue entries
-  while (q) {
+  for (i = 0; i < (s32)afl->queued_paths; i++) {

-    update_bitmap_score(afl, q);
-    q = q->next;
+    if (likely(!afl->queue_buf[i]->disabled)) {
+      update_bitmap_score(afl, afl->queue_buf[i]);
+    }

   }
@@ -1847,8 +1813,15 @@ int main(int argc, char **argv_orig, char **envp) {

   if (unlikely(afl->old_seed_selection)) {

-    afl->queue_cur = afl->queue_cur->next;
-    ++afl->current_entry;
+    while (++afl->current_entry < afl->queued_paths &&
+           afl->queue_buf[afl->current_entry]->disabled)
+      ;
+    if (unlikely(afl->current_entry >= afl->queued_paths ||
+                 afl->queue_buf[afl->current_entry] == NULL ||
+                 afl->queue_buf[afl->current_entry]->disabled))
+      afl->queue_cur = NULL;
+    else
+      afl->queue_cur = afl->queue_buf[afl->current_entry];

   }
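
Advancing to the next seed in old-style (-Z) selection is now an index scan that skips disabled slots; running off the end of queue_buf plays the role the list's terminating NULL used to, signalling the end of the queue cycle. A standalone sketch of that advance step:

#include <stddef.h>

struct tc { unsigned char disabled; };

/* step *cur past disabled slots; NULL means the cycle is over */
static struct tc *advance_entry(struct tc **buf, unsigned int n,
                                unsigned int *cur) {
  while (++*cur < n && buf[*cur]->disabled) {}
  if (*cur >= n || buf[*cur] == NULL || buf[*cur]->disabled) return NULL;
  return buf[*cur];
}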

View File

@@ -284,7 +284,7 @@ library_list_t *find_library(char *name) {

 // this seems to work for clang too. nice :) requires gcc 4.4+
 #pragma GCC push_options
 #pragma GCC optimize("O0")
-void breakpoint(void) {
+void breakpoint(void) {

   if (debug) fprintf(stderr, "Breakpoint function \"breakpoint\" reached.\n");

View File

@@ -161,8 +161,8 @@ static void __tokencap_load_mappings(void) {

 #elif defined __FreeBSD__ || defined __OpenBSD__ || defined __NetBSD__

-  #if defined __FreeBSD__
-  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, __tokencap_pid};
+  #if defined __FreeBSD__
+  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, __tokencap_pid};
 #elif defined __OpenBSD__
   int mib[] = {CTL_KERN, KERN_PROC_VMMAP, __tokencap_pid};
 #elif defined __NetBSD__
@@ -177,7 +177,7 @@ static void __tokencap_load_mappings(void) {

 #if defined __FreeBSD__ || defined __NetBSD__
   len = len * 4 / 3;
-#elif defined __OpenBSD__
+#elif defined __OpenBSD__
   len -= len % sizeof(struct kinfo_vmentry);
 #endif
@@ -202,8 +202,8 @@ static void __tokencap_load_mappings(void) {

 #if defined __FreeBSD__ || defined __NetBSD__

-  #if defined __FreeBSD__
-  size_t size = region->kve_structsize;
+  #if defined __FreeBSD__
+  size_t size = region->kve_structsize;

   if (size == 0) break;

 #elif defined __NetBSD__

View File

@@ -51,7 +51,7 @@ __AFL_FUZZ_INIT();

 /* To ensure checks are not optimized out it is recommended to disable
    code optimization for the fuzzer harness main() */
 #pragma clang optimize off
-#pragma GCC optimize("O0")
+#pragma GCC optimize("O0")

 int main(int argc, char **argv) {

View File

@@ -24,7 +24,7 @@ __AFL_FUZZ_INIT();

 /* To ensure checks are not optimized out it is recommended to disable
    code optimization for the fuzzer harness main() */
 #pragma clang optimize off
-#pragma GCC optimize("O0")
+#pragma GCC optimize("O0")

 int main(int argc, char **argv) {