Mirror of https://github.com/AFLplusplus/AFLplusplus.git, synced 2025-06-14 11:08:06 +00:00
fix -Z, remove q->next
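The queue is no longer walked through each entry's next pointer: every traversal now indexes the afl->queue_buf array and skips entries whose disabled flag is set, and each entry records its array position in a new id field. A minimal sketch of the old and new traversal patterns, simplified from the hunks below (visit() is only a placeholder for the loop body, not an AFL++ function):

    /* old pattern: follow the singly linked list via q->next */
    struct queue_entry *q;
    for (q = afl->queue; q; q = q->next) { visit(q); }

    /* new pattern: index into queue_buf, skipping disabled entries */
    u32 idx;
    for (idx = 0; idx < afl->queued_paths; idx++) {

      q = afl->queue_buf[idx];
      if (unlikely(q->disabled)) { continue; }
      visit(q);

    }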
@@ -154,6 +154,7 @@ struct queue_entry {

   u8 *fname;                            /* File name for the test case      */
   u32 len;                              /* Input length                     */
+  u32 id;                               /* entry number in queue_buf        */

   u8 colorized,                         /* Do not run redqueen stage again  */
      cal_failed;                        /* Calibration failed?              */
@@ -191,8 +192,7 @@ struct queue_entry {
   u8 *cmplog_colorinput;                /* the result buf of colorization   */
   struct tainted *taint;                /* Taint information from CmpLog    */

-  struct queue_entry *mother,           /* queue entry this based on        */
-      *next;                            /* Next element, if any             */
+  struct queue_entry *mother;           /* queue entry this based on        */

 };

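With *next removed from struct queue_entry, the new id field ties each entry back to its slot in afl->queue_buf. The invariant is established in add_to_queue() (see the @@ -465,6 hunk further down); roughly:

    /* when an entry q is appended to the queue buffer ... */
    queue_buf[afl->queued_paths - 1] = q;
    q->id = afl->queued_paths - 1;
    /* ... so afl->queue_buf[q->id] == q holds for every entry afterwards */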
@@ -287,7 +287,7 @@ typedef uint32_t XXH32_hash_t;
 #else
 #include <limits.h>
 #if UINT_MAX == 0xFFFFFFFFUL
 typedef unsigned int XXH32_hash_t;
 #else
 #if ULONG_MAX == 0xFFFFFFFFUL
 typedef unsigned long XXH32_hash_t;
@@ -68,7 +68,7 @@ class CompareTransform : public ModulePass {
   const char *getPassName() const override {

 #else
   StringRef getPassName() const override {

 #endif
     return "transforms compare functions";
@@ -817,12 +817,15 @@ void read_testcases(afl_state_t *afl, u8 *directory) {

 void perform_dry_run(afl_state_t *afl) {

-  struct queue_entry *q = afl->queue;
-  u32 cal_failures = 0;
+  struct queue_entry *q;
+  u32 cal_failures = 0, idx;
   u8 *skip_crashes = afl->afl_env.afl_skip_crashes;
   u8 *use_mem;

-  while (q) {
+  for (idx = 0; idx < afl->queued_paths; idx++) {
+
+    q = afl->queue_buf[idx];
+    if (unlikely(q->disabled)) { continue; }

     u8  res;
     s32 fd;
@@ -1052,20 +1055,22 @@ void perform_dry_run(afl_state_t *afl) {

           p->disabled = 1;
           p->perf_score = 0;
-          while (p && p->next != q)
-            p = p->next;

-          if (p)
-            p->next = q->next;
-          else
-            afl->queue = q->next;
+          u32 i = 0;
+          while (unlikely(afl->queue_buf[i]->disabled)) {
+
+            ++i;
+
+          }
+
+          afl->queue = afl->queue_buf[i];

           afl->max_depth = 0;
-          p = afl->queue;
-          while (p) {
+          for (i = 0; i < afl->queued_paths; i++) {

-            if (p->depth > afl->max_depth) afl->max_depth = p->depth;
-            p = p->next;
+            if (!afl->queue_buf[i]->disabled &&
+                afl->queue_buf[i]->depth > afl->max_depth)
+              afl->max_depth = afl->queue_buf[i]->depth;

           }

@@ -1098,8 +1103,6 @@ void perform_dry_run(afl_state_t *afl) {

     }

-    q = q->next;
-
   }

   if (cal_failures) {
@@ -1125,31 +1128,23 @@ void perform_dry_run(afl_state_t *afl) {

   /* Now we remove all entries from the queue that have a duplicate trace map */

-  q = afl->queue;
-  struct queue_entry *p, *prev = NULL;
-  int duplicates = 0;
-
-restart_outer_cull_loop:
-
-  while (q) {
-
-    if (q->cal_failed || !q->exec_cksum) { goto next_entry; }
-
-  restart_inner_cull_loop:
-
-    p = q->next;
-
-    while (p) {
-
-      if (!p->cal_failed && p->exec_cksum == q->exec_cksum) {
-
-        duplicates = 1;
-
-        // We do not remove any of the memory allocated because for
-        // splicing the data might still be interesting.
-        // We only decouple them from the linked list.
-        // This will result in some leaks at exit, but who cares.
+  u32 duplicates = 0, i;
+
+  for (idx = 0; idx < afl->queued_paths; idx++) {
+
+    q = afl->queue_buf[idx];
+    if (q->disabled || q->cal_failed || !q->exec_cksum) { continue; }
+
+    u32 done = 0;
+    for (i = idx + 1; i < afl->queued_paths && !done; i++) {
+
+      struct queue_entry *p = afl->queue_buf[i];
+      if (p->disabled || p->cal_failed || !p->exec_cksum) { continue; }
+
+      if (p->exec_cksum == q->exec_cksum) {
+
+        duplicates = 1;

         // we keep the shorter file
         if (p->len >= q->len) {

@@ -1163,8 +1158,6 @@ restart_outer_cull_loop:

           p->disabled = 1;
           p->perf_score = 0;
-          q->next = p->next;
-          goto restart_inner_cull_loop;

         } else {

@@ -1178,35 +1171,26 @@ restart_outer_cull_loop:

           q->disabled = 1;
           q->perf_score = 0;
-          if (prev)
-            prev->next = q = p;
-          else
-            afl->queue = q = p;
-
-          goto restart_outer_cull_loop;
+          done = 1;

         }

       }

-      p = p->next;
-
     }

-  next_entry:
-
-    prev = q;
-    q = q->next;
-
   }

   if (duplicates) {

     afl->max_depth = 0;
-    q = afl->queue;
-    while (q) {

-      if (q->depth > afl->max_depth) afl->max_depth = q->depth;
-      q = q->next;
+    for (idx = 0; idx < afl->queued_paths; idx++) {
+
+      if (!afl->queue_buf[idx]->disabled &&
+          afl->queue_buf[idx]->depth > afl->max_depth)
+        afl->max_depth = afl->queue_buf[idx]->depth;

     }

@@ -1256,11 +1240,15 @@ static void link_or_copy(u8 *old_path, u8 *new_path) {
 void pivot_inputs(afl_state_t *afl) {

   struct queue_entry *q = afl->queue;
-  u32 id = 0;
+  u32 id = 0, i;

   ACTF("Creating hard links for all input files...");

-  while (q) {
+  for (i = 0; i < afl->queued_paths; i++) {
+
+    q = afl->queue_buf[i];
+
+    if (unlikely(q->disabled)) { continue; }

     u8 *nfn, *rsl = strrchr(q->fname, '/');
     u32 orig_id;
@@ -1288,19 +1276,14 @@ void pivot_inputs(afl_state_t *afl) {
       afl->resuming_fuzz = 1;
       nfn = alloc_printf("%s/queue/%s", afl->out_dir, rsl);

-      /* Since we're at it, let's also try to find parent and figure out the
+      /* Since we're at it, let's also get the parent and figure out the
          appropriate depth for this entry. */

       src_str = strchr(rsl + 3, ':');

       if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) {

-        struct queue_entry *s = afl->queue;
-        while (src_id-- && s) {
-
-          s = s->next;
-
-        }
-
+        struct queue_entry *s = afl->queue_buf[src_id];
         if (s) { q->depth = s->depth + 1; }

@@ -1348,7 +1331,6 @@ void pivot_inputs(afl_state_t *afl) {

     if (q->passed_det) { mark_as_det_done(afl, q); }

-    q = q->next;
     ++id;

   }
@@ -544,7 +544,8 @@ u8 fuzz_one_original(afl_state_t *afl) {
   if (likely(!afl->old_seed_selection))
     orig_perf = perf_score = afl->queue_cur->perf_score;
   else
-    orig_perf = perf_score = calculate_score(afl, afl->queue_cur);
+    afl->queue_cur->perf_score = orig_perf = perf_score =
+        calculate_score(afl, afl->queue_cur);

   if (unlikely(perf_score <= 0)) { goto abandon_entry; }

@@ -143,7 +143,7 @@ void create_alias_table(afl_state_t *afl) {

     struct queue_entry *q = afl->queue_buf[i];

-    if (!q->disabled) { q->perf_score = calculate_score(afl, q); }
+    if (likely(!q->disabled)) { q->perf_score = calculate_score(afl, q); }

     sum += q->perf_score;

@@ -444,7 +444,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {

   if (afl->queue_top) {

-    afl->queue_top->next = q;
     afl->queue_top = q;

   } else {
@@ -465,6 +464,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
       AFL_BUF_PARAM(queue), afl->queued_paths * sizeof(struct queue_entry *));
   if (unlikely(!queue_buf)) { PFATAL("alloc"); }
   queue_buf[afl->queued_paths - 1] = q;
+  q->id = afl->queued_paths - 1;

   afl->last_path_time = get_cur_time();

@@ -641,10 +641,9 @@ void cull_queue(afl_state_t *afl) {

   if (likely(!afl->score_changed || afl->non_instrumented_mode)) { return; }

-  struct queue_entry *q;
-  u32 len = (afl->fsrv.map_size >> 3);
-  u32 i;
-  u8 *temp_v = afl->map_tmp_buf;
+  u32 len = (afl->fsrv.map_size >> 3);
+  u32 i;
+  u8 *temp_v = afl->map_tmp_buf;

   afl->score_changed = 0;

@@ -653,12 +652,9 @@ void cull_queue(afl_state_t *afl) {
   afl->queued_favored = 0;
   afl->pending_favored = 0;

-  q = afl->queue;
+  for (i = 0; i < afl->queued_paths; i++) {

-  while (q) {
-
-    q->favored = 0;
-    q = q->next;
+    afl->queue_buf[i]->favored = 0;

   }

@@ -697,12 +693,13 @@ void cull_queue(afl_state_t *afl) {

   }

-  q = afl->queue;
+  for (i = 0; i < afl->queued_paths; i++) {

-  while (q) {
+    if (likely(!afl->queue_buf[i]->disabled)) {

-    mark_as_redundant(afl, q, !q->favored);
-    q = q->next;
+      mark_as_redundant(afl, afl->queue_buf[i], !afl->queue_buf[i]->favored);
+
+    }

   }

@@ -852,13 +849,15 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
       // Don't modify perf_score for unfuzzed seeds
       if (q->fuzz_level == 0) break;

-      struct queue_entry *queue_it = afl->queue;
-      while (queue_it) {
+      u32 i;
+      for (i = 0; i < afl->queued_paths; i++) {

-        fuzz_mu += log2(afl->n_fuzz[q->n_fuzz_entry]);
-        n_paths++;
+        if (likely(!afl->queue_buf[i]->disabled)) {

-        queue_it = queue_it->next;
+          fuzz_mu += log2(afl->n_fuzz[afl->queue_buf[i]->n_fuzz_entry]);
+          n_paths++;
+
+        }

       }

@@ -1014,8 +1014,8 @@ void show_stats(afl_state_t *afl) {

 void show_init_stats(afl_state_t *afl) {

-  struct queue_entry *q = afl->queue;
-  u32 min_bits = 0, max_bits = 0, max_len = 0, count = 0;
+  struct queue_entry *q;
+  u32 min_bits = 0, max_bits = 0, max_len = 0, count = 0, i;
   u64 min_us = 0, max_us = 0;
   u64 avg_us = 0;

@@ -1028,7 +1028,10 @@ void show_init_stats(afl_state_t *afl) {

   }

-  while (q) {
+  for (i = 0; i < afl->queued_paths; i++) {
+
+    q = afl->queue_buf[i];
+    if (unlikely(q->disabled)) { continue; }

     if (!min_us || q->exec_us < min_us) { min_us = q->exec_us; }
     if (q->exec_us > max_us) { max_us = q->exec_us; }
@@ -1039,7 +1042,6 @@ void show_init_stats(afl_state_t *afl) {
     if (q->len > max_len) { max_len = q->len; }

     ++count;
-    q = q->next;

   }

@@ -1558,45 +1558,6 @@ int main(int argc, char **argv_orig, char **envp) {

   perform_dry_run(afl);

-  /*
-    if (!user_set_cache && afl->q_testcase_max_cache_size) {
-
-      / * The user defined not a fixed number of entries for the cache.
-           Hence we autodetect a good value. After the dry run inputs are
-           trimmed and we know the average and max size of the input seeds.
-           We use this information to set a fitting size to max entries
-           based on the cache size. * /
-
-      struct queue_entry *q = afl->queue;
-      u64 size = 0, count = 0, avg = 0, max = 0;
-
-      while (q) {
-
-        ++count;
-        size += q->len;
-        if (max < q->len) { max = q->len; }
-        q = q->next;
-
-      }
-
-      if (count) {
-
-        avg = size / count;
-        avg = ((avg + max) / 2) + 1;
-
-      }
-
-      if (avg < 10240) { avg = 10240; }
-
-      afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_size / avg;
-
-      if (afl->q_testcase_max_cache_entries > 32768)
-        afl->q_testcase_max_cache_entries = 32768;
-
-    }
-
-  */
-
   if (afl->q_testcase_max_cache_entries) {

     afl->q_testcase_cache =
@@ -1668,7 +1629,10 @@ int main(int argc, char **argv_orig, char **envp) {
   if (unlikely(afl->old_seed_selection)) {

     afl->current_entry = 0;
-    afl->queue_cur = afl->queue;
+    while (unlikely(afl->queue_buf[afl->current_entry]->disabled)) {
+      ++afl->current_entry;
+    }
+    afl->queue_cur = afl->queue_buf[afl->current_entry];

     if (unlikely(seek_to)) {

@@ -1800,12 +1764,14 @@ int main(int argc, char **argv_orig, char **envp) {

       }

-      struct queue_entry *q = afl->queue;
       // we must recalculate the scores of all queue entries
-      while (q) {
+      for (i = 0; i < (s32)afl->queued_paths; i++) {

-        update_bitmap_score(afl, q);
-        q = q->next;
+        if (likely(!afl->queue_buf[i]->disabled)) {
+
+          update_bitmap_score(afl, afl->queue_buf[i]);
+
+        }

       }

@@ -1847,8 +1813,15 @@ int main(int argc, char **argv_orig, char **envp) {

     if (unlikely(afl->old_seed_selection)) {

-      afl->queue_cur = afl->queue_cur->next;
-      ++afl->current_entry;
+      while (++afl->current_entry < afl->queued_paths &&
+             afl->queue_buf[afl->current_entry]->disabled)
+        ;
+      if (unlikely(afl->current_entry >= afl->queued_paths ||
+                   afl->queue_buf[afl->current_entry] == NULL ||
+                   afl->queue_buf[afl->current_entry]->disabled))
+        afl->queue_cur = NULL;
+      else
+        afl->queue_cur = afl->queue_buf[afl->current_entry];

     }

@@ -284,7 +284,7 @@ library_list_t *find_library(char *name) {
 // this seems to work for clang too. nice :) requires gcc 4.4+
 #pragma GCC push_options
 #pragma GCC optimize("O0")
 void breakpoint(void) {

   if (debug) fprintf(stderr, "Breakpoint function \"breakpoint\" reached.\n");

@@ -161,8 +161,8 @@ static void __tokencap_load_mappings(void) {

 #elif defined __FreeBSD__ || defined __OpenBSD__ || defined __NetBSD__

 #if defined __FreeBSD__
   int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, __tokencap_pid};
 #elif defined __OpenBSD__
   int mib[] = {CTL_KERN, KERN_PROC_VMMAP, __tokencap_pid};
 #elif defined __NetBSD__
@@ -177,7 +177,7 @@ static void __tokencap_load_mappings(void) {

 #if defined __FreeBSD__ || defined __NetBSD__
   len = len * 4 / 3;
 #elif defined __OpenBSD__
   len -= len % sizeof(struct kinfo_vmentry);
 #endif

@@ -202,8 +202,8 @@ static void __tokencap_load_mappings(void) {

 #if defined __FreeBSD__ || defined __NetBSD__

 #if defined __FreeBSD__
     size_t size = region->kve_structsize;

     if (size == 0) break;
 #elif defined __NetBSD__
@@ -51,7 +51,7 @@ __AFL_FUZZ_INIT();
 /* To ensure checks are not optimized out it is recommended to disable
    code optimization for the fuzzer harness main() */
 #pragma clang optimize off
 #pragma GCC optimize("O0")

 int main(int argc, char **argv) {

@@ -24,7 +24,7 @@ __AFL_FUZZ_INIT();
 /* To ensure checks are not optimized out it is recommended to disable
    code optimization for the fuzzer harness main() */
 #pragma clang optimize off
 #pragma GCC optimize("O0")

 int main(int argc, char **argv) {
