Andrea Fioraldi
2020-01-17 16:41:30 +01:00
parent f24135f1ed
commit b6c5974b37
10 changed files with 239 additions and 200 deletions

View File

@@ -652,8 +652,9 @@ void save_cmdline(u32, char**);
extern u8* cmplog_binary;
extern s32 cmplog_forksrv_pid;
void init_cmplog_forkserver(char **argv);
u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, u32 exec_cksum);
void init_cmplog_forkserver(char** argv);
u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len,
u32 exec_cksum);
/**** Inline routines ****/

View File

@@ -6,7 +6,7 @@
#define CMP_MAP_W 65536
#define CMP_MAP_H 256
#define SHAPE_BYTES(x) (x+1)
#define SHAPE_BYTES(x) (x + 1)
#define CMP_TYPE_INS 0
#define CMP_TYPE_RTN 1
@@ -18,9 +18,9 @@ struct cmp_header {
unsigned cnt : 20;
unsigned id : 16;
unsigned shape : 5; // from 0 to 31
unsigned shape : 5; // from 0 to 31
unsigned type : 1;
} __attribute__((packed));
struct cmp_operands {
@@ -41,9 +41,10 @@ typedef struct cmp_operands cmp_map_list[CMP_MAP_H];
struct cmp_map {
struct cmp_header headers[CMP_MAP_W];
struct cmp_header headers[CMP_MAP_W];
struct cmp_operands log[CMP_MAP_W][CMP_MAP_H];
};
#endif
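
For orientation, a minimal sketch of how a consumer of this header might walk the shared map after a run. Names come from the header above; the dump_cmp_map() function is hypothetical, and struct cmp_operands is assumed to expose the v0/v1 operand pair that the runtime hooks below write.

#include <stdio.h>
#include "cmplog.h"   /* the header above; exact path may differ */

/* Illustrative only: print every logged comparison. */
void dump_cmp_map(struct cmp_map* map) {
  for (unsigned k = 0; k < CMP_MAP_W; ++k) {
    unsigned hits = map->headers[k].hits;
    if (!hits) continue;
    if (hits > CMP_MAP_H) hits = CMP_MAP_H;   /* only CMP_MAP_H slots are kept per key */
    for (unsigned i = 0; i < hits; ++i)
      printf("key %u: %llu vs %llu (%u bytes)\n", k,
             (unsigned long long)map->log[k][i].v0,
             (unsigned long long)map->log[k][i].v1,
             SHAPE_BYTES(map->headers[k].shape));
  }
}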

View File

@@ -62,13 +62,13 @@
/* Default memory limit for child process (MB): */
#ifndef __NetBSD__
# ifndef WORD_SIZE_64
# define MEM_LIMIT 25
# else
# define MEM_LIMIT 50
# endif /* ^!WORD_SIZE_64 */
#ifndef WORD_SIZE_64
#define MEM_LIMIT 25
#else
# define MEM_LIMIT 200
#define MEM_LIMIT 50
#endif /* ^!WORD_SIZE_64 */
#else
#define MEM_LIMIT 200
#endif
/* Default memory limit when running in QEMU mode (MB): */

View File

@@ -200,11 +200,12 @@ static void edit_params(u32 argc, char** argv) {
if (getenv("AFL_CMPLOG"))
cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard,trace-cmp";
else
cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default
// cc_params[cc_par_cnt++] = "-mllvm";
// cc_params[cc_par_cnt++] =
// "-fsanitize-coverage=trace-cmp,trace-div,trace-gep";
// cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
cc_params[cc_par_cnt++] =
"-fsanitize-coverage=trace-pc-guard"; // edge coverage by default
// cc_params[cc_par_cnt++] = "-mllvm";
// cc_params[cc_par_cnt++] =
// "-fsanitize-coverage=trace-cmp,trace-div,trace-gep";
// cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
#else
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = "-load";

View File

@@ -67,7 +67,7 @@ __thread u32 __afl_prev_loc;
#endif
struct cmp_map* __afl_cmp_map;
__thread u32 __afl_cmp_counter;
__thread u32 __afl_cmp_counter;
/* Running in persistent mode? */
@@ -128,26 +128,26 @@ static void __afl_map_shm(void) {
__afl_area_ptr[0] = 1;
}
if (getenv("__AFL_CMPLOG_MODE__")) {
id_str = getenv(CMPLOG_SHM_ENV_VAR);
if (id_str) {
u32 shm_id = atoi(id_str);
__afl_cmp_map = shmat(shm_id, NULL, 0);
if (__afl_cmp_map == (void*)-1) _exit(1);
}
} else if (getenv("AFL_CMPLOG")) {
// during compilation, do this to avoid segfault
__afl_cmp_map = calloc(sizeof(struct cmp_map), 1);
}
}
@@ -161,7 +161,7 @@ static void __afl_start_forkserver(void) {
u8 child_stopped = 0;
void (*old_sigchld_handler)(int)=0;// = signal(SIGCHLD, SIG_DFL);
void (*old_sigchld_handler)(int) = 0; // = signal(SIGCHLD, SIG_DFL);
/* Phone home and tell the parent that we're OK. If parent isn't there,
assume we're not running in forkserver mode and just execute program. */
@@ -325,61 +325,63 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
///// CmpLog instrumentation
void __sanitizer_cov_trace_cmp1(uint8_t Arg1, uint8_t Arg2) {
return;
return;
}
void __sanitizer_cov_trace_cmp2(uint16_t Arg1, uint16_t Arg2) {
uintptr_t k = (uintptr_t)__builtin_return_address(0);
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits+1;
__afl_cmp_map->headers[k].hits = hits + 1;
// if (!__afl_cmp_map->headers[k].cnt)
// __afl_cmp_map->headers[k].cnt = __afl_cmp_counter++;
__afl_cmp_map->headers[k].shape = 1;
//__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
hits &= CMP_MAP_H -1;
hits &= CMP_MAP_H - 1;
__afl_cmp_map->log[k][hits].v0 = Arg1;
__afl_cmp_map->log[k][hits].v1 = Arg2;
}
void __sanitizer_cov_trace_cmp4(uint32_t Arg1, uint32_t Arg2) {
uintptr_t k = (uintptr_t)__builtin_return_address(0);
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits+1;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = 3;
hits &= CMP_MAP_H -1;
hits &= CMP_MAP_H - 1;
__afl_cmp_map->log[k][hits].v0 = Arg1;
__afl_cmp_map->log[k][hits].v1 = Arg2;
}
void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) {
uintptr_t k = (uintptr_t)__builtin_return_address(0);
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits+1;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = 7;
hits &= CMP_MAP_H -1;
hits &= CMP_MAP_H - 1;
__afl_cmp_map->log[k][hits].v0 = Arg1;
__afl_cmp_map->log[k][hits].v1 = Arg2;
}
#if defined(__APPLE__)
@@ -396,30 +398,29 @@ void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2)
__attribute__((alias("__sanitizer_cov_trace_cmp4")));
void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2)
__attribute__((alias("__sanitizer_cov_trace_cmp8")));
#endif /* defined(__APPLE__) */
#endif /* defined(__APPLE__) */
void __sanitizer_cov_trace_switch(uint64_t Val, uint64_t* Cases) {
for (uint64_t i = 0; i < Cases[0]; i++) {
uintptr_t k = (uintptr_t)__builtin_return_address(0) +i;
uintptr_t k = (uintptr_t)__builtin_return_address(0) + i;
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits+1;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = 7;
hits &= CMP_MAP_H -1;
hits &= CMP_MAP_H - 1;
__afl_cmp_map->log[k][hits].v0 = Val;
__afl_cmp_map->log[k][hits].v1 = Cases[i + 2];
}
}
/* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard.
It remains non-operational in the traditional, plugin-backed LLVM mode.
For more info about 'trace-pc-guard', see README.llvm.
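
As a hedged illustration of when these hooks fire (not part of this commit): a target built through the AFL_CMPLOG path of afl-clang-fast gets -fsanitize-coverage=trace-cmp, so clang typically emits a trace-cmp call before each integer comparison, and the handlers above record both operands keyed by a hash of the call-site return address. The function below is hypothetical example target code.

#include <stdint.h>

/* Hypothetical target code. With trace-cmp enabled, clang inserts a call
   such as __sanitizer_cov_trace_const_cmp4(0x11223344, x) before this
   branch (the const_cmp variants are aliased to the plain handlers in the
   runtime above), so both 0x11223344 and the runtime value of x end up in
   __afl_cmp_map->log for this call site. */
int header_matches(uint32_t x) {

  return x == 0x11223344;

}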

View File

@@ -524,7 +524,12 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
struct queue_entry* q = queue;
while (q) {
if (q->exec_cksum == cksum) { q->n_fuzz = q->n_fuzz + 1; break ; }
if (q->exec_cksum == cksum) {
q->n_fuzz = q->n_fuzz + 1;
break;
}
q = q->next;

View File

@@ -17,17 +17,19 @@
u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len);
extern struct cmp_map* cmp_map; // defined in afl-sharedmem.c
extern struct cmp_map* cmp_map; // defined in afl-sharedmem.c
u8* cmplog_binary;
u8* cmplog_binary;
char** its_argv;
///// Colorization
struct range {
u32 start;
u32 end;
struct range * next;
u32 start;
u32 end;
struct range* next;
};
struct range* add_range(struct range* ranges, u32 start, u32 end) {
@@ -46,36 +48,42 @@ struct range* pop_biggest_range(struct range** ranges) {
struct range* prev = NULL;
struct range* rmax = NULL;
struct range* prev_rmax = NULL;
u32 max_size = 0;
u32 max_size = 0;
while (r) {
u32 s = r->end - r->start;
if (s >= max_size) {
max_size = s;
prev_rmax = prev;
rmax = r;
}
prev = r;
r = r->next;
}
if (rmax) {
if (prev_rmax)
prev_rmax->next = rmax->next;
else
*ranges = rmax->next;
}
return rmax;
}
u8 get_exec_checksum(u8* buf, u32 len, u32* cksum) {
if (unlikely(common_fuzz_stuff(its_argv, buf, len)))
return 1;
*cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1;
*cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
return 0;
}
@@ -91,50 +99,51 @@ static void rand_replace(u8* buf, u32 len) {
u8 colorization(u8* buf, u32 len, u32 exec_cksum) {
struct range* ranges = add_range(NULL, 0, len);
u8* backup = ck_alloc_nozero(len);
u8* backup = ck_alloc_nozero(len);
u64 orig_hit_cnt, new_hit_cnt;
orig_hit_cnt = queued_paths + unique_crashes;
stage_name = "colorization";
stage_short = "colorization";
stage_max = 1000;
struct range* rng;
stage_cur = stage_max;
while ((rng = pop_biggest_range(&ranges)) != NULL && stage_cur) {
u32 s = rng->end - rng->start;
memcpy(backup, buf + rng->start, s);
rand_replace(buf + rng->start, s);
u32 cksum;
if (unlikely(get_exec_checksum(buf, len, &cksum)))
return 1;
if (unlikely(get_exec_checksum(buf, len, &cksum))) return 1;
if (cksum != exec_cksum) {
ranges = add_range(ranges, rng->start, rng->start + s/2);
ranges = add_range(ranges, rng->start + s/2 +1, rng->end);
ranges = add_range(ranges, rng->start, rng->start + s / 2);
ranges = add_range(ranges, rng->start + s / 2 + 1, rng->end);
memcpy(buf + rng->start, backup, s);
}
ck_free(rng);
--stage_cur;
}
new_hit_cnt = queued_paths + unique_crashes;
stage_finds[STAGE_COLORIZATION] += new_hit_cnt - orig_hit_cnt;
stage_cycles[STAGE_COLORIZATION] += stage_max - stage_cur;
while (ranges) {
rng = ranges;
ranges = ranges->next;
ck_free(rng);
}
return 0;
}
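
The body of rand_replace() falls outside this hunk; a minimal sketch of what it presumably does is shown below, assuming AFL's UR() random helper and types. The real implementation may differ.

/* Sketch only: overwrite every byte with a random value so the colorized
   input has high entropy, while colorization() above checks via the
   execution checksum that the target's behaviour is unchanged. */
static void rand_replace_sketch(u8* buf, u32 len) {

  u32 i;
  for (i = 0; i < len; ++i)
    buf[i] = UR(256);   /* UR(limit): AFL's random number helper */

}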
@@ -147,21 +156,20 @@ u8 its_fuzz(u32 idx, u32 size, u8* buf, u32 len, u8* status) {
orig_hit_cnt = queued_paths + unique_crashes;
if (unlikely(common_fuzz_stuff(its_argv, buf, len)))
return 1;
if (unlikely(common_fuzz_stuff(its_argv, buf, len))) return 1;
new_hit_cnt = queued_paths + unique_crashes;
if (unlikely(new_hit_cnt != orig_hit_cnt)) {
*status = 1;
} else {
if (size >= MIN_AUTO_EXTRA && size <= MAX_AUTO_EXTRA)
maybe_add_auto(&buf[idx], size);
*status = 2;
}
return 0;
@@ -169,8 +177,9 @@ u8 its_fuzz(u32 idx, u32 size, u8* buf, u32 len, u8* status) {
}
u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
u8* orig_buf, u8* buf, u32 len, u8 do_reverse, u8* status) {
u8* orig_buf, u8* buf, u32 len, u8 do_reverse,
u8* status) {
u64* buf_64 = (u64*)&buf[idx];
u32* buf_32 = (u32*)&buf[idx];
u16* buf_16 = (u16*)&buf[idx];
@@ -179,59 +188,75 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
u32* o_buf_32 = (u32*)&orig_buf[idx];
u16* o_buf_16 = (u16*)&orig_buf[idx];
// u8* o_buf_8 = &orig_buf[idx];
u32 its_len = len - idx;
*status = 0;
if (SHAPE_BYTES(h->shape) == 8) {
if (its_len >= 8 && *buf_64 == pattern && *o_buf_64 == pattern) {
*buf_64 = repl;
if (unlikely(its_fuzz(idx, 8, buf, len, status)))
return 1;
if (unlikely(its_fuzz(idx, 8, buf, len, status))) return 1;
*buf_64 = pattern;
}
// reverse encoding
if (do_reverse)
if (unlikely(cmp_extend_encoding(h, SWAP64(pattern), SWAP64(repl), idx,
orig_buf, buf, len, 0, status)))
return 1;
}
if (SHAPE_BYTES(h->shape) == 4 || *status == 2) {
if (its_len >= 4 && *buf_32 == (u32)pattern && *o_buf_32 == (u32)pattern) {
*buf_32 = (u32)repl;
if (unlikely(its_fuzz(idx, 4, buf, len, status)))
orig_buf, buf, len, 0, status)))
return 1;
}
if (SHAPE_BYTES(h->shape) == 4 || *status == 2) {
if (its_len >= 4 && *buf_32 == (u32)pattern && *o_buf_32 == (u32)pattern) {
*buf_32 = (u32)repl;
if (unlikely(its_fuzz(idx, 4, buf, len, status))) return 1;
*buf_32 = pattern;
}
// reverse encoding
if (do_reverse)
if (unlikely(cmp_extend_encoding(h, SWAP32(pattern), SWAP32(repl), idx,
orig_buf, buf, len, 0, status)))
return 1;
orig_buf, buf, len, 0, status)))
return 1;
}
if (SHAPE_BYTES(h->shape) == 2 || *status == 2) {
if (its_len >= 2 && *buf_16 == (u16)pattern && *o_buf_16 == (u16)pattern) {
*buf_16 = (u16)repl;
if (unlikely(its_fuzz(idx, 2, buf, len, status)))
return 1;
if (unlikely(its_fuzz(idx, 2, buf, len, status))) return 1;
*buf_16 = (u16)pattern;
}
// reverse encoding
if (do_reverse)
if (unlikely(cmp_extend_encoding(h, SWAP16(pattern), SWAP16(repl), idx,
orig_buf, buf, len, 0, status)))
return 1;
orig_buf, buf, len, 0, status)))
return 1;
}
/*if (SHAPE_BYTES(h->shape) == 1 || *status == 2) {
if (its_len >= 2 && *buf_8 == (u8)pattern && *o_buf_8 == (u8)pattern) {
*buf_8 = (u8)repl;
if (unlikely(its_fuzz(idx, 1, buf, len, status)))
return 1;
*buf_16 = (u16)pattern;
}
}*/
return 0;
@@ -241,44 +266,49 @@ u8 cmp_extend_encoding(struct cmp_header* h, u64 pattern, u64 repl, u32 idx,
u8 cmp_fuzz(u32 key, u8* orig_buf, u8* buf, u32 len) {
struct cmp_header* h = &cmp_map->headers[key];
u32 i, j, idx;
u32 i, j, idx;
u32 loggeds = h->hits;
if (h->hits > CMP_MAP_H)
loggeds = CMP_MAP_H;
if (h->hits > CMP_MAP_H) loggeds = CMP_MAP_H;
u8 status;
// opt not in the paper
u32 fails = 0;
for (i = 0; i < loggeds; ++i) {
struct cmp_operands* o = &cmp_map->log[key][i];
// opt not in the paper
for (j = 0; j < i; ++j)
if (cmp_map->log[key][j].v0 == o->v0 && cmp_map->log[key][i].v1 == o->v1)
goto cmp_fuzz_next_iter;
for (idx = 0; idx < len && fails < 8; ++idx) {
if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len, 1, &status)))
if (unlikely(cmp_extend_encoding(h, o->v0, o->v1, idx, orig_buf, buf, len,
1, &status)))
return 1;
if (status == 2) ++fails;
else if (status == 1) break;
if (unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len, 1, &status)))
if (status == 2)
++fails;
else if (status == 1)
break;
if (unlikely(cmp_extend_encoding(h, o->v1, o->v0, idx, orig_buf, buf, len,
1, &status)))
return 1;
if (status == 2) ++fails;
else if (status == 1) break;
if (status == 2)
++fails;
else if (status == 1)
break;
}
cmp_fuzz_next_iter:
cmp_fuzz_next_iter:
stage_cur++;
}
return 0;
}
@@ -286,67 +316,60 @@ cmp_fuzz_next_iter:
///// Input to State stage
// queue_cur->exec_cksum
u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len, u32 exec_cksum) {
u8 input_to_state_stage(char** argv, u8* orig_buf, u8* buf, u32 len,
u32 exec_cksum) {
its_argv = argv;
if (unlikely(colorization(buf, len, exec_cksum)))
return 1;
if (unlikely(colorization(buf, len, exec_cksum))) return 1;
// do it manually, forkserver clear only trace_bits
memset(cmp_map->headers, 0, sizeof(cmp_map->headers));
if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len)))
return 1;
if (unlikely(common_fuzz_cmplog_stuff(argv, buf, len))) return 1;
u64 orig_hit_cnt, new_hit_cnt;
u64 orig_execs = total_execs;
orig_hit_cnt = queued_paths + unique_crashes;
stage_name = "input-to-state";
stage_short = "its";
stage_max = 0;
stage_cur = 0;
u32 k;
for (k = 0; k < CMP_MAP_W; ++k) {
if (!cmp_map->headers[k].hits)
continue;
if (!cmp_map->headers[k].hits) continue;
if (cmp_map->headers[k].hits > CMP_MAP_H)
stage_max += CMP_MAP_H;
else
stage_max += cmp_map->headers[k].hits;
}
for (k = 0; k < CMP_MAP_W; ++k) {
if (!cmp_map->headers[k].hits)
continue;
if (!cmp_map->headers[k].hits) continue;
cmp_fuzz(k, orig_buf, buf, len);
}
memcpy(buf, orig_buf, len);
new_hit_cnt = queued_paths + unique_crashes;
stage_finds[STAGE_ITS] += new_hit_cnt - orig_hit_cnt;
stage_cycles[STAGE_ITS] += total_execs - orig_execs;
return 0;
}
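
Condensed for orientation, a sketch of the stage above using the same identifiers, with stage bookkeeping and error reporting elided: colorize the input, replay it once under the cmplog binary to fill cmp_map, then hand every populated key to cmp_fuzz(). The wrapper name is illustrative only.

/* Orientation sketch; the authoritative version is the hunk above. */
static u8 input_to_state_sketch(char** argv, u8* orig_buf, u8* buf, u32 len,
                                u32 exec_cksum) {

  its_argv = argv;
  if (colorization(buf, len, exec_cksum)) return 1;        /* 1: colorize     */
  memset(cmp_map->headers, 0, sizeof(cmp_map->headers));   /* forkserver only
                                                              clears trace_bits */
  if (common_fuzz_cmplog_stuff(argv, buf, len)) return 1;  /* 2: log compares */
  for (u32 k = 0; k < CMP_MAP_W; ++k)                      /* 3: patch operands
                                                              back into input */
    if (cmp_map->headers[k].hits) cmp_fuzz(k, orig_buf, buf, len);
  memcpy(buf, orig_buf, len);                              /* restore buffer  */
  return 0;

}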
//// CmpLog forkserver
s32 cmplog_forksrv_pid,
cmplog_child_pid,
cmplog_fsrv_ctl_fd,
cmplog_fsrv_st_fd;
s32 cmplog_forksrv_pid, cmplog_child_pid, cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd;
void init_cmplog_forkserver(char **argv) {
void init_cmplog_forkserver(char** argv) {
static struct itimerval it;
int st_pipe[2], ctl_pipe[2];
@@ -475,7 +498,7 @@ void init_cmplog_forkserver(char **argv) {
/* Use a distinctive bitmap signature to tell the parent about execv()
falling through. */
*(u32 *)trace_bits = EXEC_FAIL_SIG;
*(u32*)trace_bits = EXEC_FAIL_SIG;
exit(0);
}
@@ -519,7 +542,9 @@ void init_cmplog_forkserver(char **argv) {
}
if (child_timed_out)
FATAL("Timeout while initializing cmplog fork server (adjusting -t may help)");
FATAL(
"Timeout while initializing cmplog fork server (adjusting -t may "
"help)");
if (waitpid(cmplog_forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
@@ -598,7 +623,7 @@ void init_cmplog_forkserver(char **argv) {
}
if (*(u32 *)trace_bits == EXEC_FAIL_SIG)
if (*(u32*)trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute target application ('%s')", argv[0]);
if (mem_limit && mem_limit < 500 && uses_asan) {
@@ -757,7 +782,7 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
"symbolize=0:"
"msan_track_origins=0", 0);
setenv("__AFL_CMPLOG_MODE__", "1", 1);
argv[0] = cmplog_binary;
@@ -781,18 +806,21 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
if ((res = write(cmplog_fsrv_ctl_fd, &prev_timed_out, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from cmplog fork server (OOM?)");
RPFATAL(res,
"Unable to request new process from cmplog fork server (OOM?)");
}
if ((res = read(cmplog_fsrv_st_fd, &cmplog_child_pid, 4)) != 4) {
if (stop_soon) return 0;
RPFATAL(res, "Unable to request new process from cmplog fork server (OOM?)");
RPFATAL(res,
"Unable to request new process from cmplog fork server (OOM?)");
}
if (cmplog_child_pid <= 0) FATAL("Cmplog fork server is misbehaving (OOM?)");
if (cmplog_child_pid <= 0)
FATAL("Cmplog fork server is misbehaving (OOM?)");
}
@@ -804,7 +832,8 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
setitimer(ITIMER_REAL, &it, NULL);
/* The SIGALRM handler simply kills the cmplog_child_pid and sets child_timed_out. */
/* The SIGALRM handler simply kills the cmplog_child_pid and sets
* child_timed_out. */
if (dumb_mode == 1 || no_forkserver) {
@@ -900,7 +929,7 @@ u8 run_cmplog_target(char** argv, u32 timeout) {
u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len) {
u8 fault;
if (post_handler) {
out_buf = post_handler(out_buf, &len);
@@ -948,3 +977,4 @@ u8 common_fuzz_cmplog_stuff(char** argv, u8* out_buf, u32 len) {
return 0;
}

View File

@@ -532,10 +532,10 @@ u8 fuzz_one_original(char** argv) {
}
if (cmplog_mode) {
if(input_to_state_stage(argv, in_buf, out_buf, len, queue_cur->exec_cksum))
if (input_to_state_stage(argv, in_buf, out_buf, len, queue_cur->exec_cksum))
goto abandon_entry;
}
/* Skip right away if -d is given, if it has not been chosen sufficiently

View File

@@ -334,9 +334,9 @@ void show_stats(void) {
/* Lord, forgive me this. */
SAYF(SET_G1 bSTG bLT bH bSTOP cCYA
SAYF(SET_G1 bSTG bLT bH bSTOP cCYA
" process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA
" overall results " bSTG bH2 bH2 bRT "\n");
" overall results " bSTG bH2 bH2 bRT "\n");
if (dumb_mode) {
@@ -413,9 +413,9 @@ void show_stats(void) {
" uniq hangs : " cRST "%-6s" bSTG bV "\n",
DTD(cur_ms, last_hang_time), tmp);
SAYF(bVR bH bSTOP cCYA
SAYF(bVR bH bSTOP cCYA
" cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
" map coverage " bSTG bH bHT bH20 bH2 bVL "\n");
" map coverage " bSTG bH bHT bH20 bH2 bVL "\n");
/* This gets funny because we want to print several variable-length variables
together, but then cram them into a fixed-width field - so we need to
@@ -443,9 +443,9 @@ void show_stats(void) {
SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp);
SAYF(bVR bH bSTOP cCYA
SAYF(bVR bH bSTOP cCYA
" stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
" findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
" findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
((double)queued_favored) * 100 / queued_paths);
@@ -514,7 +514,7 @@ void show_stats(void) {
/* Aaaalmost there... hold on! */
SAYF(bVR bH cCYA bSTOP
SAYF(bVR bH cCYA bSTOP
" fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA
" path geometry " bSTG bH5 bH2 bVL "\n");
@@ -598,7 +598,8 @@ void show_stats(void) {
sprintf(tmp, "%s/%s, %s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_PYTHON]),
DI(stage_cycles[STAGE_PYTHON]), DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(stage_cycles[STAGE_CUSTOM_MUTATOR]), DI(stage_finds[STAGE_COLORIZATION]),
DI(stage_cycles[STAGE_CUSTOM_MUTATOR]),
DI(stage_finds[STAGE_COLORIZATION]),
DI(stage_cycles[STAGE_COLORIZATION]), DI(stage_finds[STAGE_ITS]),
DI(stage_cycles[STAGE_ITS]));

View File

@@ -72,8 +72,8 @@ static s32 shm_id; /* ID of the SHM region */
static s32 cmplog_shm_id;
#endif
int cmplog_mode;
struct cmp_map* cmp_map;
int cmplog_mode;
struct cmp_map *cmp_map;
/* Get rid of shared memory (atexit handler). */
@@ -96,8 +96,7 @@ void remove_shm(void) {
#else
shmctl(shm_id, IPC_RMID, NULL);
if (cmplog_mode)
shmctl(cmplog_shm_id, IPC_RMID, NULL);
if (cmplog_mode) shmctl(cmplog_shm_id, IPC_RMID, NULL);
#endif
}
@@ -155,15 +154,16 @@ void setup_shm(unsigned char dumb_mode) {
shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);
if (shm_id < 0) PFATAL("shmget() failed");
if (cmplog_mode) {
cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map), IPC_CREAT | IPC_EXCL | 0600);
cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map),
IPC_CREAT | IPC_EXCL | 0600);
if (cmplog_shm_id < 0) PFATAL("shmget() failed");
}
atexit(remove_shm);
shm_str = alloc_printf("%d", shm_id);
@@ -176,21 +176,20 @@ void setup_shm(unsigned char dumb_mode) {
if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1);
ck_free(shm_str);
if (cmplog_mode) {
shm_str = alloc_printf("%d", cmplog_shm_id);
if (!dumb_mode) setenv(CMPLOG_SHM_ENV_VAR, shm_str, 1);
ck_free(shm_str);
}
trace_bits = shmat(shm_id, NULL, 0);
if (cmplog_mode)
cmp_map = shmat(cmplog_shm_id, NULL, 0);
if (cmplog_mode) cmp_map = shmat(cmplog_shm_id, NULL, 0);
if (!trace_bits) PFATAL("shmat() failed");
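
Putting the fuzzer-side and target-side hunks together, a hedged sketch of the CmpLog shared-memory handshake: setup_shm() creates the map and exports its id, run_cmplog_target() sets __AFL_CMPLOG_MODE__, and __afl_map_shm() in the runtime attaches. Identifiers come from this diff; alloc_printf()/ck_free() are AFL's allocation helpers; the wrapper function names are illustrative and error handling is omitted.

#include <stdlib.h>
#include <sys/shm.h>

/* Fuzzer side (cf. setup_shm above): create the map and export its id. */
static void export_cmplog_shm(void) {

  cmplog_shm_id = shmget(IPC_PRIVATE, sizeof(struct cmp_map),
                         IPC_CREAT | IPC_EXCL | 0600);
  u8* shm_str = alloc_printf("%d", cmplog_shm_id);
  setenv(CMPLOG_SHM_ENV_VAR, (char*)shm_str, 1);
  ck_free(shm_str);

}

/* Target side (cf. __afl_map_shm in the runtime): only attach when
   run_cmplog_target() has flagged the child as a cmplog run. */
static struct cmp_map* attach_cmplog_map(void) {

  char* id_str = getenv(CMPLOG_SHM_ENV_VAR);
  if (!getenv("__AFL_CMPLOG_MODE__") || !id_str) return NULL;
  return (struct cmp_map*)shmat(atoi(id_str), NULL, 0);

}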