run code formatter

Andrea Fioraldi
2019-09-02 18:49:43 +02:00
parent 2ae4ca91b4
commit b24639d011
57 changed files with 8674 additions and 7125 deletions

View File

@ -159,8 +159,8 @@ afl-gotcpu: src/afl-gotcpu.c $(COMM_HDR) | test_x86
code-format:
./.custom-format.py -i src/*
./.custom-format.py -i include/*
./.custom-format.py -i src/*.c
./.custom-format.py -i include/*.h
./.custom-format.py -i libdislocator/*.c
./.custom-format.py -i libtokencap/*.c
./.custom-format.py -i llvm_mode/*.c

View File

@ -765,3 +765,4 @@ static const u8* main_payload_64 =
"\n";
#endif /* !_HAVE_AFL_AS_H */

View File

@ -118,15 +118,17 @@ struct queue_entry {
};
struct extra_data {
u8* data; /* Dictionary token data */
u32 len; /* Dictionary token length */
u32 hit_cnt; /* Use count in the corpus */
};
};
/* Fuzzing stages */
enum {
/* 00 */ STAGE_FLIP1,
/* 01 */ STAGE_FLIP2,
/* 02 */ STAGE_FLIP4,
@ -146,52 +148,42 @@ enum {
/* 16 */ STAGE_SPLICE,
/* 17 */ STAGE_PYTHON,
/* 18 */ STAGE_CUSTOM_MUTATOR
};
/* Stage value types */
enum {
/* 00 */ STAGE_VAL_NONE,
/* 01 */ STAGE_VAL_LE,
/* 02 */ STAGE_VAL_BE
};
/* Execution status fault codes */
enum {
/* 00 */ FAULT_NONE,
/* 01 */ FAULT_TMOUT,
/* 02 */ FAULT_CRASH,
/* 03 */ FAULT_ERROR,
/* 04 */ FAULT_NOINST,
/* 05 */ FAULT_NOBITS
};
};
/* MOpt:
Lots of globals, but mostly for the status UI and other things where it
really makes no sense to haul them around as function parameters. */
extern u64 limit_time_puppet,
orig_hit_cnt_puppet,
last_limit_time_start,
tmp_pilot_time,
total_pacemaker_time,
total_puppet_find,
temp_puppet_find,
most_time_key,
most_time,
most_execs_key,
most_execs,
old_hit_count;
extern u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
extern s32 SPLICE_CYCLES_puppet,
limit_time_sig,
key_puppet,
key_module;
extern s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
extern double w_init,
w_end,
w_now;
extern double w_init, w_end, w_now;
extern s32 g_now;
extern s32 g_max;
@ -203,15 +195,13 @@ extern s32 g_max;
extern u64 tmp_core_time;
extern s32 swarm_now;
extern double x_now[swarm_num][operator_num],
L_best[swarm_num][operator_num],
eff_best[swarm_num][operator_num],
G_best[operator_num],
v_now[swarm_num][operator_num],
probability_now[swarm_num][operator_num],
extern double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
eff_best[swarm_num][operator_num], G_best[operator_num],
v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
swarm_fitness[swarm_num];
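The extern arrays above (x_now, L_best, eff_best, G_best, v_now, probability_now, swarm_fitness) hold the per-swarm particle state that MOpt's particle swarm optimizer uses to weight mutation operators. A minimal, hedged sketch of the kind of velocity/position update these arrays support — the 0.5 learning factors, the 0.05 floor and the normalization step are illustrative assumptions, not MOpt's exact code:

#include <stdlib.h>

#define SWARM_NUM    5
#define OPERATOR_NUM 16

/* One PSO step for a single swarm: inertia plus pulls toward the
   particle's local best and the global best, then renormalize so the
   positions can serve as operator-selection probabilities. */
static void pso_step(double x[OPERATOR_NUM], double v[OPERATOR_NUM],
                     const double l_best[OPERATOR_NUM],
                     const double g_best[OPERATOR_NUM], double w_now) {
  double sum = 0.0;
  for (int i = 0; i < OPERATOR_NUM; i++) {
    double r1 = (double)rand() / RAND_MAX, r2 = (double)rand() / RAND_MAX;
    v[i] = w_now * v[i] + 0.5 * r1 * (l_best[i] - x[i])
                        + 0.5 * r2 * (g_best[i] - x[i]);
    x[i] += v[i];
    if (x[i] < 0.05) x[i] = 0.05;   /* keep every operator selectable */
    sum += x[i];
  }
  for (int i = 0; i < OPERATOR_NUM; i++) x[i] /= sum;
}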
extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */
extern u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per
fuzz stage */
stage_finds_puppet_v2[swarm_num][operator_num],
stage_cycles_puppet_v2[swarm_num][operator_num],
stage_cycles_puppet_v3[swarm_num][operator_num],
@ -258,13 +248,13 @@ extern u32 hang_tmout; /* Timeout used for hang det (ms) */
extern u64 mem_limit; /* Memory cap for child (MB) */
extern u8 cal_cycles, /* Calibration cycles defaults */
cal_cycles_long,
debug, /* Debug mode */
cal_cycles_long, debug, /* Debug mode */
python_only; /* Python-only mode */
extern u32 stats_update_freq; /* Stats update frequency (execs) */
enum {
/* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */
/* 01 */ FAST, /* Exponential schedule */
/* 02 */ COE, /* Cut-Off Exponential schedule */
@ -273,6 +263,7 @@ enum {
/* 05 */ EXPLOIT, /* AFL's exploitation-based const. */
POWER_SCHEDULES_NUM
};
extern char* power_names[POWER_SCHEDULES_NUM];
@ -410,8 +401,6 @@ extern s32 cpu_aff; /* Selected CPU core */
extern FILE* plot_file; /* Gnuplot output file */
extern struct queue_entry *queue, /* Fuzzing queue (linked list) */
*queue_cur, /* Current offset within the queue */
*queue_top, /* Top of the list */
@ -429,14 +418,16 @@ extern u32 a_extras_cnt; /* Total number of tokens available */
u8* (*post_handler)(u8* buf, u32* len);
/* hooks for the custom mutator function */
size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed);
size_t (*custom_mutator)(u8* data, size_t size, u8* mutated_out,
size_t max_size, unsigned int seed);
size_t (*pre_save_handler)(u8* data, size_t size, u8** new_data);
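The custom_mutator hook reformatted above receives the current test case and must write a mutated copy into mutated_out, returning the new size (never exceeding max_size). A minimal sketch of a conforming callback; the function name and the single-bit-flip strategy are purely illustrative — the exported symbol that afl-fuzz actually resolves from the shared library is not shown in this hunk:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint8_t u8;

/* Illustrative callback matching the hook signature above. */
size_t custom_mutator(u8 *data, size_t size, u8 *mutated_out,
                      size_t max_size, unsigned int seed) {
  size_t out_len = size < max_size ? size : max_size;
  srand(seed);                        /* keep mutations reproducible */
  memcpy(mutated_out, data, out_len);
  if (out_len) mutated_out[rand() % out_len] ^= 1u << (rand() % 8);
  return out_len;
}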
/* Interesting values, as per config.h */
extern s8 interesting_8[INTERESTING_8_LEN];
extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
extern s32 interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
extern s32
interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
/* Python stuff */
#ifdef USE_PYTHON
@ -446,12 +437,14 @@ extern s32 interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_3
extern PyObject* py_module;
enum {
/* 00 */ PY_FUNC_INIT,
/* 01 */ PY_FUNC_FUZZ,
/* 02 */ PY_FUNC_INIT_TRIM,
/* 03 */ PY_FUNC_POST_TRIM,
/* 04 */ PY_FUNC_TRIM,
PY_FUNC_COUNT
};
extern PyObject* py_functions[PY_FUNC_COUNT];
@ -581,25 +574,27 @@ void save_cmdline(u32, char**);
have slight bias. */
static inline u32 UR(u32 limit) {
#ifdef HAVE_ARC4RANDOM
if (fixed_seed) {
return random() % limit;
}
if (fixed_seed) { return random() % limit; }
/* The boundary not being necessarily a power of 2,
we need to ensure the result uniformity. */
return arc4random_uniform(limit);
#else
if (!fixed_seed && unlikely(!rand_cnt--)) {
u32 seed[2];
ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom");
srandom(seed[0]);
rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG);
}
return random() % limit;
#endif
}
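The reformatted UR() keeps two behaviours worth noting: on systems with arc4random it prefers arc4random_uniform(), because a plain modulo over a non-power-of-two limit biases low results, and elsewhere it periodically reseeds random() from /dev/urandom. A tiny standalone illustration of the modulo-bias point — the 8-bit source is an assumption made only to keep the numbers small:

#include <stdio.h>

/* With an 8-bit source (0..255) and limit 100, raw modulo maps 256
   outcomes onto 100 buckets: values 0..55 receive 3 source values each,
   56..99 only 2, so low results come up ~50% more often. */
int main(void) {
  unsigned counts[100] = {0};
  for (unsigned s = 0; s < 256; s++) counts[s % 100]++;
  printf("bucket 0 hit %u times, bucket 99 hit %u times\n",
         counts[0], counts[99]);   /* prints 3 and 2 */
  return 0;
}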
/* Find first power of two greater or equal to val (assuming val under
@ -608,7 +603,8 @@ static inline u32 UR(u32 limit) {
static u64 next_p2(u64 val) {
u64 ret = 1;
while (val > ret) ret <<= 1;
while (val > ret)
ret <<= 1;
return ret;
}
@ -626,7 +622,6 @@ static u64 get_cur_time(void) {
}
/* Get unix time in microseconds */
static u64 get_cur_time_us(void) {

View File

@ -31,28 +31,35 @@
/* User-facing macro to sprintf() to a dynamically allocated buffer. */
#define alloc_printf(_str...) ({ \
#define alloc_printf(_str...) \
({ \
\
u8* _tmp; \
s32 _len = snprintf(NULL, 0, _str); \
if (_len < 0) FATAL("Whoa, snprintf() fails?!"); \
_tmp = ck_alloc(_len + 1); \
snprintf((char*)_tmp, _len + 1, _str); \
_tmp; \
\
})
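alloc_printf() measures the formatted length with snprintf(NULL, 0, ...), allocates exactly that many bytes plus the terminator, and formats into the fresh buffer, so the caller owns (and must ck_free()) the result. A hedged usage sketch of the same two-pass idea, using plain malloc/free instead of the AFL allocator so it compiles standalone:

#include <stdio.h>
#include <stdlib.h>

/* Same pattern as alloc_printf(), minus ck_alloc()/FATAL(). */
static char *xprintf(const char *fmt, int id) {
  int len = snprintf(NULL, 0, fmt, id);              /* pass 1: measure */
  if (len < 0) return NULL;
  char *buf = malloc((size_t)len + 1);
  if (buf) snprintf(buf, (size_t)len + 1, fmt, id);  /* pass 2: format  */
  return buf;
}

int main(void) {
  char *path = xprintf("queue/id_%06d", 42);
  if (path) { puts(path); free(path); }
  return 0;
}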
/* Macro to enforce allocation limits as a last-resort defense against
integer overflows. */
#define ALLOC_CHECK_SIZE(_s) do { \
if ((_s) > MAX_ALLOC) \
ABORT("Bad alloc request: %u bytes", (_s)); \
#define ALLOC_CHECK_SIZE(_s) \
do { \
\
if ((_s) > MAX_ALLOC) ABORT("Bad alloc request: %u bytes", (_s)); \
\
} while (0)
/* Macro to check malloc() failures and the like. */
#define ALLOC_CHECK_RESULT(_r, _s) do { \
if (!(_r)) \
ABORT("Out of memory: can't allocate %u bytes", (_s)); \
#define ALLOC_CHECK_RESULT(_r, _s) \
do { \
\
if (!(_r)) ABORT("Out of memory: can't allocate %u bytes", (_s)); \
\
} while (0)
/* Magic tokens used to mark used / freed chunks. */
@ -76,37 +83,53 @@
/* Sanity-checking macros for pointers. */
#define CHECK_PTR(_p) do { \
#define CHECK_PTR(_p) \
do { \
\
if (_p) { \
\
if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) { \
\
if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \
ABORT("Use after free."); \
else ABORT("Corrupted head alloc canary."); \
else \
ABORT("Corrupted head alloc canary."); \
\
} \
\
} \
\
} while (0)
/*
#define CHECK_PTR(_p) do { \
\
if (_p) { \
\
if (ALLOC_C1(_p) ^ ALLOC_MAGIC_C1) {\
\
if (ALLOC_C1(_p) == ALLOC_MAGIC_F) \
ABORT("Use after free."); \
else ABORT("Corrupted head alloc canary."); \
\
} \
if (ALLOC_C2(_p) ^ ALLOC_MAGIC_C2) \
ABORT("Corrupted tail alloc canary."); \
\
} \
\
} while (0)
*/
#define CHECK_PTR_EXPR(_p) ({ \
#define CHECK_PTR_EXPR(_p) \
({ \
\
typeof(_p) _tmp = (_p); \
CHECK_PTR(_tmp); \
_tmp; \
\
})
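CHECK_PTR() relies on the metadata that ck_alloc() places around each returned buffer: a head canary (ALLOC_C1) and the stored size (ALLOC_S) immediately before the user data, and a tail canary (ALLOC_C2) right after it. On free the head canary is overwritten with ALLOC_MAGIC_F, which is how the "Use after free." branch distinguishes stale pointers from plain corruption. A rough, hedged layout sketch — the field widths and magic values below are illustrative, not the exact alloc-inl.h constants, and the use-after-free detection is best-effort just like the original:

#include <stdint.h>
#include <stdlib.h>

#define MAGIC_C1 0xFF00FF00u   /* head canary while the chunk is live  */
#define MAGIC_F  0xFE00FE00u   /* head canary after the chunk is freed */
#define MAGIC_C2 0xF0          /* single-byte tail canary              */

/* layout:  [ C1 (4) ][ size (4) ][ user data ............ ][ C2 (1) ]
   the pointer handed back to the caller starts at "user data".       */
static void *guarded_alloc(uint32_t size) {
  uint8_t *raw = malloc(8 + size + 1);
  if (!raw) return NULL;
  *(uint32_t *)raw       = MAGIC_C1;
  *(uint32_t *)(raw + 4) = size;
  raw[8 + size]          = MAGIC_C2;
  return raw + 8;
}

static void guarded_free(void *p) {
  uint8_t *raw = (uint8_t *)p - 8;
  *(uint32_t *)raw = MAGIC_F;   /* a later canary check trips "use after free" */
  free(raw);
}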
/* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized
requests. */
@ -130,7 +153,6 @@ static inline void* DFL_ck_alloc_nozero(u32 size) {
}
/* Allocate a buffer, returning zeroed memory. */
static inline void* DFL_ck_alloc(u32 size) {
@ -144,7 +166,6 @@ static inline void* DFL_ck_alloc(u32 size) {
}
/* Free memory, checking for double free and corrupted heap. When DEBUG_BUILD
is set, the old memory will be also clobbered with 0xFF. */
@ -168,7 +189,6 @@ static inline void DFL_ck_free(void* mem) {
}
/* Re-allocate a buffer, checking for issues and zeroing any newly-added tail.
With DEBUG_BUILD, the buffer is always reallocated to a new addresses and the
old memory is clobbered with 0xFF. */
@ -237,14 +257,12 @@ static inline void* DFL_ck_realloc(void* orig, u32 size) {
ALLOC_S(ret) = size;
ALLOC_C2(ret) = ALLOC_MAGIC_C2;
if (size > old_size)
memset(ret + old_size, 0, size - old_size);
if (size > old_size) memset(ret + old_size, 0, size - old_size);
return (void*)ret;
}
/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up
repeated small reallocs without complicating the user code). */
@ -268,7 +286,6 @@ static inline void* DFL_ck_realloc_block(void* orig, u32 size) {
}
/* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */
static inline u8* DFL_ck_strdup(u8* str) {
@ -294,7 +311,6 @@ static inline u8* DFL_ck_strdup(u8* str) {
}
/* Create a buffer with a copy of a memory block. Returns NULL for zero-sized
or NULL inputs. */
@ -318,7 +334,6 @@ static inline void* DFL_ck_memdup(void* mem, u32 size) {
}
/* Create a buffer with a block of text, appending a NUL terminator at the end.
Returns NULL for zero-sized or NULL inputs. */
@ -345,7 +360,6 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) {
}
#ifndef DEBUG_BUILD
/* In non-debug mode, we just do straightforward aliasing of the above functions
@ -372,9 +386,11 @@ static inline u8* DFL_ck_memdup_str(u8* mem, u32 size) {
# define ALLOC_BUCKETS 4096
struct TRK_obj {
void* ptr;
char *file, *func;
u32 line;
};
# ifdef AFL_MAIN
@ -397,7 +413,6 @@ extern u32 TRK_cnt[ALLOC_BUCKETS];
# define TRKH(_ptr) (((((u32)(_ptr)) >> 16) ^ ((u32)(_ptr))) % ALLOC_BUCKETS)
/* Add a new entry to the list of allocated objects. */
static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,
@ -425,8 +440,8 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,
/* No space available - allocate more. */
TRK[bucket] = DFL_ck_realloc_block(TRK[bucket],
(TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
TRK[bucket] = DFL_ck_realloc_block(
TRK[bucket], (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
TRK[bucket][i].ptr = ptr;
TRK[bucket][i].file = (char*)file;
@ -437,7 +452,6 @@ static inline void TRK_alloc_buf(void* ptr, const char* file, const char* func,
}
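In DEBUG_BUILD mode every allocation is also recorded in TRK[], an array of ALLOC_BUCKETS growable lists indexed by TRKH(), a cheap pointer hash (upper and lower 16 bits XORed, then reduced modulo the bucket count). TRK_alloc_buf() first reuses a NULLed slot and only then grows the bucket; TRK_free_buf() NULLs the matching slot; TRK_report() lists whatever never got freed. A compact sketch of the same bucket-hash lookup, with the struct and bucket count taken from this header and the rest simplified:

#include <stddef.h>
#include <stdint.h>

#define ALLOC_BUCKETS 4096

struct TRK_obj { void *ptr; char *file, *func; uint32_t line; };

static struct TRK_obj *TRK[ALLOC_BUCKETS];
static uint32_t        TRK_cnt[ALLOC_BUCKETS];

/* Same idea as the TRKH() macro: fold the pointer, then bucket it. */
static uint32_t trk_hash(void *ptr) {
  uint32_t v = (uint32_t)(uintptr_t)ptr;
  return ((v >> 16) ^ v) % ALLOC_BUCKETS;
}

/* Linear scan of one bucket; both the alloc and free paths do this. */
static struct TRK_obj *trk_find(void *ptr) {
  uint32_t b = trk_hash(ptr);
  for (uint32_t i = 0; i < TRK_cnt[b]; i++)
    if (TRK[b][i].ptr == ptr) return &TRK[b][i];
  return NULL;
}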
/* Remove entry from the list of allocated objects. */
static inline void TRK_free_buf(void* ptr, const char* file, const char* func,
@ -460,12 +474,11 @@ static inline void TRK_free_buf(void* ptr, const char* file, const char* func,
}
WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)",
func, file, line);
WARNF("ALLOC: Attempt to free non-allocated memory in %s (%s:%u)", func, file,
line);
}
/* Do a final report on all non-deallocated objects. */
static inline void TRK_report(void) {
@ -482,7 +495,6 @@ static inline void TRK_report(void) {
}
/* Simple wrappers for non-debugging functions: */
static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func,
@ -494,7 +506,6 @@ static inline void* TRK_ck_alloc(u32 size, const char* file, const char* func,
}
static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file,
const char* func, u32 line) {
@ -505,7 +516,6 @@ static inline void* TRK_ck_realloc(void* orig, u32 size, const char* file,
}
static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file,
const char* func, u32 line) {
@ -516,7 +526,6 @@ static inline void* TRK_ck_realloc_block(void* orig, u32 size, const char* file,
}
static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func,
u32 line) {
@ -526,7 +535,6 @@ static inline void* TRK_ck_strdup(u8* str, const char* file, const char* func,
}
static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file,
const char* func, u32 line) {
@ -536,7 +544,6 @@ static inline void* TRK_ck_memdup(void* mem, u32 size, const char* file,
}
static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file,
const char* func, u32 line) {
@ -546,9 +553,8 @@ static inline void* TRK_ck_memdup_str(void* mem, u32 size, const char* file,
}
static inline void TRK_ck_free(void* ptr, const char* file,
const char* func, u32 line) {
static inline void TRK_ck_free(void* ptr, const char* file, const char* func,
u32 line) {
TRK_free_buf(ptr, file, func, line);
DFL_ck_free(ptr);
@ -557,11 +563,9 @@ static inline void TRK_ck_free(void* ptr, const char* file,
/* Aliasing user-facing names to tracking functions: */
#define ck_alloc(_p1) \
TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
# define ck_alloc(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
#define ck_alloc_nozero(_p1) \
TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
#define ck_alloc_nozero(_p1) TRK_ck_alloc(_p1, __FILE__, __FUNCTION__, __LINE__)
# define ck_realloc(_p1, _p2)\
TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
@ -569,8 +573,7 @@ static inline void TRK_ck_free(void* ptr, const char* file,
# define ck_realloc_block(_p1, _p2)\
TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
#define ck_strdup(_p1) \
TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
# define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
# define ck_memdup(_p1, _p2)\
TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
@ -578,9 +581,9 @@ static inline void TRK_ck_free(void* ptr, const char* file,
# define ck_memdup_str(_p1, _p2)\
TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
#define ck_free(_p1) \
TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
# define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
#endif /* ^!DEBUG_BUILD */
#endif /* ! _HAVE_ALLOC_INL_H */

View File

@ -22,60 +22,59 @@
#define ASHMEM_DEVICE "/dev/ashmem"
static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf)
{
static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
int ret = 0;
if (__cmd == IPC_RMID) {
int length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
struct ashmem_pin pin = {0, length};
ret = ioctl(__shmid, ASHMEM_UNPIN, &pin);
close(__shmid);
}
return ret;
}
static inline int shmget (key_t __key, size_t __size, int __shmflg)
{
static inline int shmget(key_t __key, size_t __size, int __shmflg) {
int fd, ret;
char ourkey[11];
fd = open(ASHMEM_DEVICE, O_RDWR);
if (fd < 0)
return fd;
if (fd < 0) return fd;
sprintf(ourkey, "%d", __key);
ret = ioctl(fd, ASHMEM_SET_NAME, ourkey);
if (ret < 0)
goto error;
if (ret < 0) goto error;
ret = ioctl(fd, ASHMEM_SET_SIZE, __size);
if (ret < 0)
goto error;
if (ret < 0) goto error;
return fd;
error:
close(fd);
return ret;
}
static inline void *shmat (int __shmid, const void *__shmaddr, int __shmflg)
{
static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) {
int size;
void *ptr;
size = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
if (size < 0) {
return NULL;
}
if (size < 0) { return NULL; }
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, __shmid, 0);
if (ptr == MAP_FAILED) {
return NULL;
}
if (ptr == MAP_FAILED) { return NULL; }
return ptr;
}
#endif
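These inline shims emulate the System V shared-memory API on Android's ashmem driver: shmget() opens /dev/ashmem and sets a name and size, shmat() mmap()s the region through the ashmem fd, and shmctl(IPC_RMID) unpins and closes it. A hedged sketch of how SHM setup code would drive the same three calls — error handling is trimmed and the 64 KiB size merely stands in for MAP_SIZE:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void) {
  /* On Android these resolve to the ashmem-backed shims above;
     elsewhere they are the ordinary SysV implementations. */
  int id = shmget(IPC_PRIVATE, 1 << 16, IPC_CREAT | IPC_EXCL | 0600);
  if (id < 0) { perror("shmget"); return 1; }

  unsigned char *map = shmat(id, NULL, 0);
  if (!map || map == (void *)-1) { perror("shmat"); return 1; }

  memset(map, 0, 1 << 16);          /* the coverage bitmap lives here */
  printf("trace map ready at %p\n", (void *)map);

  shmctl(id, IPC_RMID, NULL);       /* unpin/close on ashmem, remove on SysV */
  return 0;
}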

View File

@ -4,3 +4,4 @@
void detect_file_args(char **argv, u8 *prog_in);
#endif

View File

@ -349,7 +349,6 @@
#define AFL_QEMU_NOT_ZERO
/* Uncomment this to use inferior block-coverage-based instrumentation. Note
that you need to recompile the target binary for this to have any effect: */
@ -368,3 +367,4 @@
// #define IGNORE_FINDS
#endif /* ! _HAVE_CONFIG_H */

View File

@ -168,84 +168,118 @@
/* Show a prefixed warning. */
#define WARNF(x...) do { \
#define WARNF(x...) \
do { \
\
SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \
SAYF(cRST "\n"); \
\
} while (0)
/* Show a prefixed "doing something" message. */
#define ACTF(x...) do { \
#define ACTF(x...) \
do { \
\
SAYF(cLBL "[*] " cRST x); \
SAYF(cRST "\n"); \
\
} while (0)
/* Show a prefixed "success" message. */
#define OKF(x...) do { \
#define OKF(x...) \
do { \
\
SAYF(cLGN "[+] " cRST x); \
SAYF(cRST "\n"); \
\
} while (0)
/* Show a prefixed fatal error message (not used in afl). */
#define BADF(x...) do { \
#define BADF(x...) \
do { \
\
SAYF(cLRD "\n[-] " cRST x); \
SAYF(cRST "\n"); \
\
} while (0)
/* Die with a verbose non-OS fatal error message. */
#define FATAL(x...) do { \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
cRST x); \
SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", \
__FUNCTION__, __FILE__, __LINE__); \
#define FATAL(x...) \
do { \
\
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \
"\n[-] PROGRAM ABORT : " cRST x); \
SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \
__FILE__, __LINE__); \
exit(1); \
\
} while (0)
/* Die by calling abort() to provide a core dump. */
#define ABORT(x...) do { \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] PROGRAM ABORT : " \
cRST x); \
SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", \
__FUNCTION__, __FILE__, __LINE__); \
#define ABORT(x...) \
do { \
\
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \
"\n[-] PROGRAM ABORT : " cRST x); \
SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \
__FILE__, __LINE__); \
abort(); \
\
} while (0)
/* Die while also including the output of perror(). */
#define PFATAL(x...) do { \
#define PFATAL(x...) \
do { \
\
fflush(stdout); \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD "\n[-] SYSTEM ERROR : " \
cRST x); \
SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", \
__FUNCTION__, __FILE__, __LINE__); \
SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \
"\n[-] SYSTEM ERROR : " cRST x); \
SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", __FUNCTION__, \
__FILE__, __LINE__); \
SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \
exit(1); \
\
} while (0)
/* Die with FAULT() or PFAULT() depending on the value of res (used to
interpret different failure modes for read(), write(), etc). */
#define RPFATAL(res, x...) do { \
if (res < 0) PFATAL(x); else FATAL(x); \
#define RPFATAL(res, x...) \
do { \
\
if (res < 0) \
PFATAL(x); \
else \
FATAL(x); \
\
} while (0)
/* Error-checking versions of read() and write() that call RPFATAL() as
appropriate. */
#define ck_write(fd, buf, len, fn) do { \
#define ck_write(fd, buf, len, fn) \
do { \
\
u32 _len = (len); \
s32 _res = write(fd, buf, _len); \
if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \
\
} while (0)
#define ck_read(fd, buf, len, fn) do { \
#define ck_read(fd, buf, len, fn) \
do { \
\
u32 _len = (len); \
s32 _res = read(fd, buf, _len); \
if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \
\
} while (0)
#endif /* ! _HAVE_DEBUG_H */
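ck_write()/ck_read() wrap the raw syscalls and abort through RPFATAL() on short transfers, so call sites never have to check return values themselves. A short usage sketch; the helper and file name are hypothetical, and it assumes this tree's include/ directory (debug.h, types.h) is on the include path:

#include <fcntl.h>
#include <unistd.h>

#include "types.h"
#include "debug.h"

/* Hypothetical helper: persist a coverage bitmap, dying loudly on error. */
void write_bitmap(const u8 *bits, u32 len) {
  s32 fd = open("fuzz_bitmap", O_WRONLY | O_CREAT | O_TRUNC, 0600);
  if (fd < 0) PFATAL("Unable to open 'fuzz_bitmap'");
  ck_write(fd, bits, len, "fuzz_bitmap");   /* short write => RPFATAL() */
  close(fd);
}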

View File

@ -21,5 +21,5 @@ void init_forkserver(char **argv);
# define MSG_ULIMIT_USAGE " ( ulimit -Sd $[%llu << 10];"
#endif /* ^RLIMIT_AS */
#endif

View File

@ -102,3 +102,4 @@ static inline u32 hash32(const void* key, u32 len, u32 seed) {
#endif /* ^__x86_64__ */
#endif /* !_HAVE_HASH_H */

View File

@ -5,3 +5,4 @@ void setup_shm(unsigned char dumb_mode);
void remove_shm(void);
#endif

View File

@ -56,16 +56,21 @@ typedef int64_t s64;
# define MAX(_a, _b) ((_a) > (_b) ? (_a) : (_b))
#endif /* !MIN */
#define SWAP16(_x) ({ \
#define SWAP16(_x) \
({ \
\
u16 _ret = (_x); \
(u16)((_ret << 8) | (_ret >> 8)); \
\
})
#define SWAP32(_x) ({ \
#define SWAP32(_x) \
({ \
\
u32 _ret = (_x); \
(u32)((_ret << 24) | (_ret >> 24) | \
((_ret << 8) & 0x00FF0000) | \
(u32)((_ret << 24) | (_ret >> 24) | ((_ret << 8) & 0x00FF0000) | \
((_ret >> 8) & 0x0000FF00)); \
\
})
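SWAP16()/SWAP32() flip endianness by shifting and masking, and the statement-expression form evaluates the argument only once. A quick worked check, assuming types.h (the header shown here) is on the include path:

#include <assert.h>
#include "types.h"

int main(void) {
  assert(SWAP16(0x1234) == 0x3412);          /* bytes 12 34 -> 34 12       */
  assert(SWAP32(0x11223344) == 0x44332211);  /* bytes 11 22 33 44 reversed */
  return 0;
}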
#ifdef AFL_LLVM_PASS
@ -77,8 +82,7 @@ typedef int64_t s64;
#define STRINGIFY_INTERNAL(x) #x
#define STRINGIFY(x) STRINGIFY_INTERNAL(x)
#define MEM_BARRIER() \
__asm__ volatile("" ::: "memory")
#define MEM_BARRIER() __asm__ volatile("" ::: "memory")
#if __GNUC__ < 6
# define likely(_x) (_x)
@ -89,3 +93,4 @@ typedef int64_t s64;
#endif
#endif /* ! _HAVE_TYPES_H */

View File

@ -38,23 +38,35 @@
/* Error / message handling: */
#define DEBUGF(_x...) do { \
#define DEBUGF(_x...) \
do { \
\
if (alloc_verbose) { \
\
if (++call_depth == 1) { \
\
fprintf(stderr, "[AFL] " _x); \
fprintf(stderr, "\n"); \
\
} \
call_depth--; \
\
} \
\
} while (0)
#define FATAL(_x...) do { \
#define FATAL(_x...) \
do { \
\
if (++call_depth == 1) { \
\
fprintf(stderr, "*** [AFL] " _x); \
fprintf(stderr, " ***\n"); \
abort(); \
\
} \
call_depth--; \
\
} while (0)
/* Macro to count the number of pages needed to store a buffer: */
@ -80,7 +92,6 @@ static __thread size_t total_mem; /* Currently allocated mem */
static __thread u32 call_depth; /* To avoid recursion via fprintf() */
/* This is the main alloc function. It allocates one page more than necessary,
sets that tailing page to PROT_NONE, and then increments the return address
so that it is right-aligned to that boundary. Since it always uses mmap(),
@ -90,14 +101,11 @@ static void* __dislocator_alloc(size_t len) {
void* ret;
if (total_mem + len > max_mem || total_mem + len < total_mem) {
if (hard_fail)
FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
if (hard_fail) FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
DEBUGF("total allocs exceed %u MB, returning NULL",
max_mem / 1024 / 1024);
DEBUGF("total allocs exceed %u MB, returning NULL", max_mem / 1024 / 1024);
return NULL;
@ -142,7 +150,6 @@ static void* __dislocator_alloc(size_t len) {
}
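As the comment above explains, __dislocator_alloc() asks mmap() for one extra page, marks that trailing page PROT_NONE, and returns a pointer right-aligned against it, so any access one byte past the buffer faults immediately. A stripped-down, hedged sketch of the same guard-page trick with no canaries or accounting:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Place `len` bytes flush against an inaccessible guard page. */
static void *guard_alloc(size_t len) {
  size_t page    = (size_t)sysconf(_SC_PAGESIZE);
  size_t rounded = (len + page - 1) / page * page;

  uint8_t *base = mmap(NULL, rounded + page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return NULL;

  mprotect(base + rounded, page, PROT_NONE);  /* last page is the tripwire  */
  return base + rounded - len;                /* right-align the user data  */
}

int main(void) {
  char *p = guard_alloc(10);
  memset(p, 'A', 10);   /* fine                                      */
  /* p[10] = 'B';          would SIGSEGV on the PROT_NONE guard page */
  return 0;
}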
/* The "user-facing" wrapper for calloc(). This just checks for overflows and
displays debug messages if requested. */
@ -157,8 +164,11 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
if (elem_cnt && len / elem_cnt != elem_len) {
if (no_calloc_over) {
DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len, elem_cnt);
DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len,
elem_cnt);
return NULL;
}
FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);
@ -174,7 +184,6 @@ void* calloc(size_t elem_len, size_t elem_cnt) {
}
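The check `elem_cnt && len / elem_cnt != elem_len` in the calloc() wrapper above catches multiplications that wrapped: if elem_len * elem_cnt overflowed, dividing the truncated product by elem_cnt no longer yields elem_len. A two-line demonstration, using uint32_t so the wrap is easy to see:

#include <assert.h>
#include <stdint.h>

int main(void) {
  uint32_t elem_len = 0x10000, elem_cnt = 0x10001;
  uint32_t len = elem_len * elem_cnt;              /* wraps to 0x10000   */
  assert(elem_cnt && len / elem_cnt != elem_len);  /* overflow detected  */
  return 0;
}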
/* The wrapper for malloc(). Roughly the same, also clobbers the returned
memory (unlike calloc(), malloc() is not guaranteed to return zeroed
memory). */
@ -193,7 +202,6 @@ void* malloc(size_t len) {
}
/* The wrapper for free(). This simply marks the entire region as PROT_NONE.
If the region is already freed, the code will segfault during the attempt to
read the canary. Not very graceful, but works, right? */
@ -224,7 +232,6 @@ void free(void* ptr) {
}
/* Realloc is pretty straightforward, too. We forcibly reallocate the buffer,
move data, and then free (aka mprotect()) the original one. */
@ -249,7 +256,6 @@ void* realloc(void* ptr, size_t len) {
}
__attribute__((constructor)) void __dislocator_init(void) {
u8* tmp = getenv("AFL_LD_LIMIT_MB");
@ -266,3 +272,4 @@ __attribute__((constructor)) void __dislocator_init(void) {
no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
}

View File

@ -30,20 +30,16 @@
# error "Sorry, this library is Linux-specific for now!"
#endif /* !__linux__ */
/* Mapping data and such */
#define MAX_MAPPINGS 1024
static struct mapping {
void *st, *en;
} __tokencap_ro[MAX_MAPPINGS];
static struct mapping { void *st, *en; } __tokencap_ro[MAX_MAPPINGS];
static u32 __tokencap_ro_cnt;
static u8 __tokencap_ro_loaded;
static FILE* __tokencap_out_file;
/* Identify read-only regions in memory. Only parameters that fall into these
ranges are worth dumping when passed to strcmp() and so on. Read-write
regions are far more likely to contain user input instead. */
@ -76,7 +72,6 @@ static void __tokencap_load_mappings(void) {
}
/* Check an address against the list of read-only mappings. */
static u8 __tokencap_is_ro(const void* ptr) {
@ -92,7 +87,6 @@ static u8 __tokencap_is_ro(const void* ptr) {
}
/* Dump an interesting token to output file, quoting and escaping it
properly. */
@ -120,9 +114,7 @@ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) {
pos += 4;
break;
default:
buf[pos++] = ptr[i];
default: buf[pos++] = ptr[i];
}
@ -134,7 +126,6 @@ static void __tokencap_dump(const u8* ptr, size_t len, u8 is_text) {
}
/* Replacements for strcmp(), memcmp(), and so on. Note that these will be used
only if the target is compiled with -fno-builtins and linked dynamically. */
@ -151,13 +142,13 @@ int strcmp(const char* str1, const char* str2) {
if (c1 != c2) return (c1 > c2) ? 1 : -1;
if (!c1) return 0;
str1++; str2++;
str1++;
str2++;
}
}
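libtokencap works by interposing its own strcmp()/memcmp()/strstr() over the libc versions (effective only when the target is built with -fno-builtin and linked dynamically): whenever an argument lives in a read-only mapping, it is dumped as a candidate dictionary token before the real comparison runs. A hedged, minimal interposer in the same spirit — the read-only test is stubbed out, whereas the real library parses /proc/self/maps and writes to the file named by an environment variable:

#include <stdio.h>
#include <string.h>

static FILE *token_out;   /* where captured tokens go */

__attribute__((constructor)) static void token_init(void) {
  token_out = stderr;     /* the real library opens its output file here */
}

static int is_probably_ro(const void *p) {
  (void)p;                /* stub: real code checks the loaded RO mappings */
  return 1;
}

#undef strcmp
int strcmp(const char *a, const char *b) {
  if (token_out && is_probably_ro(b)) fprintf(token_out, "\"%s\"\n", b);

  /* plain byte-wise comparison, like the replacement above */
  while (*a && *a == *b) a++, b++;
  return (unsigned char)*a - (unsigned char)*b;
}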
#undef strncmp
int strncmp(const char* str1, const char* str2, size_t len) {
@ -171,7 +162,8 @@ int strncmp(const char* str1, const char* str2, size_t len) {
if (!c1) return 0;
if (c1 != c2) return (c1 > c2) ? 1 : -1;
str1++; str2++;
str1++;
str2++;
}
@ -179,7 +171,6 @@ int strncmp(const char* str1, const char* str2, size_t len) {
}
#undef strcasecmp
int strcasecmp(const char* str1, const char* str2) {
@ -193,13 +184,13 @@ int strcasecmp(const char* str1, const char* str2) {
if (c1 != c2) return (c1 > c2) ? 1 : -1;
if (!c1) return 0;
str1++; str2++;
str1++;
str2++;
}
}
#undef strncasecmp
int strncasecmp(const char* str1, const char* str2, size_t len) {
@ -213,7 +204,8 @@ int strncasecmp(const char* str1, const char* str2, size_t len) {
if (!c1) return 0;
if (c1 != c2) return (c1 > c2) ? 1 : -1;
str1++; str2++;
str1++;
str2++;
}
@ -221,7 +213,6 @@ int strncasecmp(const char* str1, const char* str2, size_t len) {
}
#undef memcmp
int memcmp(const void* mem1, const void* mem2, size_t len) {
@ -233,7 +224,8 @@ int memcmp(const void* mem1, const void* mem2, size_t len) {
unsigned char c1 = *(const char*)mem1, c2 = *(const char*)mem2;
if (c1 != c2) return (c1 > c2) ? 1 : -1;
mem1++; mem2++;
mem1++;
mem2++;
}
@ -241,7 +233,6 @@ int memcmp(const void* mem1, const void* mem2, size_t len) {
}
#undef strstr
char* strstr(const char* haystack, const char* needle) {
@ -249,14 +240,15 @@ char* strstr(const char* haystack, const char* needle) {
if (__tokencap_is_ro(haystack))
__tokencap_dump(haystack, strlen(haystack), 1);
if (__tokencap_is_ro(needle))
__tokencap_dump(needle, strlen(needle), 1);
if (__tokencap_is_ro(needle)) __tokencap_dump(needle, strlen(needle), 1);
do {
const char* n = needle;
const char* h = haystack;
while(*n && *h && *n == *h) n++, h++;
while (*n && *h && *n == *h)
n++, h++;
if (!*n) return (char*)haystack;
@ -266,7 +258,6 @@ char* strstr(const char* haystack, const char* needle) {
}
#undef strcasestr
char* strcasestr(const char* haystack, const char* needle) {
@ -274,15 +265,15 @@ char* strcasestr(const char* haystack, const char* needle) {
if (__tokencap_is_ro(haystack))
__tokencap_dump(haystack, strlen(haystack), 1);
if (__tokencap_is_ro(needle))
__tokencap_dump(needle, strlen(needle), 1);
if (__tokencap_is_ro(needle)) __tokencap_dump(needle, strlen(needle), 1);
do {
const char* n = needle;
const char* h = haystack;
while(*n && *h && tolower(*n) == tolower(*h)) n++, h++;
while (*n && *h && tolower(*n) == tolower(*h))
n++, h++;
if (!*n) return (char*)haystack;
@ -292,7 +283,6 @@ char* strcasestr(const char* haystack, const char* needle) {
}
/* Init code to open the output file (or default to stderr). */
__attribute__((constructor)) void __tokencap_init(void) {

View File

@ -37,6 +37,7 @@ static cl::opt<bool> LoopHeadOpt("loophead", cl::desc("LoopHead"),
cl::init(false));
namespace {
struct InsTrim : public ModulePass {
protected:
@ -47,29 +48,38 @@ namespace {
int total_instr = 0;
unsigned int genLabel() {
return generator() & (MAP_SIZE - 1);
}
public:
static char ID;
InsTrim() : ModulePass(ID), generator(0) {
char *instWhiteListFilename = getenv("AFL_LLVM_WHITELIST");
if (instWhiteListFilename) {
std::string line;
std::ifstream fileStream;
fileStream.open(instWhiteListFilename);
if (!fileStream)
report_fatal_error("Unable to open AFL_LLVM_WHITELIST");
if (!fileStream) report_fatal_error("Unable to open AFL_LLVM_WHITELIST");
getline(fileStream, line);
while (fileStream) {
myWhitelist.push_back(line);
getline(fileStream, line);
}
}
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
}
#if LLVM_VERSION_MAJOR < 4
@ -78,15 +88,22 @@ namespace {
StringRef
#endif
getPassName() const override {
return "InstTrim Instrumentation";
}
bool runOnModule(Module &M) override {
char be_quiet = 0;
if (isatty(2) && !getenv("AFL_QUIET")) {
SAYF(cCYA "LLVMInsTrim" VERSION cRST " by csienslab\n");
} else be_quiet = 1;
} else
be_quiet = 1;
#if LLVM_VERSION_MAJOR < 9
char *neverZero_counters_str;
@ -94,8 +111,11 @@ namespace {
OKF("LLVM neverZero activated (by hexcoder)\n");
#endif
if (getenv("AFL_LLVM_INSTRIM_LOOPHEAD") != NULL || getenv("LOOPHEAD") != NULL) {
if (getenv("AFL_LLVM_INSTRIM_LOOPHEAD") != NULL ||
getenv("LOOPHEAD") != NULL) {
LoopHeadOpt = true;
}
// this is our default
@ -105,9 +125,13 @@ namespace {
char* inst_ratio_str = getenv("AFL_INST_RATIO");
unsigned int inst_ratio = 100;
if (inst_ratio_str) {
if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || !inst_ratio || inst_ratio > 100)
FATAL("Bad value of AFL_INST_RATIO (must be between 1 and 100)");
if (sscanf(inst_ratio_str, "%u", &inst_ratio) != 1 || !inst_ratio ||
inst_ratio > 100) FATAL("Bad value of AFL_INST_RATIO (must be between 1
and 100)");
}
*/
LLVMContext &C = M.getContext();
@ -119,160 +143,211 @@ namespace {
nullptr, "__afl_area_ptr");
GlobalVariable *OldPrev = new GlobalVariable(
M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc",
0, GlobalVariable::GeneralDynamicTLSModel, 0, false);
M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0,
GlobalVariable::GeneralDynamicTLSModel, 0, false);
u64 total_rs = 0;
u64 total_hs = 0;
for (Function &F : M) {
if (!F.size()) {
continue;
}
if (!F.size()) { continue; }
if (!myWhitelist.empty()) {
bool instrumentBlock = false;
DebugLoc Loc;
StringRef instFilename;
for (auto &BB : F) {
BasicBlock::iterator IP = BB.getFirstInsertionPt();
IRBuilder<> IRB(&(*IP));
if (!Loc)
Loc = IP->getDebugLoc();
if (!Loc) Loc = IP->getDebugLoc();
}
if (Loc) {
DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode());
unsigned int instLine = cDILoc->getLine();
instFilename = cDILoc->getFilename();
if (instFilename.str().empty()) {
/* If the original location is empty, try using the inlined location */
/* If the original location is empty, try using the inlined location
*/
DILocation *oDILoc = cDILoc->getInlinedAt();
if (oDILoc) {
instFilename = oDILoc->getFilename();
instLine = oDILoc->getLine();
}
}
/* Continue only if we know where we actually are */
if (!instFilename.str().empty()) {
for (std::list<std::string>::iterator it = myWhitelist.begin(); it != myWhitelist.end(); ++it) {
for (std::list<std::string>::iterator it = myWhitelist.begin();
it != myWhitelist.end(); ++it) {
if (instFilename.str().length() >= it->length()) {
if (instFilename.str().compare(instFilename.str().length() - it->length(), it->length(), *it) == 0) {
if (instFilename.str().compare(
instFilename.str().length() - it->length(),
it->length(), *it) == 0) {
instrumentBlock = true;
break;
}
}
}
}
}
/* Either we couldn't figure out our location or the location is
* not whitelisted, so we skip instrumentation. */
if (!instrumentBlock) {
if (!instFilename.str().empty())
SAYF(cYEL "[!] " cBRI "Not in whitelist, skipping %s ...\n", instFilename.str().c_str());
SAYF(cYEL "[!] " cBRI "Not in whitelist, skipping %s ...\n",
instFilename.str().c_str());
else
SAYF(cYEL "[!] " cBRI "No filename information found, skipping it");
continue;
}
}
std::unordered_set<BasicBlock *> MS;
if (!MarkSetOpt) {
for (auto &BB : F) {
MS.insert(&BB);
}
total_rs += F.size();
} else {
auto Result = markNodes(&F);
auto RS = Result.first;
auto HS = Result.second;
MS.insert(RS.begin(), RS.end());
if (!LoopHeadOpt) {
MS.insert(HS.begin(), HS.end());
total_rs += MS.size();
} else {
DenseSet<std::pair<BasicBlock *, BasicBlock *>> EdgeSet;
DominatorTreeWrapperPass *DTWP = &getAnalysis<DominatorTreeWrapperPass>(F);
DominatorTreeWrapperPass * DTWP =
&getAnalysis<DominatorTreeWrapperPass>(F);
auto DT = &DTWP->getDomTree();
total_rs += RS.size();
total_hs += HS.size();
for (BasicBlock *BB : HS) {
bool Inserted = false;
for (auto BI = pred_begin(BB), BE = pred_end(BB);
BI != BE; ++BI
) {
for (auto BI = pred_begin(BB), BE = pred_end(BB); BI != BE; ++BI) {
auto Edge = BasicBlockEdge(*BI, BB);
if (Edge.isSingleEdge() && DT->dominates(Edge, BB)) {
EdgeSet.insert({*BI, BB});
Inserted = true;
break;
}
}
if (!Inserted) {
MS.insert(BB);
total_rs += 1;
total_hs -= 1;
}
}
for (auto I = EdgeSet.begin(), E = EdgeSet.end(); I != E; ++I) {
auto PredBB = I->first;
auto SuccBB = I->second;
auto NewBB = SplitBlockPredecessors(SuccBB, {PredBB}, ".split",
DT, nullptr,
auto NewBB =
SplitBlockPredecessors(SuccBB, {PredBB}, ".split", DT, nullptr,
#if LLVM_VERSION_MAJOR >= 8
nullptr,
#endif
false);
MS.insert(NewBB);
}
}
auto *EBB = &F.getEntryBlock();
if (succ_begin(EBB) == succ_end(EBB)) {
MS.insert(EBB);
total_rs += 1;
}
for (BasicBlock &BB : F) {
if (MS.find(&BB) == MS.end()) {
continue;
}
if (MS.find(&BB) == MS.end()) { continue; }
IRBuilder<> IRB(&*BB.getFirstInsertionPt());
IRB.CreateStore(ConstantInt::get(Int32Ty, genLabel()), OldPrev);
}
}
for (BasicBlock &BB : F) {
auto PI = pred_begin(&BB);
auto PE = pred_end(&BB);
if (MarkSetOpt && MS.find(&BB) == MS.end()) {
continue;
}
if (MarkSetOpt && MS.find(&BB) == MS.end()) { continue; }
IRBuilder<> IRB(&*BB.getFirstInsertionPt());
Value * L = NULL;
if (PI == PE) {
L = ConstantInt::get(Int32Ty, genLabel());
} else {
auto *PN = PHINode::Create(Int32Ty, 0, "", &*BB.begin());
DenseMap<BasicBlock *, unsigned> PredMap;
for (auto PI = pred_begin(&BB), PE = pred_end(&BB);
PI != PE; ++PI
) {
for (auto PI = pred_begin(&BB), PE = pred_end(&BB); PI != PE; ++PI) {
BasicBlock *PBB = *PI;
auto It = PredMap.insert({PBB, genLabel()});
unsigned Label = It.first->second;
PN->addIncoming(ConstantInt::get(Int32Ty, Label), PBB);
}
L = PN;
}
/* Load prev_loc */
@ -283,7 +358,8 @@ namespace {
/* Load SHM pointer */
LoadInst *MapPtr = IRB.CreateLoad(CovMapPtr);
MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
Value *MapPtrIdx = IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, L));
Value *MapPtrIdx =
IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, L));
/* Update bitmap */
LoadInst *Counter = IRB.CreateLoad(MapPtrIdx);
@ -292,13 +368,18 @@ namespace {
Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1));
#if LLVM_VERSION_MAJOR < 9
if (neverZero_counters_str != NULL) // with llvm 9 we make this the default as the bug in llvm is then fixed
if (neverZero_counters_str !=
NULL) // with llvm 9 we make this the default as the bug in llvm is
// then fixed
#else
if (1) // with llvm 9 we make this the default as the bug in llvm is then fixed
if (1) // with llvm 9 we make this the default as the bug in llvm is
// then fixed
#endif
{
/* hexcoder: Realize a counter that skips zero during overflow.
* Once this counter reaches its maximum value, it next increments to 1
* Once this counter reaches its maximum value, it next increments to
* 1
*
* Instead of
* Counter + 1 -> Counter
@ -309,35 +390,49 @@ namespace {
auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0));
auto carry = IRB.CreateZExt(cf, Int8Ty);
Incr = IRB.CreateAdd(Incr, carry);
}
IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
IRB.CreateStore(Incr, MapPtrIdx)
->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
/* Set prev_loc to cur_loc >> 1 */
/*
StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1), OldPrev);
Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, L >> 1),
OldPrev); Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C,
None));
*/
total_instr++;
}
}
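The instrumentation emitted in this loop is AFL's classic edge coverage: each block gets a random label, the shared map is indexed by cur_label XOR prev_label, the counter at that index is bumped, and prev_label becomes the current label shifted right by one (so A->B and B->A hash differently and tight self-loops stay visible). The "neverZero" variant adds the carry of the 8-bit overflow back in, so a counter that wraps lands on 1 rather than 0 and the edge can never vanish from the map. A plain-C sketch of the same arithmetic — MAP_SIZE and the label source are assumptions:

#include <stdint.h>

#define MAP_SIZE 65536

static uint8_t  afl_map[MAP_SIZE];
static uint32_t prev_loc;

/* What the inserted IR boils down to for one instrumented block. */
static void cover_edge(uint32_t cur_loc) {
  uint32_t idx = (cur_loc ^ prev_loc) & (MAP_SIZE - 1);

  uint8_t incr = afl_map[idx] + 1;
  incr += (incr == 0);          /* neverZero: 255 + 1 -> 1, not 0       */
  afl_map[idx] = incr;

  prev_loc = cur_loc >> 1;      /* keep A->B distinct from B->A         */
}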
OKF("Instrumented %u locations (%llu, %llu) (%s mode)\n"/*", ratio %u%%)."*/,
total_instr, total_rs, total_hs,
getenv("AFL_HARDEN") ? "hardened" :
((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) ?
"ASAN/MSAN" : "non-hardened")/*, inst_ratio*/);
return false;
}
OKF("Instrumented %u locations (%llu, %llu) (%s mode)\n" /*", ratio
%u%%)."*/
,
total_instr, total_rs, total_hs,
getenv("AFL_HARDEN")
? "hardened"
: ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN"))
? "ASAN/MSAN"
: "non-hardened") /*, inst_ratio*/);
return false;
}
}; // end of struct InsTrim
} // end of anonymous namespace
char InsTrim::ID = 0;
static void registerAFLPass(const PassManagerBuilder &,
legacy::PassManagerBase &PM) {
PM.add(new InsTrim());
}
static RegisterStandardPasses RegisterAFLPass(
@ -345,3 +440,4 @@ static RegisterStandardPasses RegisterAFLPass(
static RegisterStandardPasses RegisterAFLPass0(
PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass);

View File

@ -25,117 +25,159 @@ std::set<uint32_t> Marked , Markabove;
std::vector<std::vector<uint32_t> > Succs, Preds;
void reset() {
LMap.clear();
Blocks.clear();
Marked.clear();
Markabove.clear();
}
uint32_t start_point;
void labelEachBlock(Function *F) {
// Fake single endpoint;
LMap[NULL] = Blocks.size();
Blocks.push_back(NULL);
// Assign the unique LabelID to each block;
for (auto I = F->begin(), E = F->end(); I != E; ++I) {
BasicBlock *BB = &*I;
LMap[BB] = Blocks.size();
Blocks.push_back(BB);
}
start_point = LMap[&F->getEntryBlock()];
}
void buildCFG(Function *F) {
Succs.resize(Blocks.size());
Preds.resize(Blocks.size());
for (size_t i = 0; i < Succs.size(); i++) {
Succs[i].clear();
Preds[i].clear();
}
// uint32_t FakeID = 0;
for (auto S = F->begin(), E = F->end(); S != E; ++S) {
BasicBlock *BB = &*S;
uint32_t MyID = LMap[BB];
// if (succ_begin(BB) == succ_end(BB)) {
// Succs[MyID].push_back(FakeID);
// Marked.insert(MyID);
//}
for (auto I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
Succs[MyID].push_back(LMap[*I]);
}
}
}
std::vector<std::vector<uint32_t> > tSuccs;
std::vector<bool> tag, indfs;
void DFStree(size_t now_id) {
if (tag[now_id]) return;
tag[now_id] = true;
indfs[now_id] = true;
for (auto succ : tSuccs[now_id]) {
if (tag[succ] and indfs[succ]) {
Marked.insert(succ);
Markabove.insert(succ);
continue;
}
Succs[now_id].push_back(succ);
Preds[succ].push_back(now_id);
DFStree(succ);
}
indfs[now_id] = false;
}
void turnCFGintoDAG(Function *F) {
tSuccs = Succs;
tag.resize(Blocks.size());
indfs.resize(Blocks.size());
for (size_t i = 0; i < Blocks.size(); ++i) {
Succs[i].clear();
tag[i] = false;
indfs[i] = false;
}
DFStree(start_point);
for (size_t i = 0; i < Blocks.size(); ++i)
if (Succs[i].empty()) {
Succs[i].push_back(0);
Preds[0].push_back(i);
}
}
uint32_t timeStamp;
namespace DominatorTree {
std::vector<std::vector<uint32_t> > cov;
std::vector<uint32_t> dfn, nfd, par, sdom, idom, mom, mn;
bool Compare(uint32_t u, uint32_t v) {
return dfn[u] < dfn[v];
}
uint32_t eval(uint32_t u) {
if (mom[u] == u) return u;
uint32_t res = eval(mom[u]);
if(Compare(sdom[mn[mom[u]]] , sdom[mn[u]])) {
mn[u] = mn[mom[u]];
}
if (Compare(sdom[mn[mom[u]]], sdom[mn[u]])) { mn[u] = mn[mom[u]]; }
return mom[u] = res;
}
void DFS(uint32_t now) {
timeStamp += 1;
dfn[now] = timeStamp;
nfd[timeStamp - 1] = now;
for (auto succ : Succs[now]) {
if (dfn[succ] == 0) {
par[succ] = now;
DFS(succ);
}
}
}
void DominatorTree(Function *F) {
if (Blocks.empty()) return;
uint32_t s = start_point;
@ -150,76 +192,94 @@ namespace DominatorTree{
idom.resize(Blocks.size());
for (uint32_t i = 0; i < Blocks.size(); i++) {
dfn[i] = 0;
nfd[i] = Blocks.size();
cov[i].clear();
idom[i] = mom[i] = mn[i] = sdom[i] = i;
}
timeStamp = 0;
DFS(s);
for (uint32_t i = Blocks.size() - 1; i >= 1u; i--) {
uint32_t now = nfd[i];
if( now == Blocks.size() ) {
continue;
}
if (now == Blocks.size()) { continue; }
for (uint32_t pre : Preds[now]) {
if (dfn[pre]) {
eval(pre);
if( Compare(sdom[mn[pre]], sdom[now]) ) {
sdom[now] = sdom[mn[pre]];
}
if (Compare(sdom[mn[pre]], sdom[now])) { sdom[now] = sdom[mn[pre]]; }
}
}
cov[sdom[now]].push_back(now);
mom[now] = par[now];
for (uint32_t x : cov[par[now]]) {
eval(x);
if (Compare(sdom[mn[x]], par[now])) {
idom[x] = mn[x];
} else {
idom[x] = par[now];
}
}
}
for (uint32_t i = 1; i < Blocks.size(); i += 1) {
uint32_t now = nfd[i];
if( now == Blocks.size() ) {
continue;
if (now == Blocks.size()) { continue; }
if (idom[now] != sdom[now]) idom[now] = idom[idom[now]];
}
if(idom[now] != sdom[now])
idom[now] = idom[idom[now]];
}
}
} // End of DominatorTree
} // namespace DominatorTree
std::vector<uint32_t> Visited, InStack;
std::vector<uint32_t> TopoOrder, InDeg;
std::vector<std::vector<uint32_t> > t_Succ, t_Pred;
void Go(uint32_t now, uint32_t tt) {
if (now == tt) return;
Visited[now] = InStack[now] = timeStamp;
for (uint32_t nxt : Succs[now]) {
if (Visited[nxt] == timeStamp and InStack[nxt] == timeStamp) {
Marked.insert(nxt);
}
t_Succ[now].push_back(nxt);
t_Pred[nxt].push_back(now);
InDeg[nxt] += 1;
if(Visited[nxt] == timeStamp) {
continue;
}
if (Visited[nxt] == timeStamp) { continue; }
Go(nxt, tt);
}
InStack[now] = 0;
}
void TopologicalSort(uint32_t ss, uint32_t tt) {
timeStamp += 1;
Go(ss, tt);
@ -228,75 +288,110 @@ void TopologicalSort(uint32_t ss, uint32_t tt) {
std::queue<uint32_t> wait;
wait.push(ss);
while (not wait.empty()) {
uint32_t now = wait.front(); wait.pop();
uint32_t now = wait.front();
wait.pop();
TopoOrder.push_back(now);
for (uint32_t nxt : t_Succ[now]) {
InDeg[nxt] -= 1;
if(InDeg[nxt] == 0u) {
wait.push(nxt);
}
if (InDeg[nxt] == 0u) { wait.push(nxt); }
}
}
}
std::vector<std::set<uint32_t> > NextMarked;
bool Indistinguish(uint32_t node1, uint32_t node2) {
if (NextMarked[node1].size() > NextMarked[node2].size()) {
uint32_t _swap = node1;
node1 = node2;
node2 = _swap;
}
for (uint32_t x : NextMarked[node1]) {
if( NextMarked[node2].find(x) != NextMarked[node2].end() ) {
return true;
}
if (NextMarked[node2].find(x) != NextMarked[node2].end()) { return true; }
}
return false;
}
void MakeUniq(uint32_t now) {
bool StopFlag = false;
if (Marked.find(now) == Marked.end()) {
for (uint32_t pred1 : t_Pred[now]) {
for (uint32_t pred2 : t_Pred[now]) {
if (pred1 == pred2) continue;
if (Indistinguish(pred1, pred2)) {
Marked.insert(now);
StopFlag = true;
break;
}
}
if (StopFlag) {
break;
}
if (StopFlag) { break; }
}
}
if (Marked.find(now) != Marked.end()) {
NextMarked[now].insert(now);
} else {
for (uint32_t pred : t_Pred[now]) {
for (uint32_t x : NextMarked[pred]) {
NextMarked[now].insert(x);
}
}
}
}
void MarkSubGraph(uint32_t ss, uint32_t tt) {
TopologicalSort(ss, tt);
if (TopoOrder.empty()) return;
for (uint32_t i : TopoOrder) {
NextMarked[i].clear();
}
NextMarked[TopoOrder[0]].insert(TopoOrder[0]);
for (uint32_t i = 1; i < TopoOrder.size(); i += 1) {
MakeUniq(TopoOrder[i]);
}
}
void MarkVertice(Function *F) {
uint32_t s = start_point;
InDeg.resize(Blocks.size());
@ -307,25 +402,31 @@ void MarkVertice(Function *F) {
NextMarked.resize(Blocks.size());
for (uint32_t i = 0; i < Blocks.size(); i += 1) {
Visited[i] = InStack[i] = InDeg[i] = 0;
t_Succ[i].clear();
t_Pred[i].clear();
}
timeStamp = 0;
uint32_t t = 0;
// MarkSubGraph(s, t);
// return;
while (s != t) {
MarkSubGraph(DominatorTree::idom[t], t);
t = DominatorTree::idom[t];
}
}
// return {marked nodes}
std::pair<std::vector<BasicBlock *>,
std::vector<BasicBlock *> >markNodes(Function *F) {
std::pair<std::vector<BasicBlock *>, std::vector<BasicBlock *> > markNodes(
Function *F) {
assert(F->size() > 0 && "Function can not be empty");
reset();
@ -337,19 +438,28 @@ std::pair<std::vector<BasicBlock *>,
std::vector<BasicBlock *> Result, ResultAbove;
for (uint32_t x : Markabove) {
auto it = Marked.find(x);
if( it != Marked.end() )
Marked.erase( it );
if( x )
ResultAbove.push_back(Blocks[x]);
if (it != Marked.end()) Marked.erase(it);
if (x) ResultAbove.push_back(Blocks[x]);
}
for (uint32_t x : Marked) {
if (x == 0) {
continue;
} else {
Result.push_back(Blocks[x]);
}
}
return {Result, ResultAbove};
}

View File

@ -5,7 +5,8 @@
# include "llvm/IR/Function.h"
# include <vector>
std::pair<std::vector<llvm::BasicBlock *>,
std::vector<llvm::BasicBlock *>> markNodes(llvm::Function *F);
std::pair<std::vector<llvm::BasicBlock *>, std::vector<llvm::BasicBlock *>>
markNodes(llvm::Function *F);
#endif

View File

@ -38,7 +38,6 @@ static u8* obj_path; /* Path to runtime libraries */
static u8** cc_params; /* Parameters passed to the real CC */
static u32 cc_par_cnt = 1; /* Param count, including argv0 */
/* Try to find the runtime libraries. If that fails, abort. */
static void find_obj(u8* argv0) {
@ -51,9 +50,11 @@ static void find_obj(u8* argv0) {
tmp = alloc_printf("%s/afl-llvm-rt.o", afl_path);
if (!access(tmp, R_OK)) {
obj_path = afl_path;
ck_free(tmp);
return;
}
ck_free(tmp);
@ -73,9 +74,11 @@ static void find_obj(u8* argv0) {
tmp = alloc_printf("%s/afl-llvm-rt.o", dir);
if (!access(tmp, R_OK)) {
obj_path = dir;
ck_free(tmp);
return;
}
ck_free(tmp);
@ -84,14 +87,17 @@ static void find_obj(u8* argv0) {
}
if (!access(AFL_PATH "/afl-llvm-rt.o", R_OK)) {
obj_path = AFL_PATH;
return;
}
FATAL("Unable to find 'afl-llvm-rt.o' or 'afl-llvm-pass.so.cc'. Please set AFL_PATH");
}
FATAL(
"Unable to find 'afl-llvm-rt.o' or 'afl-llvm-pass.so.cc'. Please set "
"AFL_PATH");
}
/* Copy argv to cc_params, making the necessary edits. */
@ -103,14 +109,21 @@ static void edit_params(u32 argc, char** argv) {
cc_params = ck_alloc((argc + 128) * sizeof(u8*));
name = strrchr(argv[0], '/');
if (!name) name = argv[0]; else name++;
if (!name)
name = argv[0];
else
name++;
if (!strcmp(name, "afl-clang-fast++")) {
u8* alt_cxx = getenv("AFL_CXX");
cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++";
} else {
u8* alt_cc = getenv("AFL_CC");
cc_params[0] = alt_cc ? alt_cc : (u8*)"clang";
}
/* There are three ways to compile with afl-clang-fast. In the traditional
@ -118,36 +131,50 @@ static void edit_params(u32 argc, char** argv) {
much faster but has less coverage. Finally there is the experimental
'trace-pc-guard' mode, we use native LLVM instrumentation callbacks
instead. For trace-pc-guard see:
http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards */
http://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards
*/
// laf
if (getenv("LAF_SPLIT_SWITCHES") || getenv("AFL_LLVM_LAF_SPLIT_SWITCHES")) {
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = "-load";
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = alloc_printf("%s/split-switches-pass.so", obj_path);
cc_params[cc_par_cnt++] =
alloc_printf("%s/split-switches-pass.so", obj_path);
}
if (getenv("LAF_TRANSFORM_COMPARES")||getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES")) {
if (getenv("LAF_TRANSFORM_COMPARES") ||
getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES")) {
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = "-load";
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = alloc_printf("%s/compare-transform-pass.so", obj_path);
cc_params[cc_par_cnt++] =
alloc_printf("%s/compare-transform-pass.so", obj_path);
}
if (getenv("LAF_SPLIT_COMPARES") || getenv("AFL_LLVM_LAF_SPLIT_COMPARES")) {
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = "-load";
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = alloc_printf("%s/split-compares-pass.so", obj_path);
cc_params[cc_par_cnt++] =
alloc_printf("%s/split-compares-pass.so", obj_path);
}
// /laf
#ifdef USE_TRACE_PC
cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard"; // edge coverage by default
cc_params[cc_par_cnt++] =
"-fsanitize-coverage=trace-pc-guard"; // edge coverage by default
// cc_params[cc_par_cnt++] = "-mllvm";
//cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-cmp,trace-div,trace-gep";
//cc_params[cc_par_cnt++] = "-sanitizer-coverage-block-threshold=0";
// cc_params[cc_par_cnt++] =
// "-fsanitize-coverage=trace-cmp,trace-div,trace-gep"; cc_params[cc_par_cnt++]
// = "-sanitizer-coverage-block-threshold=0";
#else
cc_params[cc_par_cnt++] = "-Xclang";
cc_params[cc_par_cnt++] = "-load";
@ -165,6 +192,7 @@ static void edit_params(u32 argc, char** argv) {
if (argc == 1 && !strcmp(argv[1], "-v")) maybe_linking = 0;
while (--argc) {
u8* cur = *(++argv);
if (!strcmp(cur, "-m32")) bit_mode = 32;
@ -175,15 +203,15 @@ static void edit_params(u32 argc, char** argv) {
if (!strcmp(cur, "-c") || !strcmp(cur, "-S") || !strcmp(cur, "-E"))
maybe_linking = 0;
if (!strcmp(cur, "-fsanitize=address") ||
!strcmp(cur, "-fsanitize=memory")) asan_set = 1;
if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory"))
asan_set = 1;
if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;
if (!strcmp(cur, "-shared")) maybe_linking = 0;
if (!strcmp(cur, "-Wl,-z,defs") ||
!strcmp(cur, "-Wl,--no-undefined")) continue;
if (!strcmp(cur, "-Wl,-z,defs") || !strcmp(cur, "-Wl,--no-undefined"))
continue;
cc_params[cc_par_cnt++] = cur;
@ -193,8 +221,7 @@ static void edit_params(u32 argc, char** argv) {
cc_params[cc_par_cnt++] = "-fstack-protector-all";
if (!fortify_set)
cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
}
@ -202,8 +229,7 @@ static void edit_params(u32 argc, char** argv) {
if (getenv("AFL_USE_ASAN")) {
if (getenv("AFL_USE_MSAN"))
FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_HARDEN"))
FATAL("ASAN and AFL_HARDEN are mutually exclusive");
@ -213,8 +239,7 @@ static void edit_params(u32 argc, char** argv) {
} else if (getenv("AFL_USE_MSAN")) {
if (getenv("AFL_USE_ASAN"))
FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_HARDEN"))
FATAL("MSAN and AFL_HARDEN are mutually exclusive");
@ -279,9 +304,11 @@ static void edit_params(u32 argc, char** argv) {
*/
cc_params[cc_par_cnt++] = "-D__AFL_LOOP(_A)="
cc_params[cc_par_cnt++] =
"-D__AFL_LOOP(_A)="
"({ static volatile char *_B __attribute__((used)); "
" _B = (char*)\"" PERSIST_SIG "\"; "
" _B = (char*)\"" PERSIST_SIG
"\"; "
#ifdef __APPLE__
"__attribute__((visibility(\"default\"))) "
"int _L(unsigned int) __asm__(\"___afl_persistent_loop\"); "
@ -291,9 +318,11 @@ static void edit_params(u32 argc, char** argv) {
#endif /* ^__APPLE__ */
"_L(_A); })";
cc_params[cc_par_cnt++] = "-D__AFL_INIT()="
cc_params[cc_par_cnt++] =
"-D__AFL_INIT()="
"do { static volatile char *_A __attribute__((used)); "
" _A = (char*)\"" DEFER_SIG "\"; "
" _A = (char*)\"" DEFER_SIG
"\"; "
#ifdef __APPLE__
"__attribute__((visibility(\"default\"))) "
"void _I(void) __asm__(\"___afl_manual_init\"); "
@ -306,8 +335,10 @@ static void edit_params(u32 argc, char** argv) {
if (maybe_linking) {
if (x_set) {
cc_params[cc_par_cnt++] = "-x";
cc_params[cc_par_cnt++] = "none";
}
switch (bit_mode) {
@ -340,7 +371,6 @@ static void edit_params(u32 argc, char** argv) {
}
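The two -D options injected above define the __AFL_LOOP() and __AFL_INIT() helpers used for persistent mode and deferred forkserver startup: the embedded PERSIST_SIG / DEFER_SIG strings let afl-fuzz detect the capability in the binary, and the macros expand to calls into the runtime. A typical harness using them — it only builds with afl-clang-fast (which injects these definitions), and parse_one_input() is a placeholder for the code under test:

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical target function standing in for the code under test. */
extern void parse_one_input(const uint8_t *buf, size_t len);

int main(void) {
  static uint8_t buf[4096];

  __AFL_INIT();                  /* deferred init: start the forkserver here */

  while (__AFL_LOOP(1000)) {     /* up to 1000 inputs per forked process     */
    ssize_t len = read(0, buf, sizeof(buf));
    if (len > 0) parse_one_input(buf, (size_t)len);
  }

  return 0;
}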
/* Main entry point */
int main(int argc, char** argv) {
@ -348,7 +378,8 @@ int main(int argc, char** argv) {
if (isatty(2) && !getenv("AFL_QUIET")) {
#ifdef USE_TRACE_PC
SAYF(cCYA "afl-clang-fast" VERSION cRST " [tpcg] by <lszekeres@google.com>\n");
SAYF(cCYA "afl-clang-fast" VERSION cRST
" [tpcg] by <lszekeres@google.com>\n");
#else
SAYF(cCYA "afl-clang-fast" VERSION cRST " by <lszekeres@google.com>\n");
#endif /* ^USE_TRACE_PC */
@ -357,18 +388,25 @@ int main(int argc, char** argv) {
if (argc < 2) {
SAYF("\n"
"This is a helper application for afl-fuzz. It serves as a drop-in replacement\n"
"for clang, letting you recompile third-party code with the required runtime\n"
"instrumentation. A common use pattern would be one of the following:\n\n"
SAYF(
"\n"
"This is a helper application for afl-fuzz. It serves as a drop-in "
"replacement\n"
"for clang, letting you recompile third-party code with the required "
"runtime\n"
"instrumentation. A common use pattern would be one of the "
"following:\n\n"
" CC=%s/afl-clang-fast ./configure\n"
" CXX=%s/afl-clang-fast++ ./configure\n\n"
"In contrast to the traditional afl-clang tool, this version is implemented as\n"
"an LLVM pass and tends to offer improved performance with slow programs.\n\n"
"In contrast to the traditional afl-clang tool, this version is "
"implemented as\n"
"an LLVM pass and tends to offer improved performance with slow "
"programs.\n\n"
"You can specify custom next-stage toolchain via AFL_CC and AFL_CXX. Setting\n"
"You can specify custom next-stage toolchain via AFL_CC and AFL_CXX. "
"Setting\n"
"AFL_HARDEN enables hardening optimizations in the compiled code.\n\n",
BIN_PATH, BIN_PATH);
@ -376,7 +414,6 @@ int main(int argc, char** argv) {
}
find_obj(argv[0]);
edit_params(argc, argv);
@ -396,3 +433,4 @@ int main(int argc, char** argv) {
return 0;
}

View File

@ -51,42 +51,44 @@ namespace {
class AFLCoverage : public ModulePass {
public:
static char ID;
AFLCoverage() : ModulePass(ID) {
char *instWhiteListFilename = getenv("AFL_LLVM_WHITELIST");
if (instWhiteListFilename) {
std::string line;
std::ifstream fileStream;
fileStream.open(instWhiteListFilename);
if (!fileStream)
report_fatal_error("Unable to open AFL_LLVM_WHITELIST");
if (!fileStream) report_fatal_error("Unable to open AFL_LLVM_WHITELIST");
getline(fileStream, line);
while (fileStream) {
myWhitelist.push_back(line);
getline(fileStream, line);
}
}
}
bool runOnModule(Module &M) override;
// StringRef getPassName() const override {
// return "American Fuzzy Lop Instrumentation";
// }
protected:
std::list<std::string> myWhitelist;
};
}
} // namespace
char AFLCoverage::ID = 0;
bool AFLCoverage::runOnModule(Module &M) {
LLVMContext &C = M.getContext();
@ -103,7 +105,9 @@ bool AFLCoverage::runOnModule(Module &M) {
SAYF(cCYA "afl-llvm-pass" VERSION cRST " by <lszekeres@google.com>\n");
} else be_quiet = 1;
} else
be_quiet = 1;
/* Decide instrumentation ratio */
@ -134,8 +138,8 @@ bool AFLCoverage::runOnModule(Module &M) {
M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc");
#else
GlobalVariable *AFLPrevLoc = new GlobalVariable(
M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc",
0, GlobalVariable::GeneralDynamicTLSModel, 0, false);
M, Int32Ty, false, GlobalValue::ExternalLinkage, 0, "__afl_prev_loc", 0,
GlobalVariable::GeneralDynamicTLSModel, 0, false);
#endif
/* Instrument all the things! */
@ -149,6 +153,7 @@ bool AFLCoverage::runOnModule(Module &M) {
IRBuilder<> IRB(&(*IP));
if (!myWhitelist.empty()) {
bool instrumentBlock = false;
/* Get the current location using debug information.
@ -156,42 +161,60 @@ bool AFLCoverage::runOnModule(Module &M) {
* to determine our location. */
DebugLoc Loc = IP->getDebugLoc();
if (Loc) {
DILocation *cDILoc = dyn_cast<DILocation>(Loc.getAsMDNode());
unsigned int instLine = cDILoc->getLine();
StringRef instFilename = cDILoc->getFilename();
if (instFilename.str().empty()) {
/* If the original location is empty, try using the inlined location */
/* If the original location is empty, try using the inlined location
*/
DILocation *oDILoc = cDILoc->getInlinedAt();
if (oDILoc) {
instFilename = oDILoc->getFilename();
instLine = oDILoc->getLine();
}
}
/* Continue only if we know where we actually are */
if (!instFilename.str().empty()) {
for (std::list<std::string>::iterator it = myWhitelist.begin(); it != myWhitelist.end(); ++it) {
for (std::list<std::string>::iterator it = myWhitelist.begin();
it != myWhitelist.end(); ++it) {
/* We don't check for filename equality here because
* filenames might actually be full paths. Instead we
* check that the actual filename ends in the filename
* specified in the list. */
if (instFilename.str().length() >= it->length()) {
if (instFilename.str().compare(instFilename.str().length() - it->length(), it->length(), *it) == 0) {
if (instFilename.str().compare(
instFilename.str().length() - it->length(),
it->length(), *it) == 0) {
instrumentBlock = true;
break;
}
}
}
}
}
/* Either we couldn't figure out our location or the location is
* not whitelisted, so we skip instrumentation. */
if (!instrumentBlock) continue;
}
}
if (AFL_R(100) >= inst_ratio) continue;
@ -207,22 +230,25 @@ bool AFLCoverage::runOnModule(Module &M) {
int more_than_one = -1;
// fprintf(stderr, "BB %u: ", cur_loc);
for (BasicBlock *Pred : predecessors(&BB)) {
int count = 0;
if (more_than_one == -1)
more_than_one = 0;
if (more_than_one == -1) more_than_one = 0;
// fprintf(stderr, " %p=>", Pred);
for (BasicBlock *Succ : successors(Pred)) {
// if (count > 0)
// fprintf(stderr, "|");
if (Succ != NULL) count++;
// fprintf(stderr, "%p", Succ);
}
if (count > 1)
more_than_one = 1;
if (count > 1) more_than_one = 1;
}
// fprintf(stderr, " == %d\n", more_than_one);
if (more_than_one != 1)
continue;
if (more_than_one != 1) continue;
ConstantInt *CurLoc = ConstantInt::get(Int32Ty, cur_loc);
@ -236,7 +262,8 @@ bool AFLCoverage::runOnModule(Module &M) {
LoadInst *MapPtr = IRB.CreateLoad(AFLMapPtr);
MapPtr->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
Value *MapPtrIdx = IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc));
Value *MapPtrIdx =
IRB.CreateGEP(MapPtr, IRB.CreateXor(PrevLocCasted, CurLoc));
/* Update bitmap */
@ -246,7 +273,9 @@ bool AFLCoverage::runOnModule(Module &M) {
Value *Incr = IRB.CreateAdd(Counter, ConstantInt::get(Int8Ty, 1));
#if LLVM_VERSION_MAJOR < 9
if (neverZero_counters_str != NULL) { // with llvm 9 we make this the default as the bug in llvm is then fixed
if (neverZero_counters_str !=
NULL) { // with llvm 9 we make this the default as the bug in llvm is
// then fixed
#endif
/* hexcoder: Realize a counter that skips zero during overflow.
* Once this counter reaches its maximum value, it next increments to 1
@ -260,45 +289,64 @@ bool AFLCoverage::runOnModule(Module &M) {
/* // we keep the old solutions just in case
// Solution #1
if (neverZero_counters_str[0] == '1') {
CallInst *AddOv = IRB.CreateBinaryIntrinsic(Intrinsic::uadd_with_overflow, Counter, ConstantInt::get(Int8Ty, 1));
AddOv->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
Value *SumWithOverflowBit = AddOv;
Incr = IRB.CreateAdd(IRB.CreateExtractValue(SumWithOverflowBit, 0), // sum
IRB.CreateZExt( // convert from one bit type to 8 bits type
IRB.CreateExtractValue(SumWithOverflowBit, 1), // overflow
Int8Ty));
CallInst *AddOv = IRB.CreateBinaryIntrinsic(
    Intrinsic::uadd_with_overflow, Counter, ConstantInt::get(Int8Ty, 1));
AddOv->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
Value *SumWithOverflowBit = AddOv;
Incr = IRB.CreateAdd(
    IRB.CreateExtractValue(SumWithOverflowBit, 0),  // sum
    IRB.CreateZExt(  // convert from one bit type to 8 bits type
        IRB.CreateExtractValue(SumWithOverflowBit, 1),  // overflow
        Int8Ty));
// Solution #2
} else if (neverZero_counters_str[0] == '2') {
auto cf = IRB.CreateICmpEQ(Counter, ConstantInt::get(Int8Ty, 255));
Value *HowMuch = IRB.CreateAdd(ConstantInt::get(Int8Ty, 1), cf);
Incr = IRB.CreateAdd(Counter, HowMuch);
auto cf = IRB.CreateICmpEQ(Counter, ConstantInt::get(Int8Ty, 255));
Value *HowMuch = IRB.CreateAdd(ConstantInt::get(Int8Ty, 1), cf);
Incr = IRB.CreateAdd(Counter, HowMuch);
// Solution #3
} else if (neverZero_counters_str[0] == '3') {
*/
// this is the solution we choose because llvm9 should do the right thing here
// this is the solution we choose because llvm9 should do the right
// thing here
auto cf = IRB.CreateICmpEQ(Incr, ConstantInt::get(Int8Ty, 0));
auto carry = IRB.CreateZExt(cf, Int8Ty);
Incr = IRB.CreateAdd(Incr, carry);
/*
// Solution #4
} else if (neverZero_counters_str[0] == '4') {
auto cf = IRB.CreateICmpULT(Incr, ConstantInt::get(Int8Ty, 1));
auto carry = IRB.CreateZExt(cf, Int8Ty);
Incr = IRB.CreateAdd(Incr, carry);
} else {
fprintf(stderr, "Error: unknown value for AFL_NZERO_COUNTS: %s (valid is 1-4)\n", neverZero_counters_str);
exit(-1);
fprintf(stderr, "Error: unknown value for AFL_NZERO_COUNTS: %s
(valid is 1-4)\n", neverZero_counters_str); exit(-1);
}
*/
#if LLVM_VERSION_MAJOR < 9
}
#endif
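The variant kept above (solution #3) prevents the 8-bit hit counter from ever wrapping back to zero. A small C sketch of the arithmetic it emits:

#include <stdint.h>

/* NeverZero increment: 255 wraps to 1 instead of 0, so an edge that was
   hit can never look identical to an edge that was never hit (sketch). */
static inline uint8_t never_zero_incr(uint8_t counter) {
  uint8_t incr = counter + 1;
  return incr + (incr == 0);  /* add the overflow carry back in */
}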
IRB.CreateStore(Incr, MapPtrIdx)->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
IRB.CreateStore(Incr, MapPtrIdx)
->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
/* Set prev_loc to cur_loc >> 1 */
StoreInst *Store = IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc);
StoreInst *Store =
IRB.CreateStore(ConstantInt::get(Int32Ty, cur_loc >> 1), AFLPrevLoc);
Store->setMetadata(M.getMDKindID("nosanitize"), MDNode::get(C, None));
inst_blocks++;
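Taken together, the IR injected per basic block is roughly equivalent to the following C; this is a sketch of the effect, the real symbols are the globals created by the pass above:

extern unsigned char        *__afl_area_ptr;  /* shared coverage bitmap   */
extern __thread unsigned int __afl_prev_loc;  /* previous block ID >> 1   */

/* cur_loc is the random compile-time ID assigned to this block. */
static inline void afl_log_edge(unsigned int cur_loc) {
  __afl_area_ptr[cur_loc ^ __afl_prev_loc]++;  /* count this edge          */
  __afl_prev_loc = cur_loc >> 1;               /* so A->B and B->A differ  */
}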
@ -309,11 +357,16 @@ bool AFLCoverage::runOnModule(Module &M) {
if (!be_quiet) {
if (!inst_blocks) WARNF("No instrumentation targets found.");
else OKF("Instrumented %u locations (%s mode, ratio %u%%).",
inst_blocks, getenv("AFL_HARDEN") ? "hardened" :
((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) ?
"ASAN/MSAN" : "non-hardened"), inst_ratio);
if (!inst_blocks)
WARNF("No instrumentation targets found.");
else
OKF("Instrumented %u locations (%s mode, ratio %u%%).", inst_blocks,
getenv("AFL_HARDEN")
? "hardened"
: ((getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN"))
? "ASAN/MSAN"
: "non-hardened"),
inst_ratio);
}
@ -321,7 +374,6 @@ bool AFLCoverage::runOnModule(Module &M) {
}
static void registerAFLPass(const PassManagerBuilder &,
legacy::PassManagerBase &PM) {
@ -329,9 +381,9 @@ static void registerAFLPass(const PassManagerBuilder &,
}
static RegisterStandardPasses RegisterAFLPass(
PassManagerBuilder::EP_OptimizerLast, registerAFLPass);
static RegisterStandardPasses RegisterAFLPass0(
PassManagerBuilder::EP_EnabledOnOptLevel0, registerAFLPass);

View File

@ -50,10 +50,9 @@
#include <sys/mman.h>
#include <fcntl.h>
/* Globals needed by the injected instrumentation. The __afl_area_initial region
is used for instrumentation output before __afl_map_shm() has a chance to run.
It will end up as .comm, so it shouldn't be too wasteful. */
is used for instrumentation output before __afl_map_shm() has a chance to
run. It will end up as .comm, so it shouldn't be too wasteful. */
u8 __afl_area_initial[MAP_SIZE];
u8* __afl_area_ptr = __afl_area_initial;
@ -64,12 +63,10 @@ u32 __afl_prev_loc;
__thread u32 __afl_prev_loc;
#endif
/* Running in persistent mode? */
static u8 is_persistent;
/* SHM setup. */
static void __afl_map_shm(void) {
@ -81,6 +78,7 @@ static void __afl_map_shm(void) {
hacky .init code to work correctly in projects such as OpenSSL. */
if (id_str) {
#ifdef USEMMAP
const char* shm_file_path = id_str;
int shm_fd = -1;
@ -89,18 +87,22 @@ static void __afl_map_shm(void) {
/* create the shared memory segment as if it was a file */
shm_fd = shm_open(shm_file_path, O_RDWR, 0600);
if (shm_fd == -1) {
printf("shm_open() failed\n");
exit(1);
}
/* map the shared memory segment to the address space of the process */
shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
if (shm_base == MAP_FAILED) {
close(shm_fd);
shm_fd = -1;
printf("mmap() failed\n");
exit(2);
}
__afl_area_ptr = shm_base;
@ -123,7 +125,6 @@ static void __afl_map_shm(void) {
}
/* Fork server logic. */
static void __afl_start_forkserver(void) {
@ -154,8 +155,10 @@ static void __afl_start_forkserver(void) {
process. */
if (child_stopped && was_killed) {
child_stopped = 0;
if (waitpid(child_pid, &status, 0) < 0) _exit(1);
}
if (!child_stopped) {
@ -168,6 +171,7 @@ static void __afl_start_forkserver(void) {
/* In child process: close fds, resume execution. */
if (!child_pid) {
signal(SIGCHLD, old_sigchld_handler);
close(FORKSRV_FD);
@ -207,7 +211,6 @@ static void __afl_start_forkserver(void) {
}
/* A simplified persistent mode handler, used as explained in README.llvm. */
int __afl_persistent_loop(unsigned int max_cnt) {
@ -227,6 +230,7 @@ int __afl_persistent_loop(unsigned int max_cnt) {
memset(__afl_area_ptr, 0, MAP_SIZE);
__afl_area_ptr[0] = 1;
__afl_prev_loc = 0;
}
cycle_cnt = max_cnt;
@ -262,7 +266,6 @@ int __afl_persistent_loop(unsigned int max_cnt) {
}
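A harness drives this loop as described in README.llvm; a minimal usage sketch, with placeholder comments for the target-specific work:

int __afl_persistent_loop(unsigned int max_cnt);

int main(void) {
  while (__afl_persistent_loop(1000)) {
    /* read one input, exercise the target, then reset any global state
       so iterations stay independent (placeholders; see README.llvm). */
  }
  return 0;
}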
/* This one can be called from user code when deferred forkserver mode
is enabled. */
@ -280,7 +283,6 @@ void __afl_manual_init(void) {
}
/* Proper initialization routine. */
__attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
@ -293,7 +295,6 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
}
/* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard.
It remains non-operational in the traditional, plugin-backed LLVM mode.
For more info about 'trace-pc-guard', see README.llvm.
@ -302,9 +303,10 @@ __attribute__((constructor(CONST_PRIO))) void __afl_auto_init(void) {
edge (as opposed to every basic block). */
void __sanitizer_cov_trace_pc_guard(uint32_t* guard) {
__afl_area_ptr[*guard]++;
}
__afl_area_ptr[*guard]++;
}
/* Init callback. Populates instrumentation IDs. Note that we're using
ID of 0 as a special value to indicate non-instrumented bits. That may
@ -321,8 +323,10 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) {
if (x) inst_ratio = atoi(x);
if (!inst_ratio || inst_ratio > 100) {
fprintf(stderr, "[-] ERROR: Invalid AFL_INST_RATIO (must be 1-100).\n");
abort();
}
/* Make sure that the first element in the range is always set - we use that
@ -333,11 +337,14 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t* start, uint32_t* stop) {
while (start < stop) {
if (R(100) < inst_ratio) *start = R(MAP_SIZE - 1) + 1;
else *start = 0;
if (R(100) < inst_ratio)
*start = R(MAP_SIZE - 1) + 1;
else
*start = 0;
start++;
}
}

View File

@ -41,28 +41,39 @@ namespace {
public:
static char ID;
CompareTransform() : ModulePass(ID) {
}
bool runOnModule(Module &M) override;
#if LLVM_VERSION_MAJOR < 4
const char *getPassName() const override {
#else
StringRef getPassName() const override {
#endif
return "transforms compare functions";
}
private:
bool transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp
,const bool processStrncmp, const bool processStrcasecmp, const bool processStrncasecmp);
};
}
private:
bool transformCmps(Module &M, const bool processStrcmp,
const bool processMemcmp, const bool processStrncmp,
const bool processStrcasecmp,
const bool processStrncasecmp);
};
} // namespace
char CompareTransform::ID = 0;
bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp
, const bool processStrncmp, const bool processStrcasecmp, const bool processStrncasecmp) {
bool CompareTransform::transformCmps(Module &M, const bool processStrcmp,
const bool processMemcmp,
const bool processStrncmp,
const bool processStrcasecmp,
const bool processStrncasecmp) {
std::vector<CallInst *> calls;
LLVMContext & C = M.getContext();
@ -75,11 +86,10 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
#else
FunctionCallee
#endif
c = M.getOrInsertFunction("tolower",
Int32Ty,
Int32Ty
c = M.getOrInsertFunction("tolower", Int32Ty, Int32Ty
#if LLVM_VERSION_MAJOR < 5
, nullptr
,
nullptr
#endif
);
#if LLVM_VERSION_MAJOR < 9
@ -88,10 +98,14 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
FunctionCallee tolowerFn = c;
#endif
/* iterate over all functions, bbs and instruction and add suitable calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp */
/* iterate over all functions, bbs and instruction and add suitable calls to
* strcmp/memcmp/strncmp/strcasecmp/strncasecmp */
for (auto &F : M) {
for (auto &BB : F) {
for (auto &IN : BB) {
CallInst *callInst = nullptr;
if ((callInst = dyn_cast<CallInst>(&IN))) {
@ -103,10 +117,8 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
bool isStrncasecmp = processStrncasecmp;
Function *Callee = callInst->getCalledFunction();
if (!Callee)
continue;
if (callInst->getCallingConv() != llvm::CallingConv::C)
continue;
if (!Callee) continue;
if (callInst->getCallingConv() != llvm::CallingConv::C) continue;
StringRef FuncName = Callee->getName();
isStrcmp &= !FuncName.compare(StringRef("strcmp"));
isMemcmp &= !FuncName.compare(StringRef("memcmp"));
@ -114,19 +126,20 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
isStrcasecmp &= !FuncName.compare(StringRef("strcasecmp"));
isStrncasecmp &= !FuncName.compare(StringRef("strncasecmp"));
if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp)
if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp &&
!isStrncasecmp)
continue;
/* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function prototype */
/* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function
* prototype */
FunctionType *FT = Callee->getFunctionType();
isStrcmp &= FT->getNumParams() == 2 &&
FT->getReturnType()->isIntegerTy(32) &&
isStrcmp &=
FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0) == FT->getParamType(1) &&
FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext());
isStrcasecmp &= FT->getNumParams() == 2 &&
FT->getReturnType()->isIntegerTy(32) &&
isStrcasecmp &=
FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0) == FT->getParamType(1) &&
FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext());
isMemcmp &= FT->getNumParams() == 3 &&
@ -137,101 +150,122 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
isStrncmp &= FT->getNumParams() == 3 &&
FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0) == FT->getParamType(1) &&
FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()) &&
FT->getParamType(0) ==
IntegerType::getInt8PtrTy(M.getContext()) &&
FT->getParamType(2)->isIntegerTy();
isStrncasecmp &= FT->getNumParams() == 3 &&
FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0) == FT->getParamType(1) &&
FT->getParamType(0) == IntegerType::getInt8PtrTy(M.getContext()) &&
FT->getParamType(0) ==
IntegerType::getInt8PtrTy(M.getContext()) &&
FT->getParamType(2)->isIntegerTy();
if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp)
if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp &&
!isStrncasecmp)
continue;
/* is a str{n,}{case,}cmp/memcmp, check if we have
* str{case,}cmp(x, "const") or str{case,}cmp("const", x)
* strn{case,}cmp(x, "const", ..) or strn{case,}cmp("const", x, ..)
* memcmp(x, "const", ..) or memcmp("const", x, ..) */
Value *Str1P = callInst->getArgOperand(0), *Str2P = callInst->getArgOperand(1);
Value *Str1P = callInst->getArgOperand(0),
*Str2P = callInst->getArgOperand(1);
StringRef Str1, Str2;
bool HasStr1 = getConstantStringInfo(Str1P, Str1);
bool HasStr2 = getConstantStringInfo(Str2P, Str2);
/* handle cases of one string is const, one string is variable */
if (!(HasStr1 ^ HasStr2))
continue;
if (!(HasStr1 ^ HasStr2)) continue;
if (isMemcmp || isStrncmp || isStrncasecmp) {
/* check if third operand is a constant integer
* strlen("constStr") and sizeof() are treated as constant */
Value * op2 = callInst->getArgOperand(2);
ConstantInt *ilen = dyn_cast<ConstantInt>(op2);
if (!ilen)
continue;
/* final precaution: if size of compare is larger than constant string skip it*/
uint64_t literalLength = HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P);
if (literalLength < ilen->getZExtValue())
continue;
if (!ilen) continue;
/* final precaution: if size of compare is larger than constant
* string skip it*/
uint64_t literalLength =
HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P);
if (literalLength < ilen->getZExtValue()) continue;
}
calls.push_back(callInst);
}
}
}
}
if (!calls.size())
return false;
errs() << "Replacing " << calls.size() << " calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp\n";
}
}
}
if (!calls.size()) return false;
errs() << "Replacing " << calls.size()
<< " calls to strcmp/memcmp/strncmp/strcasecmp/strncasecmp\n";
for (auto &callInst : calls) {
Value *Str1P = callInst->getArgOperand(0), *Str2P = callInst->getArgOperand(1);
Value *Str1P = callInst->getArgOperand(0),
*Str2P = callInst->getArgOperand(1);
StringRef Str1, Str2, ConstStr;
std::string TmpConstStr;
Value * VarStr;
bool HasStr1 = getConstantStringInfo(Str1P, Str1);
getConstantStringInfo(Str2P, Str2);
uint64_t constLen, sizedLen;
bool isMemcmp = !callInst->getCalledFunction()->getName().compare(StringRef("memcmp"));
bool isSizedcmp = isMemcmp
|| !callInst->getCalledFunction()->getName().compare(StringRef("strncmp"))
|| !callInst->getCalledFunction()->getName().compare(StringRef("strncasecmp"));
bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare(StringRef("strcasecmp"))
|| !callInst->getCalledFunction()->getName().compare(StringRef("strncasecmp"));
bool isMemcmp =
!callInst->getCalledFunction()->getName().compare(StringRef("memcmp"));
bool isSizedcmp = isMemcmp ||
!callInst->getCalledFunction()->getName().compare(
StringRef("strncmp")) ||
!callInst->getCalledFunction()->getName().compare(
StringRef("strncasecmp"));
bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare(
StringRef("strcasecmp")) ||
!callInst->getCalledFunction()->getName().compare(
StringRef("strncasecmp"));
if (isSizedcmp) {
Value * op2 = callInst->getArgOperand(2);
ConstantInt *ilen = dyn_cast<ConstantInt>(op2);
sizedLen = ilen->getZExtValue();
}
if (HasStr1) {
TmpConstStr = Str1.str();
VarStr = Str2P;
constLen = isMemcmp ? sizedLen : GetStringLength(Str1P);
}
else {
} else {
TmpConstStr = Str2.str();
VarStr = Str1P;
constLen = isMemcmp ? sizedLen : GetStringLength(Str2P);
}
/* properly handle zero terminated C strings by adding the terminating 0 to
* the StringRef (in comparison to std::string a StringRef has built-in
* runtime bounds checking, which makes debugging easier) */
TmpConstStr.append("\0", 1); ConstStr = StringRef(TmpConstStr);
TmpConstStr.append("\0", 1);
ConstStr = StringRef(TmpConstStr);
if (isSizedcmp && constLen > sizedLen) {
constLen = sizedLen;
}
if (isSizedcmp && constLen > sizedLen) { constLen = sizedLen; }
errs() << callInst->getCalledFunction()->getName() << ": len " << constLen << ": " << ConstStr << "\n";
errs() << callInst->getCalledFunction()->getName() << ": len " << constLen
<< ": " << ConstStr << "\n";
/* split before the call instruction */
BasicBlock *bb = callInst->getParent();
BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(callInst));
BasicBlock *next_bb = BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb);
BasicBlock *next_bb =
BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb);
BranchInst::Create(end_bb, next_bb);
PHINode *PN = PHINode::Create(Int32Ty, constLen + 1, "cmp_phi");
@ -249,7 +283,6 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
char c = isCaseInsensitive ? tolower(ConstStr[i]) : ConstStr[i];
BasicBlock::iterator IP = next_bb->getFirstInsertionPt();
IRBuilder<> IRB(&*IP);
@ -257,12 +290,15 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
Value *ele = IRB.CreateInBoundsGEP(VarStr, v, "empty");
Value *load = IRB.CreateLoad(ele);
if (isCaseInsensitive) {
// load >= 'A' && load <= 'Z' ? load | 0x020 : load
std::vector<Value *> args;
args.push_back(load);
load = IRB.CreateCall(tolowerFn, args, "tmp");
load = IRB.CreateTrunc(load, Int8Ty);
}
Value *isub;
if (HasStr1)
isub = IRB.CreateSub(ConstantInt::get(Int8Ty, c), load);
@ -272,16 +308,20 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
Value *sext = IRB.CreateSExt(isub, Int32Ty);
PN->addIncoming(sext, cur_bb);
if (i < constLen - 1) {
next_bb = BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb);
next_bb =
BasicBlock::Create(C, "cmp_added", end_bb->getParent(), end_bb);
BranchInst::Create(end_bb, next_bb);
Value *icmp = IRB.CreateICmpEQ(isub, ConstantInt::get(Int8Ty, 0));
IRB.CreateCondBr(icmp, next_bb, end_bb);
cur_bb->getTerminator()->eraseFromParent();
} else {
// IRB.CreateBr(end_bb);
}
// add offset to varstr
@ -290,26 +330,30 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, const
// create icmp
// create jcc
// create next_bb
}
/* since the call is the first instruction of the bb it is safe to
* replace it with a phi instruction */
BasicBlock::iterator ii(callInst);
ReplaceInstWithInst(callInst->getParent()->getInstList(), ii, PN);
}
return true;
}
bool CompareTransform::runOnModule(Module &M) {
if (getenv("AFL_QUIET") == NULL)
llvm::errs() << "Running compare-transform-pass by laf.intel@gmail.com, extended by heiko@hexco.de\n";
llvm::errs() << "Running compare-transform-pass by laf.intel@gmail.com, "
"extended by heiko@hexco.de\n";
transformCmps(M, true, true, true, true, true);
verifyModule(M);
return true;
}
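The net effect on a call such as strcmp(x, "foo") is an unrolled per-byte comparison chain whose intermediate results feed the PHI node built above. Roughly, at the C level (illustrative only; the pass emits the equivalent IR, not C):

/* strcmp(x, "foo") after the transform, sketched as C: each byte gets its
   own compare and branch, so coverage feedback guides the fuzzer one
   character at a time. */
int cmp_foo(const unsigned char *x) {
  int r = 'f' - x[0];
  if (r != 0) return r;
  r = 'o' - x[1];
  if (r != 0) return r;
  r = 'o' - x[2];
  if (r != 0) return r;
  return 0 - x[3];  /* the terminating NUL is compared as well */
}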
static void registerCompTransPass(const PassManagerBuilder &,

View File

@ -27,32 +27,42 @@
using namespace llvm;
namespace {
class SplitComparesTransform : public ModulePass {
public:
static char ID;
SplitComparesTransform() : ModulePass(ID) {}
SplitComparesTransform() : ModulePass(ID) {
}
bool runOnModule(Module &M) override;
#if LLVM_VERSION_MAJOR >= 4
StringRef getPassName() const override {
#else
const char *getPassName() const override {
#endif
return "simplifies and splits ICMP instructions";
}
private:
bool splitCompares(Module &M, unsigned bitw);
bool simplifyCompares(Module &M);
bool simplifySignedness(Module &M);
};
}
} // namespace
char SplitComparesTransform::ID = 0;
/* This function splits ICMP instructions with xGE or xLE predicates into two
* ICMP instructions with predicate xGT or xLT and EQ */
bool SplitComparesTransform::simplifyCompares(Module &M) {
LLVMContext & C = M.getContext();
std::vector<Instruction *> icomps;
IntegerType * Int1Ty = IntegerType::getInt1Ty(C);
@ -60,8 +70,11 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
/* iterate over all functions, bbs and instruction and add
* all integer comparisons with >= and <= predicates to the icomps vector */
for (auto &F : M) {
for (auto &BB : F) {
for (auto &IN : BB) {
CmpInst *selectcmpInst = nullptr;
if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) {
@ -70,7 +83,9 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
selectcmpInst->getPredicate() != CmpInst::ICMP_SGE &&
selectcmpInst->getPredicate() != CmpInst::ICMP_ULE &&
selectcmpInst->getPredicate() != CmpInst::ICMP_SLE) {
continue;
}
auto op0 = selectcmpInst->getOperand(0);
@ -80,22 +95,22 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType());
/* this is probably not needed but we do it anyway */
if (!intTyOp0 || !intTyOp1) {
continue;
}
if (!intTyOp0 || !intTyOp1) { continue; }
icomps.push_back(selectcmpInst);
}
}
}
}
if (!icomps.size()) {
return false;
}
}
}
if (!icomps.size()) { return false; }
for (auto &IcmpInst : icomps) {
BasicBlock *bb = IcmpInst->getParent();
auto op0 = IcmpInst->getOperand(0);
@ -105,20 +120,14 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate();
CmpInst::Predicate new_pred;
switch (pred) {
case CmpInst::ICMP_UGE:
new_pred = CmpInst::ICMP_UGT;
break;
case CmpInst::ICMP_SGE:
new_pred = CmpInst::ICMP_SGT;
break;
case CmpInst::ICMP_ULE:
new_pred = CmpInst::ICMP_ULT;
break;
case CmpInst::ICMP_SLE:
new_pred = CmpInst::ICMP_SLT;
break;
case CmpInst::ICMP_UGE: new_pred = CmpInst::ICMP_UGT; break;
case CmpInst::ICMP_SGE: new_pred = CmpInst::ICMP_SGT; break;
case CmpInst::ICMP_ULE: new_pred = CmpInst::ICMP_ULT; break;
case CmpInst::ICMP_SLE: new_pred = CmpInst::ICMP_SLT; break;
default: // keep the compiler happy
continue;
}
/* split before the icmp instruction */
@ -136,8 +145,8 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
/* create a new basic block which holds the new EQ icmp */
Instruction *icmp_eq;
/* insert middle_bb before end_bb */
BasicBlock* middle_bb = BasicBlock::Create(C, "injected",
end_bb->getParent(), end_bb);
BasicBlock *middle_bb =
BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
icmp_eq = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, op0, op1);
middle_bb->getInstList().push_back(icmp_eq);
/* add an unconditional branch to the end of middle_bb with destination
@ -150,7 +159,6 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
BranchInst::Create(end_bb, middle_bb, icmp_np, bb);
term->eraseFromParent();
/* replace the old IcmpInst (which is the first inst in end_bb) with a PHI
* inst to wire up the loose ends */
PHINode *PN = PHINode::Create(Int1Ty, 2, "");
@ -162,13 +170,16 @@ bool SplitComparesTransform::simplifyCompares(Module &M) {
/* replace the old IcmpInst with our new and shiny PHI inst */
BasicBlock::iterator ii(IcmpInst);
ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN);
}
return true;
}
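In source-level terms, simplifyCompares() turns a single `a >= b` test into a strict compare plus an equality compare placed in its own basic block; a hedged C sketch of the resulting control flow:

/* What `if (a >= b)` looks like after the split (unsigned case, sketch). */
int ge_split(unsigned int a, unsigned int b) {
  if (a > b) return 1;   /* the new xGT compare                  */
  if (a == b) return 1;  /* the EQ compare in the injected block */
  return 0;
}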
/* this function transforms signed compares to equivalent unsigned compares */
bool SplitComparesTransform::simplifySignedness(Module &M) {
LLVMContext & C = M.getContext();
std::vector<Instruction *> icomps;
IntegerType * Int1Ty = IntegerType::getInt1Ty(C);
@ -176,16 +187,20 @@ bool SplitComparesTransform::simplifySignedness(Module &M) {
/* iterate over all functions, bbs and instruction and add
* all signed compares to icomps vector */
for (auto &F : M) {
for (auto &BB : F) {
for (auto &IN : BB) {
CmpInst *selectcmpInst = nullptr;
if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) {
if (selectcmpInst->getPredicate() != CmpInst::ICMP_SGT &&
selectcmpInst->getPredicate() != CmpInst::ICMP_SLT
) {
selectcmpInst->getPredicate() != CmpInst::ICMP_SLT) {
continue;
}
auto op0 = selectcmpInst->getOperand(0);
@ -195,26 +210,25 @@ bool SplitComparesTransform::simplifySignedness(Module &M) {
IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType());
/* see above */
if (!intTyOp0 || !intTyOp1) {
continue;
}
if (!intTyOp0 || !intTyOp1) { continue; }
/* I think this is not possible but too lazy to look it up */
if (intTyOp0->getBitWidth() != intTyOp1->getBitWidth()) {
continue;
}
if (intTyOp0->getBitWidth() != intTyOp1->getBitWidth()) { continue; }
icomps.push_back(selectcmpInst);
}
}
}
}
if (!icomps.size()) {
return false;
}
}
}
if (!icomps.size()) { return false; }
for (auto &IcmpInst : icomps) {
BasicBlock *bb = IcmpInst->getParent();
auto op0 = IcmpInst->getOperand(0);
@ -224,14 +238,17 @@ bool SplitComparesTransform::simplifySignedness(Module &M) {
unsigned bitw = intTyOp0->getBitWidth();
IntegerType *IntType = IntegerType::get(C, bitw);
/* get the new predicate */
auto pred = dyn_cast<CmpInst>(IcmpInst)->getPredicate();
CmpInst::Predicate new_pred;
if (pred == CmpInst::ICMP_SGT) {
new_pred = CmpInst::ICMP_UGT;
} else {
new_pred = CmpInst::ICMP_ULT;
}
BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(IcmpInst));
@ -240,40 +257,52 @@ bool SplitComparesTransform::simplifySignedness(Module &M) {
* the original operands so only the first bit remains.*/
Instruction *s_op0, *t_op0, *s_op1, *t_op1, *icmp_sign_bit;
s_op0 = BinaryOperator::Create(Instruction::LShr, op0, ConstantInt::get(IntType, bitw - 1));
s_op0 = BinaryOperator::Create(Instruction::LShr, op0,
ConstantInt::get(IntType, bitw - 1));
bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op0);
t_op0 = new TruncInst(s_op0, Int1Ty);
bb->getInstList().insert(bb->getTerminator()->getIterator(), t_op0);
s_op1 = BinaryOperator::Create(Instruction::LShr, op1, ConstantInt::get(IntType, bitw - 1));
s_op1 = BinaryOperator::Create(Instruction::LShr, op1,
ConstantInt::get(IntType, bitw - 1));
bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op1);
t_op1 = new TruncInst(s_op1, Int1Ty);
bb->getInstList().insert(bb->getTerminator()->getIterator(), t_op1);
/* compare of the sign bits */
icmp_sign_bit = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, t_op0, t_op1);
icmp_sign_bit =
CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, t_op0, t_op1);
bb->getInstList().insert(bb->getTerminator()->getIterator(), icmp_sign_bit);
/* create a new basic block which is executed if the signedness bit is
* different */
Instruction *icmp_inv_sig_cmp;
BasicBlock* sign_bb = BasicBlock::Create(C, "sign", end_bb->getParent(), end_bb);
BasicBlock * sign_bb =
BasicBlock::Create(C, "sign", end_bb->getParent(), end_bb);
if (pred == CmpInst::ICMP_SGT) {
/* if we check for > and the op0 positive and op1 negative then the final
* result is true. if op0 negative and op1 pos, the cmp must result
* in false
*/
icmp_inv_sig_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, t_op0, t_op1);
icmp_inv_sig_cmp =
CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, t_op0, t_op1);
} else {
/* just the inverse of the above statement */
icmp_inv_sig_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, t_op0, t_op1);
icmp_inv_sig_cmp =
CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, t_op0, t_op1);
}
sign_bb->getInstList().push_back(icmp_inv_sig_cmp);
BranchInst::Create(end_bb, sign_bb);
/* create a new bb which is executed if signedness is equal */
Instruction *icmp_usign_cmp;
BasicBlock* middle_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
BasicBlock * middle_bb =
BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
/* we can do a normal unsigned compare now */
icmp_usign_cmp = CmpInst::Create(Instruction::ICmp, new_pred, op0, op1);
middle_bb->getInstList().push_back(icmp_usign_cmp);
@ -285,7 +314,6 @@ bool SplitComparesTransform::simplifySignedness(Module &M) {
BranchInst::Create(middle_bb, sign_bb, icmp_sign_bit, bb);
term->eraseFromParent();
PHINode *PN = PHINode::Create(Int1Ty, 2, "");
PN->addIncoming(icmp_usign_cmp, middle_bb);
@ -293,13 +321,16 @@ bool SplitComparesTransform::simplifySignedness(Module &M) {
BasicBlock::iterator ii(IcmpInst);
ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN);
}
return true;
}
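The signed-to-unsigned rewrite compares the sign bits first and only falls back to an unsigned compare when they agree. A C equivalent for `a > b` on 32-bit values, given as a sketch of the emitted logic:

#include <stdint.h>

int sgt_split(int32_t a, int32_t b) {
  uint32_t sign_a = (uint32_t)a >> 31, sign_b = (uint32_t)b >> 31;
  if (sign_a != sign_b)               /* signs differ: a > b iff a is */
    return sign_a < sign_b;           /* the non-negative operand     */
  return (uint32_t)a > (uint32_t)b;   /* same sign: unsigned order is correct */
}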
/* splits icmps of size bitw into two nested icmps with bitw/2 size each */
bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
LLVMContext &C = M.getContext();
IntegerType *Int1Ty = IntegerType::getInt1Ty(C);
@ -308,20 +339,19 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
std::vector<Instruction *> icomps;
if (bitw % 2) {
return false;
}
if (bitw % 2) { return false; }
/* not supported yet */
if (bitw > 64) {
return false;
}
if (bitw > 64) { return false; }
/* get all EQ, NE, UGT, and ULT icmps of width bitw. if the other two
* functions were executed only these four predicates should exist */
for (auto &F : M) {
for (auto &BB : F) {
for (auto &IN : BB) {
CmpInst *selectcmpInst = nullptr;
if ((selectcmpInst = dyn_cast<CmpInst>(&IN))) {
@ -329,9 +359,10 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
if (selectcmpInst->getPredicate() != CmpInst::ICMP_EQ &&
selectcmpInst->getPredicate() != CmpInst::ICMP_NE &&
selectcmpInst->getPredicate() != CmpInst::ICMP_UGT &&
selectcmpInst->getPredicate() != CmpInst::ICMP_ULT
) {
selectcmpInst->getPredicate() != CmpInst::ICMP_ULT) {
continue;
}
auto op0 = selectcmpInst->getOperand(0);
@ -340,26 +371,30 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
IntegerType *intTyOp0 = dyn_cast<IntegerType>(op0->getType());
IntegerType *intTyOp1 = dyn_cast<IntegerType>(op1->getType());
if (!intTyOp0 || !intTyOp1) {
continue;
}
if (!intTyOp0 || !intTyOp1) { continue; }
/* check if the bitwidths are the ones we are looking for */
if (intTyOp0->getBitWidth() != bitw || intTyOp1->getBitWidth() != bitw) {
if (intTyOp0->getBitWidth() != bitw ||
intTyOp1->getBitWidth() != bitw) {
continue;
}
icomps.push_back(selectcmpInst);
}
}
}
}
if (!icomps.size()) {
return false;
}
}
}
if (!icomps.size()) { return false; }
for (auto &IcmpInst : icomps) {
BasicBlock *bb = IcmpInst->getParent();
auto op0 = IcmpInst->getOperand(0);
@ -372,12 +407,14 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
/* create the comparison of the top halves of the original operands */
Instruction *s_op0, *op0_high, *s_op1, *op1_high, *icmp_high;
s_op0 = BinaryOperator::Create(Instruction::LShr, op0, ConstantInt::get(OldIntType, bitw / 2));
s_op0 = BinaryOperator::Create(Instruction::LShr, op0,
ConstantInt::get(OldIntType, bitw / 2));
bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op0);
op0_high = new TruncInst(s_op0, NewIntType);
bb->getInstList().insert(bb->getTerminator()->getIterator(), op0_high);
s_op1 = BinaryOperator::Create(Instruction::LShr, op1, ConstantInt::get(OldIntType, bitw / 2));
s_op1 = BinaryOperator::Create(Instruction::LShr, op1,
ConstantInt::get(OldIntType, bitw / 2));
bb->getInstList().insert(bb->getTerminator()->getIterator(), s_op1);
op1_high = new TruncInst(s_op1, NewIntType);
bb->getInstList().insert(bb->getTerminator()->getIterator(), op1_high);
@ -387,11 +424,13 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
/* now we have to distinguish between == != and > < */
if (pred == CmpInst::ICMP_EQ || pred == CmpInst::ICMP_NE) {
/* transformation for == and != icmps */
/* create a compare for the lower half of the original operands */
Instruction *op0_low, *op1_low, *icmp_low;
BasicBlock* cmp_low_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
BasicBlock * cmp_low_bb =
BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
op0_low = new TruncInst(op0, NewIntType);
cmp_low_bb->getInstList().push_back(op0_low);
@ -407,21 +446,30 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
* the comparison */
auto term = bb->getTerminator();
if (pred == CmpInst::ICMP_EQ) {
BranchInst::Create(cmp_low_bb, end_bb, icmp_high, bb);
} else {
/* CmpInst::ICMP_NE */
BranchInst::Create(end_bb, cmp_low_bb, icmp_high, bb);
}
term->eraseFromParent();
/* create the PHI and connect the edges accordingly */
PHINode *PN = PHINode::Create(Int1Ty, 2, "");
PN->addIncoming(icmp_low, cmp_low_bb);
if (pred == CmpInst::ICMP_EQ) {
PN->addIncoming(ConstantInt::get(Int1Ty, 0), bb);
} else {
/* CmpInst::ICMP_NE */
PN->addIncoming(ConstantInt::get(Int1Ty, 1), bb);
}
/* replace the old icmp with the new PHI */
@ -429,6 +477,7 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN);
} else {
/* CmpInst::ICMP_UGT and CmpInst::ICMP_ULT */
/* transformations for < and > */
@ -436,12 +485,20 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
* if this is true we can go to the end if not we have to got to the
* bb which checks the lower half of the operands */
Instruction *icmp_inv_cmp, *op0_low, *op1_low, *icmp_low;
BasicBlock* inv_cmp_bb = BasicBlock::Create(C, "inv_cmp", end_bb->getParent(), end_bb);
BasicBlock * inv_cmp_bb =
BasicBlock::Create(C, "inv_cmp", end_bb->getParent(), end_bb);
if (pred == CmpInst::ICMP_UGT) {
icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT, op0_high, op1_high);
icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
op0_high, op1_high);
} else {
icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, op0_high, op1_high);
icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT,
op0_high, op1_high);
}
inv_cmp_bb->getInstList().push_back(icmp_inv_cmp);
auto term = bb->getTerminator();
@ -449,7 +506,8 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
BranchInst::Create(end_bb, inv_cmp_bb, icmp_high, bb);
/* create a bb which handles the cmp of the lower halves */
BasicBlock* cmp_low_bb = BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
BasicBlock *cmp_low_bb =
BasicBlock::Create(C, "injected", end_bb->getParent(), end_bb);
op0_low = new TruncInst(op0, NewIntType);
cmp_low_bb->getInstList().push_back(op0_low);
op1_low = new TruncInst(op1, NewIntType);
@ -468,20 +526,22 @@ bool SplitComparesTransform::splitCompares(Module &M, unsigned bitw) {
BasicBlock::iterator ii(IcmpInst);
ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN);
}
}
return true;
}
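For a 64-bit operand width the split produces two nested 32-bit compares. Roughly, in C (a sketch of the generated control flow, not the IR itself):

#include <stdint.h>

int eq64_split(uint64_t a, uint64_t b) {
  if ((uint32_t)(a >> 32) != (uint32_t)(b >> 32)) return 0;  /* high halves */
  return (uint32_t)a == (uint32_t)b;                         /* low halves  */
}

int ugt64_split(uint64_t a, uint64_t b) {
  uint32_t ah = a >> 32, bh = b >> 32;
  if (ah > bh) return 1;             /* high halves already decide it   */
  if (ah < bh) return 0;             /* the inverse compare, inv_cmp_bb */
  return (uint32_t)a > (uint32_t)b;  /* equal highs: compare low halves */
}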
bool SplitComparesTransform::runOnModule(Module &M) {
int bitw = 64;
char *bitw_env = getenv("LAF_SPLIT_COMPARES_BITW");
if (!bitw_env)
bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW");
if (bitw_env) {
bitw = atoi(bitw_env);
}
if (!bitw_env) bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW");
if (bitw_env) { bitw = atoi(bitw_env); }
simplifyCompares(M);
@ -491,6 +551,7 @@ bool SplitComparesTransform::runOnModule(Module &M) {
errs() << "Split-compare-pass by laf.intel@gmail.com\n";
switch (bitw) {
case 64:
errs() << "Running split-compare-pass " << 64 << "\n";
splitCompares(M, 64);
@ -510,15 +571,19 @@ bool SplitComparesTransform::runOnModule(Module &M) {
errs() << "NOT Running split-compare-pass \n";
return false;
break;
}
verifyModule(M);
return true;
}
static void registerSplitComparesPass(const PassManagerBuilder &,
legacy::PassManagerBase &PM) {
PM.add(new SplitComparesTransform());
}
static RegisterStandardPasses RegisterSplitComparesPass(
@ -526,3 +591,4 @@ static RegisterStandardPasses RegisterSplitComparesPass(
static RegisterStandardPasses RegisterSplitComparesTransPass0(
PassManagerBuilder::EP_EnabledOnOptLevel0, registerSplitComparesPass);

View File

@ -41,47 +41,58 @@ namespace {
public:
static char ID;
SplitSwitchesTransform() : ModulePass(ID) {
}
bool runOnModule(Module &M) override;
#if LLVM_VERSION_MAJOR >= 4
StringRef getPassName() const override {
#else
const char *getPassName() const override {
#endif
return "splits switch constructs";
}
struct CaseExpr {
ConstantInt *Val;
BasicBlock * BB;
CaseExpr(ConstantInt *val = nullptr, BasicBlock *bb = nullptr) :
Val(val), BB(bb) { }
CaseExpr(ConstantInt *val = nullptr, BasicBlock *bb = nullptr)
: Val(val), BB(bb) {
}
};
typedef std::vector<CaseExpr> CaseVector;
private:
bool splitSwitches(Module &M);
bool transformCmps(Module &M, const bool processStrcmp, const bool processMemcmp);
bool transformCmps(Module &M, const bool processStrcmp,
const bool processMemcmp);
BasicBlock *switchConvert(CaseVector Cases, std::vector<bool> bytesChecked,
BasicBlock *OrigBlock, BasicBlock *NewDefault,
Value *Val, unsigned level);
};
}
} // namespace
char SplitSwitchesTransform::ID = 0;
/* switchConvert - Transform simple list of Cases into list of CaseRange's */
BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<bool> bytesChecked,
BasicBlock* OrigBlock, BasicBlock* NewDefault,
Value* Val, unsigned level) {
BasicBlock *SplitSwitchesTransform::switchConvert(
CaseVector Cases, std::vector<bool> bytesChecked, BasicBlock *OrigBlock,
BasicBlock *NewDefault, Value *Val, unsigned level) {
unsigned ValTypeBitWidth = Cases[0].Val->getBitWidth();
IntegerType *ValType = IntegerType::get(OrigBlock->getContext(), ValTypeBitWidth);
IntegerType *ValType =
IntegerType::get(OrigBlock->getContext(), ValTypeBitWidth);
IntegerType * ByteType = IntegerType::get(OrigBlock->getContext(), 8);
unsigned BytesInValue = bytesChecked.size();
std::vector<uint8_t> setSizes;
@ -92,11 +103,14 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<
/* for each of the possible cases we iterate over all bytes of the values
* build a set of possible values at each byte position in byteSets */
for (CaseExpr &Case : Cases) {
for (unsigned i = 0; i < BytesInValue; i++) {
uint8_t byte = (Case.Val->getZExtValue() >> (i * 8)) & 0xFF;
byteSets[i].insert(byte);
}
}
/* find the index of the first byte position that was not yet checked. then
@ -104,13 +118,17 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<
unsigned smallestIndex = 0;
unsigned smallestSize = 257;
for (unsigned i = 0; i < byteSets.size(); i++) {
if (bytesChecked[i])
continue;
if (bytesChecked[i]) continue;
if (byteSets[i].size() < smallestSize) {
smallestIndex = i;
smallestSize = byteSets[i].size();
}
}
assert(bytesChecked[smallestIndex] == false);
/* there are only smallestSize different bytes at index smallestIndex */
@ -118,16 +136,20 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<
Instruction *Shift, *Trunc;
Function * F = OrigBlock->getParent();
BasicBlock * NewNode = BasicBlock::Create(Val->getContext(), "NodeBlock", F);
Shift = BinaryOperator::Create(Instruction::LShr, Val, ConstantInt::get(ValType, smallestIndex * 8));
Shift = BinaryOperator::Create(Instruction::LShr, Val,
ConstantInt::get(ValType, smallestIndex * 8));
NewNode->getInstList().push_back(Shift);
if (ValTypeBitWidth > 8) {
Trunc = new TruncInst(Shift, ByteType);
NewNode->getInstList().push_back(Trunc);
}
else {
} else {
/* not necessary to trunc */
Trunc = Shift;
}
/* this is a trivial case, we can directly check for the byte,
@ -135,77 +157,107 @@ BasicBlock* SplitSwitchesTransform::switchConvert(CaseVector Cases, std::vector<
* mark the byte as checked. if this was the last byte to check
* we can finally execute the block belonging to this case */
if (smallestSize == 1) {
uint8_t byte = *(byteSets[smallestIndex].begin());
/* insert instructions to check whether the value we are switching on is equal to byte */
ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_EQ, Trunc, ConstantInt::get(ByteType, byte), "byteMatch");
/* insert instructions to check whether the value we are switching on is
* equal to byte */
ICmpInst *Comp =
new ICmpInst(ICmpInst::ICMP_EQ, Trunc, ConstantInt::get(ByteType, byte),
"byteMatch");
NewNode->getInstList().push_back(Comp);
bytesChecked[smallestIndex] = true;
if (std::all_of(bytesChecked.begin(), bytesChecked.end(), [](bool b){return b;} )) {
if (std::all_of(bytesChecked.begin(), bytesChecked.end(),
[](bool b) { return b; })) {
assert(Cases.size() == 1);
BranchInst::Create(Cases[0].BB, NewDefault, Comp, NewNode);
/* we have to update the phi nodes! */
for (BasicBlock::iterator I = Cases[0].BB->begin(); I != Cases[0].BB->end(); ++I) {
if (!isa<PHINode>(&*I)) {
continue;
}
for (BasicBlock::iterator I = Cases[0].BB->begin();
I != Cases[0].BB->end(); ++I) {
if (!isa<PHINode>(&*I)) { continue; }
PHINode *PN = cast<PHINode>(I);
/* Only update the first occurrence. */
unsigned Idx = 0, E = PN->getNumIncomingValues();
for (; Idx != E; ++Idx) {
if (PN->getIncomingBlock(Idx) == OrigBlock) {
PN->setIncomingBlock(Idx, NewNode);
break;
}
}
}
}
else {
BasicBlock* BB = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, level + 1);
} else {
BasicBlock *BB = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault,
Val, level + 1);
BranchInst::Create(BB, NewDefault, Comp, NewNode);
}
}
/* there is no byte which we can directly check on, split the tree */
else {
std::vector<uint8_t> byteVector;
std::copy(byteSets[smallestIndex].begin(), byteSets[smallestIndex].end(), std::back_inserter(byteVector));
std::copy(byteSets[smallestIndex].begin(), byteSets[smallestIndex].end(),
std::back_inserter(byteVector));
std::sort(byteVector.begin(), byteVector.end());
uint8_t pivot = byteVector[byteVector.size() / 2];
/* we already chose to divide the cases based on the value of byte at index smallestIndex
* the pivot value determines the threshold for the decicion; if a case value
* is smaller at this byte index move it to the LHS vector, otherwise to the RHS vector */
/* we already chose to divide the cases based on the value of the byte at
 * index smallestIndex; the pivot value determines the threshold for the
 * decision: if a case value is smaller at this byte index, move it to the
 * LHS vector, otherwise to the RHS vector */
CaseVector LHSCases, RHSCases;
for (CaseExpr &Case : Cases) {
uint8_t byte = (Case.Val->getZExtValue() >> (smallestIndex * 8)) & 0xFF;
if (byte < pivot) {
LHSCases.push_back(Case);
}
else {
RHSCases.push_back(Case);
}
}
BasicBlock *LBB, *RBB;
LBB = switchConvert(LHSCases, bytesChecked, OrigBlock, NewDefault, Val, level + 1);
RBB = switchConvert(RHSCases, bytesChecked, OrigBlock, NewDefault, Val, level + 1);
/* insert instructions to check whether the value we are switching on is equal to byte */
ICmpInst* Comp = new ICmpInst(ICmpInst::ICMP_ULT, Trunc, ConstantInt::get(ByteType, pivot), "byteMatch");
LHSCases.push_back(Case);
} else {
RHSCases.push_back(Case);
}
}
BasicBlock *LBB, *RBB;
LBB = switchConvert(LHSCases, bytesChecked, OrigBlock, NewDefault, Val,
level + 1);
RBB = switchConvert(RHSCases, bytesChecked, OrigBlock, NewDefault, Val,
level + 1);
/* insert instructions to check whether the value we are switching on is
* equal to byte */
ICmpInst *Comp =
new ICmpInst(ICmpInst::ICMP_ULT, Trunc,
ConstantInt::get(ByteType, pivot), "byteMatch");
NewNode->getInstList().push_back(Comp);
BranchInst::Create(LBB, RBB, Comp, NewNode);
}
return NewNode;
}
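switchConvert() therefore turns a wide switch into a tree of 8-bit checks, examining one byte of the scrutinee at a time. For a single case value the effect is roughly the following C sketch (the real pass also handles multi-case pivot splits):

#include <stdint.h>

/* switch (x) { case 0xDEADC0DE: ... } decomposed into byte checks. */
int matches_deadc0de(uint32_t x) {
  if ((uint8_t)(x >> 0)  != 0xDE) return 0;
  if ((uint8_t)(x >> 8)  != 0xC0) return 0;
  if ((uint8_t)(x >> 16) != 0xAD) return 0;
  if ((uint8_t)(x >> 24) != 0xDE) return 0;
  return 1;
}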
bool SplitSwitchesTransform::splitSwitches(Module &M) {
@ -215,20 +267,25 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) {
/* iterate over all functions, bbs and instruction and add
* all switches to switches vector for later processing */
for (auto &F : M) {
for (auto &BB : F) {
SwitchInst *switchInst = nullptr;
if ((switchInst = dyn_cast<SwitchInst>(BB.getTerminator()))) {
if (switchInst->getNumCases() < 1)
continue;
if (switchInst->getNumCases() < 1) continue;
switches.push_back(switchInst);
}
}
}
if (!switches.size())
return false;
errs() << "Rewriting " << switches.size() << " switch statements " << "\n";
}
}
if (!switches.size()) return false;
errs() << "Rewriting " << switches.size() << " switch statements "
<< "\n";
for (auto &SI : switches) {
@ -242,11 +299,13 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) {
errs() << "switch: " << SI->getNumCases() << " cases " << bitw << " bit\n";
/* If there is only the default destination or the condition checks 8 bit or less, don't bother with the code below. */
/* If there is only the default destination or the condition checks 8 bit or
* less, don't bother with the code below. */
if (!SI->getNumCases() || bitw <= 8) {
if (getenv("AFL_QUIET") == NULL)
errs() << "skip trivial switch..\n";
if (getenv("AFL_QUIET") == NULL) errs() << "skip trivial switch..\n";
continue;
}
/* Create a new, empty default block so that the new hierarchy of
@ -258,10 +317,10 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) {
NewDefault->insertInto(F, Default);
BranchInst::Create(Default, NewDefault);
/* Prepare cases vector. */
CaseVector Cases;
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i)
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e;
++i)
#if LLVM_VERSION_MAJOR < 5
Cases.push_back(CaseExpr(i.getCaseValue(), i.getCaseSuccessor()));
#else
@ -269,8 +328,10 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) {
#endif
/* bugfix thanks to pbst
* round up bytesChecked (in case getBitWidth() % 8 != 0) */
std::vector<bool> bytesChecked((7 + Cases[0].Val->getBitWidth()) / 8, false);
BasicBlock* SwitchBlock = switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, 0);
std::vector<bool> bytesChecked((7 + Cases[0].Val->getBitWidth()) / 8,
false);
BasicBlock * SwitchBlock =
switchConvert(Cases, bytesChecked, OrigBlock, NewDefault, Val, 0);
/* Branch to our shiny new if-then stuff... */
BranchInst::Create(SwitchBlock, OrigBlock);
@ -278,27 +339,32 @@ bool SplitSwitchesTransform::splitSwitches(Module &M) {
/* We are now done with the switch instruction, delete it. */
CurBlock->getInstList().erase(SI);
/* we have to update the phi nodes! */
for (BasicBlock::iterator I = Default->begin(); I != Default->end(); ++I) {
if (!isa<PHINode>(&*I)) {
continue;
}
if (!isa<PHINode>(&*I)) { continue; }
PHINode *PN = cast<PHINode>(I);
/* Only update the first occurrence. */
unsigned Idx = 0, E = PN->getNumIncomingValues();
for (; Idx != E; ++Idx) {
if (PN->getIncomingBlock(Idx) == OrigBlock) {
PN->setIncomingBlock(Idx, NewDefault);
break;
}
}
}
}
verifyModule(M);
return true;
}
bool SplitSwitchesTransform::runOnModule(Module &M) {
@ -309,6 +375,7 @@ bool SplitSwitchesTransform::runOnModule(Module &M) {
verifyModule(M);
return true;
}
static void registerSplitSwitchesTransPass(const PassManagerBuilder &,
@ -324,3 +391,4 @@ static RegisterStandardPasses RegisterSplitSwitchesTransPass(
static RegisterStandardPasses RegisterSplitSwitchesTransPass0(
PassManagerBuilder::EP_EnabledOnOptLevel0, registerSplitSwitchesTransPass);

View File

@ -17,7 +17,8 @@
// limitations under the License.
//
// solution: echo -ne 'The quick brown fox jumps over the lazy dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest
// solution: echo -ne 'The quick brown fox jumps over the lazy
// dog\xbe\xba\xfe\xca\xbe\xba\xfe\xca\xde\xc0\xad\xde\xef\xbe' | ./compcovtest
#include <cstdint>
#include <cstdio>
@ -25,39 +26,40 @@
#include <cstring>
int main() {
char buffer[44] = {/* zero padding */};
fread(buffer, 1, sizeof(buffer) - 1, stdin);
if (memcmp(&buffer[0], "The quick brown fox ", 20) != 0 ||
strncmp(&buffer[20], "jumps over ", 11) != 0 ||
strcmp(&buffer[31], "the lazy dog") != 0) {
return 1;
}
uint64_t x = 0;
fread(&x, sizeof(x), 1, stdin);
if (x != 0xCAFEBABECAFEBABE) {
return 2;
}
if (x != 0xCAFEBABECAFEBABE) { return 2; }
uint32_t y = 0;
fread(&y, sizeof(y), 1, stdin);
if (y != 0xDEADC0DE) {
return 3;
}
if (y != 0xDEADC0DE) { return 3; }
uint16_t z = 0;
fread(&z, sizeof(z), 1, stdin);
switch (z) {
case 0xBEEF:
break;
default:
return 4;
case 0xBEEF: break;
default: return 4;
}
printf("Puzzle solved, congrats!\n");
abort();
return 0;
}

View File

@ -40,8 +40,7 @@
#define MAX_CMP_LENGTH 32
static void *__compcov_code_start,
*__compcov_code_end;
static void *__compcov_code_start, *__compcov_code_end;
static u8* __compcov_afl_map;
@ -55,16 +54,12 @@ static int (*__libc_memcmp)(const void*, const void*, size_t);
static int debug_fd = -1;
#define MAX_MAPPINGS 1024
static struct mapping {
void *st, *en;
} __compcov_ro[MAX_MAPPINGS];
static struct mapping { void *st, *en; } __compcov_ro[MAX_MAPPINGS];
static u32 __compcov_ro_cnt;
/* Check an address against the list of read-only mappings. */
static u8 __compcov_is_ro(const void* ptr) {
@ -75,15 +70,17 @@ static u8 __compcov_is_ro(const void* ptr) {
if (ptr >= __compcov_ro[i].st && ptr <= __compcov_ro[i].en) return 1;
return 0;
}
static size_t __strlen2(const char* s1, const char* s2, size_t max_length) {
// from https://github.com/googleprojectzero/CompareCoverage
size_t len = 0;
for (; len < max_length && s1[len] != '\0' && s2[len] != '\0'; len++) {}
return len;
}
/* Identify the binary boundaries in the memory mapping */
@ -96,13 +93,11 @@ static void __compcov_load(void) {
__libc_strncasecmp = dlsym(RTLD_NEXT, "strncasecmp");
__libc_memcmp = dlsym(RTLD_NEXT, "memcmp");
if (getenv("AFL_QEMU_COMPCOV")) {
__compcov_level = 1;
}
if (getenv("AFL_QEMU_COMPCOV")) { __compcov_level = 1; }
if (getenv("AFL_COMPCOV_LEVEL")) {
__compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL"));
}
char* id_str = getenv(SHM_ENV_VAR);
@ -114,9 +109,11 @@ static void __compcov_load(void) {
__compcov_afl_map = shmat(shm_id, NULL, 0);
if (__compcov_afl_map == (void*)-1) exit(1);
} else {
__compcov_afl_map = calloc(1, MAP_SIZE);
}
if (getenv("AFL_INST_LIBS")) {
@ -124,6 +121,7 @@ static void __compcov_load(void) {
__compcov_code_start = (void*)0;
__compcov_code_end = (void*)-1;
return;
}
char* bin_name = getenv("AFL_COMPCOV_BINNAME");
@ -137,11 +135,12 @@ static void __compcov_load(void) {
if (!bin_name || strstr(maps_tmp->pathname, bin_name) != NULL) {
if (maps_tmp->is_x) {
if (!__compcov_code_start)
__compcov_code_start = maps_tmp->addr_start;
if (!__compcov_code_end)
__compcov_code_end = maps_tmp->addr_end;
if (!__compcov_code_start) __compcov_code_start = maps_tmp->addr_start;
if (!__compcov_code_end) __compcov_code_end = maps_tmp->addr_end;
}
}
if ((maps_tmp->is_w && !maps_tmp->is_r) || __compcov_ro_cnt == MAX_MAPPINGS)
@ -149,26 +148,33 @@ static void __compcov_load(void) {
__compcov_ro[__compcov_ro_cnt].st = maps_tmp->addr_start;
__compcov_ro[__compcov_ro_cnt].en = maps_tmp->addr_end;
}
pmparser_free(maps);
}
}
static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) {
size_t i;
if (debug_fd != 1) {
char debugbuf[4096];
snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc, v0 == NULL ? "(null)" : (char*)v0, v1 == NULL ? "(null)" : (char*)v1, n);
snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %lu\n", cur_loc,
v0 == NULL ? "(null)" : (char*)v0,
v1 == NULL ? "(null)" : (char*)v1, n);
write(debug_fd, debugbuf, strlen(debugbuf));
}
for (i = 0; i < n && v0[i] == v1[i]; ++i) {
__compcov_afl_map[cur_loc + i]++;
}
}
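Each additional matching prefix byte therefore bumps one more map entry, giving the fuzzer a gradient toward the full constant. A small sketch of the scoring, assuming illustrative inputs:

#include <stddef.h>

/* Number of map entries __compcov_trace() ends up incrementing for two
   operands: "flag" vs "flXX" scores 2, vs "flaX" scores 3, vs "flag" 4. */
static size_t matching_prefix(const char *a, const char *b, size_t max_len) {
  size_t i = 0;
  while (i < max_len && a[i] != '\0' && a[i] == b[i]) i++;
  return i;
}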
/* Check an address against the list of read-only mappings. */
@ -176,8 +182,8 @@ static void __compcov_trace(u64 cur_loc, const u8* v0, const u8* v1, size_t n) {
static u8 __compcov_is_in_bound(const void* ptr) {
return ptr >= __compcov_code_start && ptr < __compcov_code_end;
}
}
/* Replacements for strcmp(), memcmp(), and so on. Note that these will be used
only if the target is compiled with -fno-builtins and linked dynamically. */
@ -188,8 +194,9 @@ int strcmp(const char* str1, const char* str2) {
void* retaddr = __builtin_return_address(0);
if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
!__compcov_is_ro(str1) && !__compcov_is_ro(str2))) {
if (__compcov_is_in_bound(retaddr) &&
!(__compcov_level < 2 && !__compcov_is_ro(str1) &&
!__compcov_is_ro(str2))) {
size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1);
@ -200,12 +207,14 @@ int strcmp(const char* str1, const char* str2) {
cur_loc &= MAP_SIZE - 1;
__compcov_trace(cur_loc, str1, str2, n);
}
}
return __libc_strcmp(str1, str2);
}
}
#undef strncmp
@ -213,8 +222,9 @@ int strncmp(const char* str1, const char* str2, size_t len) {
void* retaddr = __builtin_return_address(0);
if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
!__compcov_is_ro(str1) && !__compcov_is_ro(str2))) {
if (__compcov_is_in_bound(retaddr) &&
!(__compcov_level < 2 && !__compcov_is_ro(str1) &&
!__compcov_is_ro(str2))) {
size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1);
n = MIN(n, len);
@ -226,12 +236,14 @@ int strncmp(const char* str1, const char* str2, size_t len) {
cur_loc &= MAP_SIZE - 1;
__compcov_trace(cur_loc, str1, str2, n);
}
}
return __libc_strncmp(str1, str2, len);
}
}
#undef strcasecmp
@ -239,8 +251,10 @@ int strcasecmp(const char* str1, const char* str2) {
void* retaddr = __builtin_return_address(0);
if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
!__compcov_is_ro(str1) && !__compcov_is_ro(str2))) {
if (__compcov_is_in_bound(retaddr) &&
!(__compcov_level < 2 && !__compcov_is_ro(str1) &&
!__compcov_is_ro(str2))) {
/* Fallback to strcmp, maybe improve in future */
size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1);
@ -252,12 +266,14 @@ int strcasecmp(const char* str1, const char* str2) {
cur_loc &= MAP_SIZE - 1;
__compcov_trace(cur_loc, str1, str2, n);
}
}
return __libc_strcasecmp(str1, str2);
}
}
#undef strncasecmp
@ -265,8 +281,10 @@ int strncasecmp(const char* str1, const char* str2, size_t len) {
void* retaddr = __builtin_return_address(0);
if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
!__compcov_is_ro(str1) && !__compcov_is_ro(str2))) {
if (__compcov_is_in_bound(retaddr) &&
!(__compcov_level < 2 && !__compcov_is_ro(str1) &&
!__compcov_is_ro(str2))) {
/* Fallback to strncmp, maybe improve in future */
size_t n = __strlen2(str1, str2, MAX_CMP_LENGTH + 1);
@ -279,12 +297,14 @@ int strncasecmp(const char* str1, const char* str2, size_t len) {
cur_loc &= MAP_SIZE - 1;
__compcov_trace(cur_loc, str1, str2, n);
}
}
return __libc_strncasecmp(str1, str2, len);
}
}
#undef memcmp
@ -292,8 +312,9 @@ int memcmp(const void* mem1, const void* mem2, size_t len) {
void* retaddr = __builtin_return_address(0);
if (__compcov_is_in_bound(retaddr) && !(__compcov_level < 2 &&
!__compcov_is_ro(mem1) && !__compcov_is_ro(mem2))) {
if (__compcov_is_in_bound(retaddr) &&
!(__compcov_level < 2 && !__compcov_is_ro(mem1) &&
!__compcov_is_ro(mem2))) {
size_t n = len;
@ -304,10 +325,13 @@ int memcmp(const void* mem1, const void* mem2, size_t len) {
cur_loc &= MAP_SIZE - 1;
__compcov_trace(cur_loc, mem1, mem2, n);
}
}
return __libc_memcmp(mem1, mem2, len);
}
/* Init code to open init the library. */
@ -315,9 +339,10 @@ int memcmp(const void* mem1, const void* mem2, size_t len) {
__attribute__((constructor)) void __compcov_init(void) {
if (getenv("AFL_QEMU_COMPCOV_DEBUG") != NULL)
debug_fd = open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644);
debug_fd =
open("compcov.debug", O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, 0644);
__compcov_load();
}

View File

@ -31,6 +31,7 @@ implied warranty.
* @desc holds all the information about an area in the process's VM
*/
typedef struct procmaps_struct {
void* addr_start; //< start address of the area
void* addr_end; //< end address
unsigned long length; //< size of the range
@ -48,6 +49,7 @@ typedef struct procmaps_struct{
char pathname[600]; //< the path of the file that backs the area
// chained list
struct procmaps_struct* next; //<handler of the chained list
} procmaps_struct;
/**
@ -55,12 +57,16 @@ typedef struct procmaps_struct{
* @desc holds iterating information
*/
typedef struct procmaps_iterator {
procmaps_struct* head;
procmaps_struct* current;
} procmaps_iterator;
/**
* pmparser_parse
* @param pid the process id whose memory map to be parser. the current process if pid<0
* @param pid the process id whose memory map is to be parsed; the current
* process if pid < 0
* @return an iterator over all the nodes
*/
procmaps_iterator* pmparser_parse(int pid);
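A minimal usage sketch of this parser API (field names as defined in the struct above; assumes the pmparser header is included):

#include <stdio.h>

void dump_executable_maps(void) {
  procmaps_iterator *it = pmparser_parse(-1);  /* -1: current process */
  if (!it) return;

  procmaps_struct *m;
  while ((m = pmparser_next(it)) != NULL)
    if (m->is_x)
      printf("%p-%p %s\n", m->addr_start, m->addr_end, m->pathname);

  pmparser_free(it);
}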
@ -83,7 +89,9 @@ void pmparser_free(procmaps_iterator* p_procmaps_it);
* _pmparser_split_line
* @description internal usage
*/
void _pmparser_split_line(char*buf,char*addr1,char*addr2,char*perm, char* offset, char* device,char*inode,char* pathname);
void _pmparser_split_line(char* buf, char* addr1, char* addr2, char* perm,
char* offset, char* device, char* inode,
char* pathname);
/**
* pmparser_print
@ -92,43 +100,54 @@ void _pmparser_split_line(char*buf,char*addr1,char*addr2,char*perm, char* offset
*/
void pmparser_print(procmaps_struct* map, int order);
/**
* gobal variables
*/
// procmaps_struct* g_last_head=NULL;
// procmaps_struct* g_current=NULL;
procmaps_iterator* pmparser_parse(int pid) {
procmaps_iterator* maps_it = malloc(sizeof(procmaps_iterator));
char maps_path[500];
if (pid >= 0) {
sprintf(maps_path, "/proc/%d/maps", pid);
} else {
sprintf(maps_path, "/proc/self/maps");
}
FILE* file = fopen(maps_path, "r");
if (!file) {
fprintf(stderr,"pmparser : cannot open the memory maps, %s\n",strerror(errno));
fprintf(stderr, "pmparser : cannot open the memory maps, %s\n",
strerror(errno));
return NULL;
}
int ind=0;char buf[PROCMAPS_LINE_MAX_LENGTH];
int ind = 0;
char buf[PROCMAPS_LINE_MAX_LENGTH];
// int c;
procmaps_struct* list_maps = NULL;
procmaps_struct* tmp;
procmaps_struct* current_node = list_maps;
char addr1[20],addr2[20], perm[8], offset[20], dev[10],inode[30],pathname[PATH_MAX];
char addr1[20], addr2[20], perm[8], offset[20], dev[10], inode[30],
pathname[PATH_MAX];
while (!feof(file)) {
fgets(buf, PROCMAPS_LINE_MAX_LENGTH, file);
// allocate a node
tmp = (procmaps_struct*)malloc(sizeof(procmaps_struct));
// fill the node
_pmparser_split_line(buf, addr1, addr2, perm, offset, dev, inode, pathname);
// printf("#%s",buf);
//printf("%s-%s %s %s %s %s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname);
//addr_start & addr_end
//unsigned long l_addr_start;
// printf("%s-%s %s %s %s
// %s\t%s\n",addr1,addr2,perm,offset,dev,inode,pathname); addr_start &
// addr_end unsigned long l_addr_start;
sscanf(addr1, "%lx", (long unsigned*)&tmp->addr_start);
sscanf(addr2, "%lx", (long unsigned*)&tmp->addr_end);
// size
@ -151,80 +170,92 @@ procmaps_iterator* pmparser_parse(int pid){
tmp->next = NULL;
// attach the node
if (ind == 0) {
list_maps = tmp;
list_maps->next = NULL;
current_node = list_maps;
}
current_node->next = tmp;
current_node = tmp;
ind++;
// printf("%s",buf);
}
// close file
fclose(file);
// g_last_head=list_maps;
maps_it->head = list_maps;
maps_it->current = list_maps;
return maps_it;
}
procmaps_struct* pmparser_next(procmaps_iterator* p_procmaps_it) {
if(p_procmaps_it->current == NULL)
return NULL;
if (p_procmaps_it->current == NULL) return NULL;
procmaps_struct* p_current = p_procmaps_it->current;
p_procmaps_it->current = p_procmaps_it->current->next;
return p_current;
/*
if(g_current==NULL){
g_current=g_last_head;
}else
g_current=g_current->next;
return g_current;
*/
}
void pmparser_free(procmaps_iterator* p_procmaps_it) {
procmaps_struct* maps_list = p_procmaps_it->head;
if (maps_list == NULL) return;
procmaps_struct* act = maps_list;
procmaps_struct* nxt = act->next;
while (act != NULL) {
free(act);
act = nxt;
if(nxt!=NULL)
nxt=nxt->next;
if (nxt != NULL) nxt = nxt->next;
}
}
void _pmparser_split_line(
char*buf,char*addr1,char*addr2,
char*perm,char* offset,char* device,char*inode,
void _pmparser_split_line(char* buf, char* addr1, char* addr2, char* perm,
char* offset, char* device, char* inode,
char* pathname) {
//
int orig = 0;
int i = 0;
// addr1
while (buf[i] != '-') {
addr1[i - orig] = buf[i];
i++;
}
addr1[i] = '\0';
i++;
// addr2
orig = i;
while (buf[i] != '\t' && buf[i] != ' ') {
addr2[i - orig] = buf[i];
i++;
}
addr2[i - orig] = '\0';
// perm
@ -232,36 +263,48 @@ void _pmparser_split_line(
i++;
orig = i;
while (buf[i] != '\t' && buf[i] != ' ') {
perm[i - orig] = buf[i];
i++;
}
perm[i - orig] = '\0';
// offset
while (buf[i] == '\t' || buf[i] == ' ')
i++;
orig = i;
while (buf[i] != '\t' && buf[i] != ' ') {
offset[i - orig] = buf[i];
i++;
}
offset[i - orig] = '\0';
// dev
while (buf[i] == '\t' || buf[i] == ' ')
i++;
orig = i;
while (buf[i] != '\t' && buf[i] != ' ') {
device[i - orig] = buf[i];
i++;
}
device[i - orig] = '\0';
// inode
while (buf[i] == '\t' || buf[i] == ' ')
i++;
orig = i;
while (buf[i] != '\t' && buf[i] != ' ') {
inode[i - orig] = buf[i];
i++;
}
inode[i - orig] = '\0';
// pathname
pathname[0] = '\0';
@ -269,12 +312,15 @@ void _pmparser_split_line(
i++;
orig = i;
while (buf[i] != '\t' && buf[i] != ' ' && buf[i] != '\n') {
pathname[i - orig] = buf[i];
i++;
}
pathname[i - orig] = '\0';
}
#endif
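Editor's note: a short usage sketch of the iterator API declared in this header. The include path and error handling are illustrative; the field names match the struct shown above.

#include <stdio.h>
#include "pmparser.h"                                /* this header              */

int dump_self_maps(void) {

  procmaps_iterator *it = pmparser_parse(-1);        /* pid < 0 => /proc/self/maps */
  if (!it) return -1;
  procmaps_struct *area;
  while ((area = pmparser_next(it)) != NULL)
    printf("%p-%p %s\n", area->addr_start, area->addr_end, area->pathname);
  pmparser_free(it);                                 /* releases every node        */
  return 0;

}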

View File

@ -42,10 +42,8 @@
"adcb $0, (%0, %1, 1)\n" \
: /* no out */ \
: "r"(afl_area_ptr), "r"(loc) \
: "memory", "eax" \
)
: "memory", "eax")
#else
# define INC_AFL_AREA(loc) \
afl_area_ptr[loc]++
# define INC_AFL_AREA(loc) afl_area_ptr[loc]++
#endif

View File

@ -42,11 +42,16 @@
_start and does the usual forkserver stuff, not very different from
regular instrumentation injected via afl-as.h. */
#define AFL_QEMU_CPU_SNIPPET2 do { \
#define AFL_QEMU_CPU_SNIPPET2 \
do { \
\
if (itb->pc == afl_entry_point) { \
\
afl_setup(); \
afl_forkserver(cpu); \
\
} \
\
} while (0)
/* We use one additional file descriptor to relay "needs translation"
@ -56,7 +61,8 @@
/* This is equivalent to afl-as.h: */
static unsigned char dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */
static unsigned char
dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */
unsigned char *afl_area_ptr = dummy; /* Exported for afl_gen_trace */
/* Exported variables populated by the code patched into elfload.c: */
@ -83,33 +89,43 @@ static void afl_setup(void);
static void afl_forkserver(CPUState *);
static void afl_wait_tsl(CPUState *, int);
static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t, TranslationBlock*, int);
static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t,
TranslationBlock *, int);
/* Data structures passed around by the translate handlers: */
struct afl_tb {
target_ulong pc;
target_ulong cs_base;
uint32_t flags;
uint32_t cf_mask;
};
struct afl_tsl {
struct afl_tb tb;
char is_chain;
};
struct afl_chain {
struct afl_tb last_tb;
uint32_t cf_mask;
int tb_exit;
};
/* Some forward decls: */
TranslationBlock *tb_htable_lookup(CPUState*, target_ulong, target_ulong, uint32_t, uint32_t);
static inline TranslationBlock *tb_find(CPUState*, TranslationBlock*, int, uint32_t);
static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb_next);
TranslationBlock *tb_htable_lookup(CPUState *, target_ulong, target_ulong,
uint32_t, uint32_t);
static inline TranslationBlock *tb_find(CPUState *, TranslationBlock *, int,
uint32_t);
static inline void tb_add_jump(TranslationBlock *tb, int n,
TranslationBlock *tb_next);
/*************************
* ACTUAL IMPLEMENTATION *
@ -119,8 +135,7 @@ static inline void tb_add_jump(TranslationBlock *tb, int n, TranslationBlock *tb
static void afl_setup(void) {
char *id_str = getenv(SHM_ENV_VAR),
*inst_r = getenv("AFL_INST_RATIO");
char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO");
int shm_id;
@ -159,13 +174,11 @@ static void afl_setup(void) {
}
/* Maintain for compatibility */
if (getenv("AFL_QEMU_COMPCOV")) {
afl_compcov_level = 1;
}
if (getenv("AFL_QEMU_COMPCOV")) { afl_compcov_level = 1; }
if (getenv("AFL_COMPCOV_LEVEL")) {
afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL"));
}
/* pthread_atfork() seems somewhat broken in util/rcu.c, and I'm
@ -176,15 +189,13 @@ static void afl_setup(void) {
}
/* Fork server logic, invoked once we hit _start. */
static void afl_forkserver(CPUState *cpu) {
static unsigned char tmp[4];
if (forkserver_installed == 1)
return;
if (forkserver_installed == 1) return;
forkserver_installed = 1;
// if (!afl_area_ptr) return; // not necessary because of fixed dummy buffer
@ -246,14 +257,14 @@ static void afl_forkserver(CPUState *cpu) {
}
/* This code is invoked whenever QEMU decides that it doesn't have a
translation of a particular block and needs to compute it, or when it
decides to chain two TBs together. When this happens, we tell the parent to
mirror the operation, so that the next fork() has a cached copy. */
static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, uint32_t cf_mask,
TranslationBlock *last_tb, int tb_exit) {
static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags,
uint32_t cf_mask, TranslationBlock *last_tb,
int tb_exit) {
struct afl_tsl t;
struct afl_chain c;
@ -270,6 +281,7 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, ui
return;
if (t.is_chain) {
c.last_tb.pc = last_tb->pc;
c.last_tb.cs_base = last_tb->cs_base;
c.last_tb.flags = last_tb->flags;
@ -278,10 +290,10 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags, ui
if (write(TSL_FD, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
return;
}
}
}
/* Check if an address is valid in the current mapping */
@ -295,10 +307,10 @@ static inline int is_valid_addr(target_ulong addr) {
l = (page + TARGET_PAGE_SIZE) - addr;
flags = page_get_flags(page);
if (!(flags & PAGE_VALID) || !(flags & PAGE_READ))
return 0;
if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) return 0;
return 1;
}
/* This is the other side of the same channel. Since timeouts are handled by
@ -316,8 +328,7 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
/* Broken pipe means it's time to return to the fork server routine. */
if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
break;
if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break;
tb = tb_htable_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
@ -333,13 +344,17 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
mmap_lock();
tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
mmap_unlock();
} else {
invalid_pc = 1;
}
}
if (t.is_chain) {
if (read(fd, &c, sizeof(struct afl_chain)) != sizeof(struct afl_chain))
break;
@ -347,10 +362,10 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
last_tb = tb_htable_lookup(cpu, c.last_tb.pc, c.last_tb.cs_base,
c.last_tb.flags, c.cf_mask);
if (last_tb) {
tb_add_jump(last_tb, c.tb_exit, tb);
}
if (last_tb) { tb_add_jump(last_tb, c.tb_exit, tb); }
}
}
}
@ -358,3 +373,4 @@ static void afl_wait_tsl(CPUState *cpu, int fd) {
close(fd);
}
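Editor's note: to keep the control flow above readable, here is a bare-bones rendering of the classic afl fork-server handshake that afl_forkserver() builds on; the QEMU variant layers the TSL pipe on top of this. Descriptor numbers and the 4-byte messages follow the standard protocol; error handling is trimmed, so treat this as a sketch, not a drop-in.

#include <stdint.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define FORKSRV_FD 198                            /* control fd; status is FORKSRV_FD + 1 */

static void forkserver_sketch(void) {

  static uint8_t hello[4];
  if (write(FORKSRV_FD + 1, hello, 4) != 4) return;      /* no fuzzer attached      */

  while (1) {

    uint32_t was_killed;
    int      status;
    if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(2);  /* wait for "go" from afl  */

    pid_t child = fork();
    if (child < 0) _exit(4);
    if (!child) {                                         /* child: run the target   */
      close(FORKSRV_FD); close(FORKSRV_FD + 1);
      return;
    }

    int32_t child_pid = child;
    if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) _exit(5);  /* report child pid    */
    if (waitpid(child, &status, 0) < 0) _exit(6);
    if (write(FORKSRV_FD + 1, &status, 4) != 4) _exit(7);     /* report exit status  */

  }

}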

View File

@ -47,51 +47,67 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
static void afl_compcov_log_16(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
if ((arg1 & 0xff) == (arg2 & 0xff)) {
INC_AFL_AREA(cur_loc);
}
if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); }
}
static void afl_compcov_log_32(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
if ((arg1 & 0xff) == (arg2 & 0xff)) {
INC_AFL_AREA(cur_loc);
if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
INC_AFL_AREA(cur_loc + 1);
if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
INC_AFL_AREA(cur_loc +2);
}
if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); }
}
}
}
static void afl_compcov_log_64(target_ulong cur_loc, target_ulong arg1,
target_ulong arg2) {
if ((arg1 & 0xff) == (arg2 & 0xff)) {
INC_AFL_AREA(cur_loc);
if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
INC_AFL_AREA(cur_loc + 1);
if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
INC_AFL_AREA(cur_loc + 2);
if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) {
INC_AFL_AREA(cur_loc + 3);
if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) {
INC_AFL_AREA(cur_loc + 4);
if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) {
INC_AFL_AREA(cur_loc + 5);
if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) {
INC_AFL_AREA(cur_loc + 6);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
TCGMemOp ot, int is_imm) {
@ -101,21 +117,15 @@ static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
if (!afl_compcov_level || cur_loc > afl_end_code || cur_loc < afl_start_code)
return;
if (!is_imm && afl_compcov_level < 2)
return;
if (!is_imm && afl_compcov_level < 2) return;
switch (ot) {
case MO_64:
func = &afl_compcov_log_64;
break;
case MO_32:
func = &afl_compcov_log_32;
break;
case MO_16:
func = &afl_compcov_log_16;
break;
default:
return;
case MO_64: func = &afl_compcov_log_64; break;
case MO_32: func = &afl_compcov_log_32; break;
case MO_16: func = &afl_compcov_log_16; break;
default: return;
}
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
@ -124,4 +134,6 @@ static void afl_gen_compcov(target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2,
if (cur_loc >= afl_inst_rms) return;
tcg_gen_afl_compcov_log_call(func, cur_loc, arg1, arg2);
}
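Editor's note: read bottom-up, afl_compcov_log_64() rewards every additional low-order byte that the two operands share. A self-contained worked example of the same prefix rule follows; the map size and location are illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAP_SIZE 65536
static uint8_t afl_area[MAP_SIZE];                /* stand-in for the SHM map  */

static void compcov_log_sketch(uint64_t cur_loc, uint64_t a, uint64_t b) {

  for (int i = 0; i < 7; i++) {                   /* low 1..7 byte prefixes    */
    uint64_t mask = (1ULL << ((i + 1) * 8)) - 1;
    if ((a & mask) != (b & mask)) break;
    afl_area[(cur_loc + i) % MAP_SIZE]++;         /* one slot per matched byte */
  }

}

int main(void) {

  compcov_log_sketch(1234, 0x1122334455AABBCCULL, 0x99887766FFAABBCCULL);
  printf("%d %d %d %d\n", afl_area[1234], afl_area[1235], afl_area[1236],
         afl_area[1237]);                         /* prints: 1 1 1 0           */
  return 0;

}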

View File

@ -36,8 +36,8 @@ void afl_maybe_log(void* cur_loc);
/* Note: we convert the 64 bit args to 32 bit and do some alignment
and endian swap. Maybe it would be better to do the alignment
and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
{
void tcg_gen_afl_maybe_log_call(target_ulong cur_loc) {
int real_args, pi;
unsigned sizemask, flags;
TCGOp * op;
@ -47,8 +47,8 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
flags = 0;
sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1);
#if defined(__sparc__) && !defined(__arch64__) \
&& !defined(CONFIG_TCG_INTERPRETER)
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* We have 64-bit values in one register, but need to pass as two
separate parameters. Split them. */
int orig_sizemask = sizemask;
@ -58,35 +58,51 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
retl = NULL;
reth = NULL;
if (sizemask != 0) {
real_args = 0;
int is_64bit = sizemask & (1 << 2);
if (is_64bit) {
TCGv_i64 orig = temp_tcgv_i64(arg);
TCGv_i32 h = tcg_temp_new_i32();
TCGv_i32 l = tcg_temp_new_i32();
tcg_gen_extr_i64_i32(l, h, orig);
split_args[real_args++] = tcgv_i32_temp(h);
split_args[real_args++] = tcgv_i32_temp(l);
} else {
split_args[real_args++] = arg;
}
nargs = real_args;
args = split_args;
sizemask = 0;
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
int is_64bit = sizemask & (1 << 2);
int is_signed = sizemask & (2 << 2);
if (!is_64bit) {
TCGv_i64 temp = tcg_temp_new_i64();
TCGv_i64 orig = temp_tcgv_i64(arg);
if (is_signed) {
tcg_gen_ext32s_i64(temp, orig);
} else {
tcg_gen_ext32u_i64(temp, orig);
}
arg = tcgv_i64_temp(temp);
}
#endif /* TCG_TARGET_EXTEND_ARGS */
op = tcg_emit_op(INDEX_op_call);
@ -98,12 +114,16 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
real_args = 0;
int is_64bit = sizemask & (1 << 2);
if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
/* some targets want aligned 64 bit args */
if (real_args & 1) {
op->args[pi++] = TCG_CALL_DUMMY_ARG;
real_args++;
}
#endif
/* If stack grows up, then we will be placing successive
arguments at lower addresses, which means we need to
@ -123,6 +143,7 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
op->args[pi++] = temp_arg(arg + 1);
#endif
real_args += 2;
}
op->args[pi++] = temp_arg(arg);
@ -136,50 +157,57 @@ void tcg_gen_afl_maybe_log_call(target_ulong cur_loc)
tcg_debug_assert(TCGOP_CALLI(op) == real_args);
tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
#if defined(__sparc__) && !defined(__arch64__) \
&& !defined(CONFIG_TCG_INTERPRETER)
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* Free all of the parts we allocated above. */
real_args = 0;
int is_64bit = orig_sizemask & (1 << 2);
if (is_64bit) {
tcg_temp_free_internal(args[real_args++]);
tcg_temp_free_internal(args[real_args++]);
} else {
real_args++;
}
if (orig_sizemask & 1) {
/* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
Note that describing these as TCGv_i64 eliminates an unnecessary
zero-extension that tcg_gen_concat_i32_i64 would create. */
tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
tcg_temp_free_i64(retl);
tcg_temp_free_i64(reth);
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
int is_64bit = sizemask & (1 << 2);
if (!is_64bit) {
tcg_temp_free_internal(arg);
}
#endif /* TCG_TARGET_EXTEND_ARGS */
}
void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg1, TCGv_i64 arg2)
{
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
int is_64bit = sizemask & (1 << 2);
if (!is_64bit) { tcg_temp_free_internal(arg); }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc,
TCGv_i64 arg1, TCGv_i64 arg2) {
int i, real_args, nb_rets, pi;
unsigned sizemask, flags;
TCGOp * op;
const int nargs = 3;
TCGTemp *args[3] = { tcgv_i64_temp( tcg_const_tl(cur_loc) ),
tcgv_i64_temp(arg1),
TCGTemp *args[3] = {tcgv_i64_temp(tcg_const_tl(cur_loc)), tcgv_i64_temp(arg1),
tcgv_i64_temp(arg2)};
flags = 0;
sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) |
dh_sizemask(i64, 2) | dh_sizemask(i64, 3);
sizemask = dh_sizemask(void, 0) | dh_sizemask(i64, 1) | dh_sizemask(i64, 2) |
dh_sizemask(i64, 3);
#if defined(__sparc__) && !defined(__arch64__) \
&& !defined(CONFIG_TCG_INTERPRETER)
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* We have 64-bit values in one register, but need to pass as two
separate parameters. Split them. */
int orig_sizemask = sizemask;
@ -190,38 +218,58 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg
retl = NULL;
reth = NULL;
if (sizemask != 0) {
for (i = real_args = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (is_64bit) {
TCGv_i64 orig = temp_tcgv_i64(args[i]);
TCGv_i32 h = tcg_temp_new_i32();
TCGv_i32 l = tcg_temp_new_i32();
tcg_gen_extr_i64_i32(l, h, orig);
split_args[real_args++] = tcgv_i32_temp(h);
split_args[real_args++] = tcgv_i32_temp(l);
} else {
split_args[real_args++] = args[i];
}
}
nargs = real_args;
args = split_args;
sizemask = 0;
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
for (i = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
int is_signed = sizemask & (2 << (i + 1) * 2);
if (!is_64bit) {
TCGv_i64 temp = tcg_temp_new_i64();
TCGv_i64 orig = temp_tcgv_i64(args[i]);
if (is_signed) {
tcg_gen_ext32s_i64(temp, orig);
} else {
tcg_gen_ext32u_i64(temp, orig);
}
args[i] = tcgv_i64_temp(temp);
}
}
#endif /* TCG_TARGET_EXTEND_ARGS */
op = tcg_emit_op(INDEX_op_call);
@ -232,14 +280,19 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg
real_args = 0;
for (i = 0; i < nargs; i++) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
/* some targets want aligned 64 bit args */
if (real_args & 1) {
op->args[pi++] = TCG_CALL_DUMMY_ARG;
real_args++;
}
#endif
/* If stack grows up, then we will be placing successive
arguments at lower addresses, which means we need to
@ -260,11 +313,14 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg
#endif
real_args += 2;
continue;
}
op->args[pi++] = temp_arg(args[i]);
real_args++;
}
op->args[pi++] = (uintptr_t)func;
op->args[pi++] = flags;
TCGOP_CALLI(op) = real_args;
@ -273,33 +329,45 @@ void tcg_gen_afl_compcov_log_call(void *func, target_ulong cur_loc, TCGv_i64 arg
tcg_debug_assert(TCGOP_CALLI(op) == real_args);
tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
#if defined(__sparc__) && !defined(__arch64__) \
&& !defined(CONFIG_TCG_INTERPRETER)
#if defined(__sparc__) && !defined(__arch64__) && \
!defined(CONFIG_TCG_INTERPRETER)
/* Free all of the parts we allocated above. */
for (i = real_args = 0; i < orig_nargs; ++i) {
int is_64bit = orig_sizemask & (1 << (i + 1) * 2);
if (is_64bit) {
tcg_temp_free_internal(args[real_args++]);
tcg_temp_free_internal(args[real_args++]);
} else {
real_args++;
}
}
if (orig_sizemask & 1) {
/* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
Note that describing these as TCGv_i64 eliminates an unnecessary
zero-extension that tcg_gen_concat_i32_i64 would create. */
tcg_gen_concat32_i64(temp_tcgv_i64(NULL), retl, reth);
tcg_temp_free_i64(retl);
tcg_temp_free_i64(reth);
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
for (i = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i+1)*2);
if (!is_64bit) {
tcg_temp_free_internal(args[i]);
}
}
#endif /* TCG_TARGET_EXTEND_ARGS */
}
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
for (i = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i + 1) * 2);
if (!is_64bit) { tcg_temp_free_internal(args[i]); }
}
#endif /* TCG_TARGET_EXTEND_ARGS */
}

View File

@ -59,7 +59,9 @@ static void afl_gen_trace(target_ulong cur_loc) {
/* Optimize for cur_loc > afl_end_code, which is the most likely case on
Linux systems. */
if (cur_loc > afl_end_code || cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of static dummy buffer
if (cur_loc > afl_end_code ||
cur_loc < afl_start_code /*|| !afl_area_ptr*/) // not needed because of
// static dummy buffer
return;
/* Looks like QEMU always maps to fixed locations, so ASLR is not a
@ -77,3 +79,4 @@ static void afl_gen_trace(target_ulong cur_loc) {
tcg_gen_afl_maybe_log_call(cur_loc);
}
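Editor's note: afl_gen_trace() ultimately emits a call to afl_maybe_log(); for readers who have not seen it, the edge-coverage update it performs looks roughly like the classic afl form below. This is a sketch rather than a copy of the function in this file.

#include <stdint.h>

#define MAP_SIZE 65536
static uint8_t   afl_area[MAP_SIZE];              /* shared coverage map        */
static uintptr_t prev_loc;                        /* previous block, shifted    */

static void maybe_log_sketch(uintptr_t cur_loc) {

  /* cur_loc is assumed already hashed and masked to MAP_SIZE - 1 by the caller. */
  afl_area[cur_loc ^ prev_loc]++;                 /* count the (prev, cur) edge */
  prev_loc = cur_loc >> 1;                        /* shift so A->B != B->A      */

}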

View File

@ -75,11 +75,9 @@ static u8 edges_only, /* Ignore hit counts? */
use_hex_offsets, /* Show hex offsets? */
use_stdin = 1; /* Use stdin for program input? */
static volatile u8
stop_soon, /* Ctrl-C pressed? */
static volatile u8 stop_soon, /* Ctrl-C pressed? */
child_timed_out; /* Child timed out? */
/* Constants used for describing byte behavior. */
#define RESP_NONE 0x00 /* Changing byte is a no-op. */
@ -91,8 +89,8 @@ static volatile u8
#define RESP_CKSUM 0x05 /* Potential checksum */
#define RESP_SUSPECT 0x06 /* Potential "suspect" blob */
/* Classify tuple counts. This is a slow & naive version, but good enough here. */
/* Classify tuple counts. This is a slow & naive version, but good enough here.
*/
static u8 count_class_lookup[256] = {
@ -115,21 +113,24 @@ static void classify_counts(u8* mem) {
if (edges_only) {
while (i--) {
if (*mem) *mem = 1;
mem++;
}
} else {
while (i--) {
*mem = count_class_lookup[*mem];
mem++;
}
}
}
}
/* See if any bytes are set in the bitmap. */
@ -138,13 +139,13 @@ static inline u8 anything_set(void) {
u32* ptr = (u32*)trace_bits;
u32 i = (MAP_SIZE >> 2);
while (i--) if (*(ptr++)) return 1;
while (i--)
if (*(ptr++)) return 1;
return 0;
}
/* Get rid of temp files (atexit handler). */
static void at_exit_handler(void) {
@ -153,7 +154,6 @@ static void at_exit_handler(void) {
}
/* Read initial file. */
static void read_initial_file(void) {
@ -163,8 +163,7 @@ static void read_initial_file(void) {
if (fd < 0) PFATAL("Unable to open '%s'", in_file);
if (fstat(fd, &st) || !st.st_size)
FATAL("Zero-sized input file.");
if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file.");
if (st.st_size >= TMIN_MAX_FILE)
FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024);
@ -180,7 +179,6 @@ static void read_initial_file(void) {
}
/* Write output file. */
static s32 write_to_file(u8* path, u8* mem, u32 len) {
@ -201,7 +199,6 @@ static s32 write_to_file(u8* path, u8* mem, u32 len) {
}
/* Handle timeout signal. */
static void handle_timeout(int sig) {
@ -211,7 +208,6 @@ static void handle_timeout(int sig) {
}
/* Execute target application. Returns exec checksum, or 0 if program
times out. */
@ -237,8 +233,7 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
struct rlimit r;
if (dup2(use_stdin ? prog_in_fd : dev_null_fd, 0) < 0 ||
dup2(dev_null_fd, 1) < 0 ||
dup2(dev_null_fd, 2) < 0) {
dup2(dev_null_fd, 1) < 0 || dup2(dev_null_fd, 2) < 0) {
*(u32*)trace_bits = EXEC_FAIL_SIG;
PFATAL("dup2() failed");
@ -303,8 +298,10 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
total_execs++;
if (stop_soon) {
SAYF(cRST cLRD "\n+++ Analysis aborted by user +++\n" cRST);
exit(1);
}
/* Always discard inputs that time out. */
@ -335,7 +332,6 @@ static u32 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
}
#ifdef USE_COLOR
/* Helper function to display a human-readable character. */
@ -353,24 +349,25 @@ static void show_char(u8 val) {
}
/* Show the legend */
static void show_legend(void) {
SAYF(" " cLGR bgGRA " 01 " cRST " - no-op block "
cBLK bgLGN " 01 " cRST " - suspected length field\n"
" " cBRI bgGRA " 01 " cRST " - superficial content "
cBLK bgYEL " 01 " cRST " - suspected cksum or magic int\n"
" " cBLK bgCYA " 01 " cRST " - critical stream "
cBLK bgLRD " 01 " cRST " - suspected checksummed block\n"
SAYF(" " cLGR bgGRA " 01 " cRST " - no-op block " cBLK bgLGN
" 01 " cRST
" - suspected length field\n"
" " cBRI bgGRA " 01 " cRST " - superficial content " cBLK bgYEL
" 01 " cRST
" - suspected cksum or magic int\n"
" " cBLK bgCYA " 01 " cRST " - critical stream " cBLK bgLRD
" 01 " cRST
" - suspected checksummed block\n"
" " cBLK bgMGN " 01 " cRST " - \"magic value\" section\n\n");
}
#endif /* USE_COLOR */
/* Interpret and report a pattern in the input file. */
static void dump_hex(u8* buf, u32 len, u8* b_data) {
@ -409,15 +406,19 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
/* Small integers may be length fields. */
if (val && (val <= in_len || SWAP16(val) <= in_len)) {
rtype = RESP_LEN;
break;
}
/* Uniform integers may be checksums. */
if (val && abs(in_data[i] - in_data[i + 1]) > 32) {
rtype = RESP_CKSUM;
break;
}
break;
@ -431,8 +432,10 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
/* Small integers may be length fields. */
if (val && (val <= in_len || SWAP32(val) <= in_len)) {
rtype = RESP_LEN;
break;
}
/* Uniform integers may be checksums. */
@ -440,15 +443,19 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
if (val && (in_data[i] >> 7 != in_data[i + 1] >> 7 ||
in_data[i] >> 7 != in_data[i + 2] >> 7 ||
in_data[i] >> 7 != in_data[i + 3] >> 7)) {
rtype = RESP_CKSUM;
break;
}
break;
}
case 1: case 3: case 5 ... MAX_AUTO_EXTRA - 1: break;
case 1:
case 3:
case 5 ... MAX_AUTO_EXTRA - 1: break;
default: rtype = RESP_SUSPECT;
@ -489,7 +496,10 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
show_char(in_data[i + off]);
if (off != rlen - 1 && (i + off + 1) % 16) SAYF(" "); else SAYF(cRST " ");
if (off != rlen - 1 && (i + off + 1) % 16)
SAYF(" ");
else
SAYF(cRST " ");
}
@ -524,8 +534,6 @@ static void dump_hex(u8* buf, u32 len, u8* b_data) {
}
/* Actually analyze! */
static void analyze(char** argv) {
@ -587,12 +595,15 @@ static void analyze(char** argv) {
b_data[i] = RESP_FIXED;
} else b_data[i] = RESP_VARIABLE;
} else
b_data[i] = RESP_VARIABLE;
/* When all checksums change, flip most significant bit of b_data. */
if (prev_xff != xor_ff && prev_x01 != xor_01 &&
prev_s10 != sub_10 && prev_a10 != add_10) seq_byte ^= 0x80;
if (prev_xff != xor_ff && prev_x01 != xor_01 && prev_s10 != sub_10 &&
prev_a10 != add_10)
seq_byte ^= 0x80;
b_data[i] |= seq_byte;
@ -618,8 +629,6 @@ static void analyze(char** argv) {
}
/* Handle Ctrl-C and the like. */
static void handle_stop_sig(int sig) {
@ -630,7 +639,6 @@ static void handle_stop_sig(int sig) {
}
/* Do basic preparations - persistent fds, filenames, etc. */
static void set_up_environment(void) {
@ -674,18 +682,20 @@ static void set_up_environment(void) {
if (x) {
if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
FATAL("Custom MSAN_OPTIONS set without exit_code="
STRINGIFY(MSAN_ERROR) " - please fix!");
FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY(
MSAN_ERROR) " - please fix!");
if (!strstr(x, "symbolize=0"))
FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
}
setenv("ASAN_OPTIONS", "abort_on_error=1:"
setenv("ASAN_OPTIONS",
"abort_on_error=1:"
"detect_leaks=0:"
"symbolize=0:"
"allocator_may_return_null=1", 0);
"allocator_may_return_null=1",
0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
"symbolize=0:"
@ -694,12 +704,13 @@ static void set_up_environment(void) {
"msan_track_origins=0", 0);
if (getenv("AFL_PRELOAD")) {
setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
}
}
}
/* Setup signal handlers, duh. */
@ -727,12 +738,12 @@ static void setup_signal_handlers(void) {
}
/* Display usage hints. */
static void usage(u8* argv0) {
SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
SAYF(
"\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
"Required parameters:\n\n"
@ -758,7 +769,6 @@ static void usage(u8* argv0) {
}
/* Find binary. */
static void find_binary(u8* fname) {
@ -786,7 +796,9 @@ static void find_binary(u8* fname) {
memcpy(cur_elem, env_path, delim - env_path);
delim++;
} else cur_elem = ck_strdup(env_path);
} else
cur_elem = ck_strdup(env_path);
env_path = delim;
@ -798,7 +810,8 @@ static void find_binary(u8* fname) {
ck_free(cur_elem);
if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
(st.st_mode & 0111) && st.st_size >= 4) break;
(st.st_mode & 0111) && st.st_size >= 4)
break;
ck_free(target_path);
target_path = 0;
@ -811,7 +824,6 @@ static void find_binary(u8* fname) {
}
/* Fix up argv for QEMU. */
static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
@ -832,8 +844,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
cp = alloc_printf("%s/afl-qemu-trace", tmp);
if (access(cp, X_OK))
FATAL("Unable to find '%s'", tmp);
if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
target_path = new_argv[0] = cp;
return new_argv;
@ -857,7 +868,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
}
} else ck_free(own_copy);
} else
ck_free(own_copy);
if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
@ -920,7 +933,8 @@ int main(int argc, char** argv) {
}
if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
optarg[0] == '-') FATAL("Bad syntax used for -m");
optarg[0] == '-')
FATAL("Bad syntax used for -m");
switch (suffix) {
@ -970,9 +984,7 @@ int main(int argc, char** argv) {
unicorn_mode = 1;
break;
default:
usage(argv[0]);
default: usage(argv[0]);
}
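Editor's note: the length-field heuristic in dump_hex() above is easy to check by hand. A tiny standalone version follows; the values and the SWAP16 definition are illustrative, and a little-endian host is assumed.

#include <stdint.h>
#include <stdio.h>

#define SWAP16(x) ((uint16_t)(((x) << 8) | ((x) >> 8)))

int main(void) {

  uint8_t  in_data[2] = {0x01, 0x2c};    /* two bytes taken from an input file */
  uint32_t in_len     = 300;             /* total input length                 */
  uint16_t val        = *(uint16_t *)in_data;

  /* Flag the offset as a potential length field if the value, in either byte
     order, does not exceed the file length: here 0x2c01 is too big, but
     SWAP16 gives 0x012c == 300, which matches in_len. */
  if (val && (val <= in_len || SWAP16(val) <= in_len))
    printf("offset looks like a length field\n");
  return 0;

}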

View File

@ -80,7 +80,6 @@ static u8 use_64bit = 0;
#endif /* ^__x86_64__ */
/* Examine and modify parameters to pass to 'as'. Note that the file name
is always the last parameter passed by GCC, so we exploit this property
to keep the code simple. */
@ -134,8 +133,10 @@ static void edit_params(int argc, char** argv) {
for (i = 1; i < argc - 1; i++) {
if (!strcmp(argv[i], "--64")) use_64bit = 1;
else if (!strcmp(argv[i], "--32")) use_64bit = 0;
if (!strcmp(argv[i], "--64"))
use_64bit = 1;
else if (!strcmp(argv[i], "--32"))
use_64bit = 0;
#ifdef __APPLE__
@ -143,7 +144,8 @@ static void edit_params(int argc, char** argv) {
if (!strcmp(argv[i], "-arch") && i + 1 < argc) {
if (!strcmp(argv[i + 1], "x86_64")) use_64bit = 1;
if (!strcmp(argv[i + 1], "x86_64"))
use_64bit = 1;
else if (!strcmp(argv[i + 1], "i386"))
FATAL("Sorry, 32-bit Apple platforms are not supported.");
@ -181,13 +183,17 @@ static void edit_params(int argc, char** argv) {
if (input_file[0] == '-') {
if (!strcmp(input_file + 1, "-version")) {
just_version = 1;
modified_file = input_file;
goto wrap_things_up;
}
if (input_file[1]) FATAL("Incorrect use (not called through afl-gcc?)");
else input_file = NULL;
if (input_file[1])
FATAL("Incorrect use (not called through afl-gcc?)");
else
input_file = NULL;
} else {
@ -197,13 +203,13 @@ static void edit_params(int argc, char** argv) {
NSS. */
if (strncmp(input_file, tmp_dir, strlen(tmp_dir)) &&
strncmp(input_file, "/var/tmp/", 9) &&
strncmp(input_file, "/tmp/", 5)) pass_thru = 1;
strncmp(input_file, "/var/tmp/", 9) && strncmp(input_file, "/tmp/", 5))
pass_thru = 1;
}
modified_file = alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(),
(u32)time(NULL));
modified_file =
alloc_printf("%s/.afl-%u-%u.s", tmp_dir, getpid(), (u32)time(NULL));
wrap_things_up:
@ -212,7 +218,6 @@ wrap_things_up:
}
/* Process input file, generate modified_file. Insert instrumentation in all
the appropriate places. */
@ -225,8 +230,8 @@ static void add_instrumentation(void) {
s32 outfd;
u32 ins_lines = 0;
u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0,
skip_intel = 0, skip_app = 0, instrument_next = 0;
u8 instr_ok = 0, skip_csect = 0, skip_next_label = 0, skip_intel = 0,
skip_app = 0, instrument_next = 0;
#ifdef __APPLE__
@ -239,7 +244,9 @@ static void add_instrumentation(void) {
inf = fopen(input_file, "r");
if (!inf) PFATAL("Unable to read '%s'", input_file);
} else inf = stdin;
} else
inf = stdin;
outfd = open(modified_file, O_WRONLY | O_EXCL | O_CREAT, 0600);
@ -284,22 +291,26 @@ static void add_instrumentation(void) {
around them, so we use that as a signal. */
if (!clang_mode && instr_ok && !strncmp(line + 2, "p2align ", 8) &&
isdigit(line[10]) && line[11] == '\n') skip_next_label = 1;
isdigit(line[10]) && line[11] == '\n')
skip_next_label = 1;
if (!strncmp(line + 2, "text\n", 5) ||
!strncmp(line + 2, "section\t.text", 13) ||
!strncmp(line + 2, "section\t__TEXT,__text", 21) ||
!strncmp(line + 2, "section __TEXT,__text", 21)) {
instr_ok = 1;
continue;
}
if (!strncmp(line + 2, "section\t", 8) ||
!strncmp(line + 2, "section ", 8) ||
!strncmp(line + 2, "bss\n", 4) ||
!strncmp(line + 2, "section ", 8) || !strncmp(line + 2, "bss\n", 4) ||
!strncmp(line + 2, "data\n", 5)) {
instr_ok = 0;
continue;
}
}
@ -354,8 +365,9 @@ static void add_instrumentation(void) {
*/
if (skip_intel || skip_app || skip_csect || !instr_ok ||
line[0] == '#' || line[0] == ' ') continue;
if (skip_intel || skip_app || skip_csect || !instr_ok || line[0] == '#' ||
line[0] == ' ')
continue;
/* Conditional branch instruction (jnz, etc). We append the instrumentation
right after the branch (to instrument the not-taken path) and at the
@ -404,15 +416,16 @@ static void add_instrumentation(void) {
/* Apple: L<num> / LBB<num> */
if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3)))
&& R(100) < inst_ratio) {
if ((isdigit(line[1]) || (clang_mode && !strncmp(line, "LBB", 3))) &&
R(100) < inst_ratio) {
#else
/* Apple: .L<num> / .LBB<num> */
if ((isdigit(line[2]) || (clang_mode && !strncmp(line + 1, "LBB", 3)))
&& R(100) < inst_ratio) {
if ((isdigit(line[2]) ||
(clang_mode && !strncmp(line + 1, "LBB", 3))) &&
R(100) < inst_ratio) {
#endif /* __APPLE__ */
@ -427,7 +440,10 @@ static void add_instrumentation(void) {
.Lfunc_begin0-style exception handling calculations (a problem on
MacOS X). */
if (!skip_next_label) instrument_next = 1; else skip_next_label = 0;
if (!skip_next_label)
instrument_next = 1;
else
skip_next_label = 0;
}
@ -443,27 +459,27 @@ static void add_instrumentation(void) {
}
if (ins_lines)
fputs(use_64bit ? main_payload_64 : main_payload_32, outf);
if (ins_lines) fputs(use_64bit ? main_payload_64 : main_payload_32, outf);
if (input_file) fclose(inf);
fclose(outf);
if (!be_quiet) {
if (!ins_lines) WARNF("No instrumentation targets found%s.",
if (!ins_lines)
WARNF("No instrumentation targets found%s.",
pass_thru ? " (pass-thru mode)" : "");
else OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).",
ins_lines, use_64bit ? "64" : "32",
getenv("AFL_HARDEN") ? "hardened" :
(sanitizer ? "ASAN/MSAN" : "non-hardened"),
else
OKF("Instrumented %u locations (%s-bit, %s mode, ratio %u%%).", ins_lines,
use_64bit ? "64" : "32",
getenv("AFL_HARDEN") ? "hardened"
: (sanitizer ? "ASAN/MSAN" : "non-hardened"),
inst_ratio);
}
}
/* Main entry point */
int main(int argc, char** argv) {
@ -482,17 +498,24 @@ int main(int argc, char** argv) {
SAYF(cCYA "afl-as" VERSION cRST " by <lcamtuf@google.com>\n");
} else be_quiet = 1;
} else
be_quiet = 1;
if (argc < 2) {
SAYF("\n"
"This is a helper application for afl-fuzz. It is a wrapper around GNU 'as',\n"
"executed by the toolchain whenever using afl-gcc or afl-clang. You probably\n"
SAYF(
"\n"
"This is a helper application for afl-fuzz. It is a wrapper around GNU "
"'as',\n"
"executed by the toolchain whenever using afl-gcc or afl-clang. You "
"probably\n"
"don't want to run this program directly.\n\n"
"Rarely, when dealing with extremely complex projects, it may be advisable to\n"
"set AFL_INST_RATIO to a value less than 100 in order to reduce the odds of\n"
"Rarely, when dealing with extremely complex projects, it may be "
"advisable to\n"
"set AFL_INST_RATIO to a value less than 100 in order to reduce the "
"odds of\n"
"instrumenting every discovered branch.\n\n");
exit(1);
@ -524,9 +547,10 @@ int main(int argc, char** argv) {
that... */
if (getenv("AFL_USE_ASAN") || getenv("AFL_USE_MSAN")) {
sanitizer = 1;
if (!getenv("AFL_INST_RATIO"))
inst_ratio /= 3;
if (!getenv("AFL_INST_RATIO")) inst_ratio /= 3;
}
if (!just_version) add_instrumentation();
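Editor's note: the label-handling hunks above gate instrumentation on R(100) < inst_ratio, so AFL_INST_RATIO controls what fraction of candidate sites get a trampoline. A minimal sketch of that decision, with R() defined in the same shape as afl's helper (the constants are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define R(x) (random() % (x))                    /* same shape as afl's R()    */

int main(void) {

  int inst_ratio = 30, hits = 0, trials = 100000;
  srandom(42);
  for (int i = 0; i < trials; i++)
    if (R(100) < inst_ratio) hits++;             /* instrument ~inst_ratio %   */
  printf("instrumented %d of %d candidate sites (~%d%%)\n", hits, trials,
         hits * 100 / trials);
  return 0;

}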

View File

@ -16,7 +16,6 @@
# include <unistd.h>
#endif
void detect_file_args(char** argv, u8* prog_in) {
u32 i = 0;
@ -27,11 +26,16 @@ void detect_file_args(char** argv, u8* prog_in) {
char* buf;
long size = pathconf(".", _PC_PATH_MAX);
if ((buf = (char*)malloc((size_t)size)) != NULL) {
cwd = getcwd(buf, (size_t)size); /* portable version */
} else {
PFATAL("getcwd() failed");
cwd = 0; /* for dumb compilers */
}
#endif
if (!cwd) PFATAL("getcwd() failed");
@ -48,8 +52,10 @@ void detect_file_args(char** argv, u8* prog_in) {
/* Be sure that we're always using fully-qualified paths. */
if (prog_in[0] == '/') aa_subst = prog_in;
else aa_subst = alloc_printf("%s/%s", cwd, prog_in);
if (prog_in[0] == '/')
aa_subst = prog_in;
else
aa_subst = alloc_printf("%s/%s", cwd, prog_in);
/* Construct a replacement argv value. */
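Editor's note: a condensed sketch of what detect_file_args() does with "@@" - build an absolute path for the input file and splice it into the matching argv entry. The allocation helpers are replaced with plain libc for illustration.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void subst_at_at(char **argv, const char *prog_in) {

  char cwd[PATH_MAX], aa_subst[PATH_MAX];
  if (!getcwd(cwd, sizeof(cwd))) { perror("getcwd"); exit(1); }

  /* Always pass a fully-qualified path to the target. */
  if (prog_in[0] == '/') snprintf(aa_subst, sizeof(aa_subst), "%s", prog_in);
  else                   snprintf(aa_subst, sizeof(aa_subst), "%s/%s", cwd, prog_in);

  for (int i = 0; argv[i]; i++) {

    char *aa_loc = strstr(argv[i], "@@");
    if (!aa_loc) continue;
    char *n = malloc(strlen(argv[i]) + strlen(aa_subst) + 1);
    *aa_loc = 0;                                 /* split around "@@"          */
    sprintf(n, "%s%s%s", argv[i], aa_subst, aa_loc + 2);
    argv[i] = n;                                 /* small leak, fine for argv  */

  }

}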

View File

@ -18,16 +18,17 @@
extern u8 uses_asan;
extern u8 *trace_bits;
extern s32 forksrv_pid, child_pid, fsrv_ctl_fd, fsrv_st_fd;
extern s32 out_fd, out_dir_fd, dev_urandom_fd, dev_null_fd; /* initialize these with -1 */
extern s32 out_fd, out_dir_fd, dev_urandom_fd,
dev_null_fd; /* initialize these with -1 */
extern u32 exec_tmout;
extern u64 mem_limit;
extern u8 * out_file, *target_path, *doc_path;
extern FILE *plot_file;
/* we need this internally, but it can be defined and read as extern in the main source */
/* we need this internally, but it can be defined and read as extern in the main source
 */
u8 child_timed_out;
/* Describe integer as memory size. */
u8 *forkserver_DMS(u64 val) {
@ -35,14 +36,18 @@ u8* forkserver_DMS(u64 val) {
static u8 tmp[12][16];
static u8 cur;
#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) \
do { \
\
if (val < (_divisor) * (_limit_mult)) { \
\
sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
return tmp[cur]; \
\
} \
\
} while (0)
cur = (cur + 1) % 12;
/* 0-9999 */
@ -86,20 +91,23 @@ u8* forkserver_DMS(u64 val) {
}
/* the timeout handler */
void handle_timeout(int sig) {
if (child_pid > 0) {
child_timed_out = 1;
kill(child_pid, SIGKILL);
} else if (child_pid == -1 && forksrv_pid > 0) {
child_timed_out = 1;
kill(forksrv_pid, SIGKILL);
}
}
}
/* Spin up fork server (instrumented mode only). The idea is explained here:
@ -118,14 +126,12 @@ void init_forkserver(char **argv) {
ACTF("Spinning up the fork server...");
if (pipe(st_pipe) || pipe(ctl_pipe))
PFATAL("pipe() failed");
if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
child_timed_out = 0;
forksrv_pid = fork();
if (forksrv_pid < 0)
PFATAL("fork() failed");
if (forksrv_pid < 0) PFATAL("fork() failed");
if (!forksrv_pid) {
@ -137,11 +143,14 @@ void init_forkserver(char **argv) {
soft 128. Let's try to fix that... */
if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) {
r.rlim_cur = FORKSRV_FD + 2;
setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */
}
if (mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
#ifdef RLIMIT_AS
@ -153,6 +162,7 @@ void init_forkserver(char **argv) {
setrlimit(RLIMIT_DATA, &r); /* Ignore errors */
#endif /* ^RLIMIT_AS */
}
/* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered
@ -167,23 +177,27 @@ void init_forkserver(char **argv) {
setsid();
if (!getenv("AFL_DEBUG_CHILD_OUTPUT")) {
dup2(dev_null_fd, 1);
dup2(dev_null_fd, 2);
}
if (out_file) {
dup2(dev_null_fd, 0);
} else {
dup2(out_fd, 0);
close(out_fd);
}
/* Set up control and status pipes, close the unneeded original fds. */
if (dup2(ctl_pipe[0], FORKSRV_FD) < 0)
PFATAL("dup2() failed");
if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0)
PFATAL("dup2() failed");
if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed");
if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed");
close(ctl_pipe[0]);
close(ctl_pipe[1]);
@ -198,8 +212,7 @@ void init_forkserver(char **argv) {
/* This should improve performance a bit, since it stops the linker from
doing extra work post-fork(). */
if (!getenv("LD_BIND_LAZY"))
setenv("LD_BIND_NOW", "1", 0);
if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0);
/* Set sane defaults for ASAN if nothing else specified. */
@ -228,6 +241,7 @@ void init_forkserver(char **argv) {
*(u32 *)trace_bits = EXEC_FAIL_SIG;
exit(0);
}
/* PARENT PROCESS */
@ -243,8 +257,10 @@ void init_forkserver(char **argv) {
/* Wait for the fork server to come up, but don't wait too long. */
if (exec_tmout) {
it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000);
it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000;
}
setitimer(ITIMER_REAL, &it, NULL);
@ -260,21 +276,23 @@ void init_forkserver(char **argv) {
Otherwise, try to figure out what went wrong. */
if (rlen == 4) {
OKF("All right - fork server is up.");
return;
}
if (child_timed_out)
FATAL("Timeout while initializing fork server (adjusting -t may help)");
if (waitpid(forksrv_pid, &status, 0) <= 0)
PFATAL("waitpid() failed");
if (waitpid(forksrv_pid, &status, 0) <= 0) PFATAL("waitpid() failed");
if (WIFSIGNALED(status)) {
if (mem_limit && mem_limit < 500 && uses_asan) {
SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, "
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, "
"before receiving any input\n"
" from the fuzzer! Since it seems to be built with ASAN and you "
"have a\n"
@ -285,7 +303,8 @@ void init_forkserver(char **argv) {
} else if (!mem_limit) {
SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, "
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, "
"before receiving any input\n"
" from the fuzzer! There are several probable explanations:\n\n"
@ -303,7 +322,8 @@ void init_forkserver(char **argv) {
} else {
SAYF("\n" cLRD "[-] " cRST "Whoops, the target binary crashed suddenly, "
SAYF("\n" cLRD "[-] " cRST
"Whoops, the target binary crashed suddenly, "
"before receiving any input\n"
" from the fuzzer! There are several probable explanations:\n\n"
@ -315,7 +335,8 @@ void init_forkserver(char **argv) {
"way confirm\n"
" this diagnosis would be:\n\n"
MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n"
MSG_ULIMIT_USAGE
" /path/to/fuzzed_app )\n\n"
" Tip: you can use http://jwilk.net/software/recidivm to "
"quickly\n"
@ -334,9 +355,11 @@ void init_forkserver(char **argv) {
" fail, poke <afl-users@googlegroups.com> for troubleshooting "
"tips.\n",
forkserver_DMS(mem_limit << 20), mem_limit - 1);
}
FATAL("Fork server crashed with signal %d", WTERMSIG(status));
}
if (*(u32 *)trace_bits == EXEC_FAIL_SIG)
@ -344,7 +367,8 @@ void init_forkserver(char **argv) {
if (mem_limit && mem_limit < 500 && uses_asan) {
SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated "
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated "
"before we could complete a\n"
" handshake with the injected code. Since it seems to be built "
"with ASAN and\n"
@ -355,7 +379,8 @@ void init_forkserver(char **argv) {
} else if (!mem_limit) {
SAYF("\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated "
SAYF("\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated "
"before we could complete a\n"
" handshake with the injected code. Perhaps there is a horrible "
"bug in the\n"
@ -365,7 +390,8 @@ void init_forkserver(char **argv) {
} else {
SAYF(
"\n" cLRD "[-] " cRST "Hmm, looks like the target binary terminated "
"\n" cLRD "[-] " cRST
"Hmm, looks like the target binary terminated "
"before we could complete a\n"
" handshake with the injected code. There are %s probable "
"explanations:\n\n"
@ -377,7 +403,8 @@ void init_forkserver(char **argv) {
"option. A\n"
" simple way to confirm the diagnosis may be:\n\n"
MSG_ULIMIT_USAGE " /path/to/fuzzed_app )\n\n"
MSG_ULIMIT_USAGE
" /path/to/fuzzed_app )\n\n"
" Tip: you can use http://jwilk.net/software/recidivm to quickly\n"
" estimate the required amount of virtual memory for the "
@ -394,8 +421,10 @@ void init_forkserver(char **argv) {
" reached before the program terminates.\n\n"
: "",
forkserver_DMS(mem_limit << 20), mem_limit - 1);
}
FATAL("Fork server handshake failed");
}
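Editor's note: the resource-limit setup performed by the child above, collected in one place as a sketch - raise the descriptor limit so the fork-server fds are usable, then cap memory at mem_limit MB. FORKSRV_FD is written out as 198 here for self-containment.

#include <sys/resource.h>

static void child_limits_sketch(unsigned long long mem_limit_mb) {

  struct rlimit r;

  if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < 198 + 2) {
    r.rlim_cur = 198 + 2;                        /* FORKSRV_FD + 2             */
    setrlimit(RLIMIT_NOFILE, &r);                /* ignore errors              */
  }

  if (mem_limit_mb) {
    r.rlim_max = r.rlim_cur = (rlim_t)mem_limit_mb << 20;  /* MB -> bytes      */
#ifdef RLIMIT_AS
    setrlimit(RLIMIT_AS, &r);                    /* address-space cap          */
#else
    setrlimit(RLIMIT_DATA, &r);                  /* fallback where AS is absent */
#endif
  }

}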

View File

@ -46,7 +46,6 @@ void write_bitmap(void) {
}
/* Read bitmap from file. This is for the -B option again. */
void read_bitmap(u8* fname) {
@ -61,7 +60,6 @@ void read_bitmap(u8* fname) {
}
/* Check if the current execution path brings anything new to the table.
Update virgin bits to reflect the finds. Returns 1 if the only change is
the hit-count for a particular tuple; 2 if there are new tuples seen.
@ -111,14 +109,18 @@ u8 has_new_bits(u8* virgin_map) {
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
(cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
(cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
else ret = 1;
(cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff))
ret = 2;
else
ret = 1;
#else
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
else ret = 1;
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff))
ret = 2;
else
ret = 1;
#endif /* ^__x86_64__ */
@ -139,7 +141,6 @@ u8 has_new_bits(u8* virgin_map) {
}
/* Count the number of bits set in the provided bitmap. Used for the status
screen several times every second, does not have to be fast. */
@ -157,8 +158,10 @@ u32 count_bits(u8* mem) {
data. */
if (v == 0xffffffff) {
ret += 32;
continue;
}
v -= ((v >> 1) & 0x55555555);
@ -171,7 +174,6 @@ u32 count_bits(u8* mem) {
}
#define FF(_b) (0xff << ((_b) << 3))
/* Count the number of bytes set in the bitmap. Called fairly sporadically,
@ -200,7 +202,6 @@ u32 count_bytes(u8* mem) {
}
/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
status screen, several calls per second or so. */
@ -229,7 +230,6 @@ u32 count_non_255_bytes(u8* mem) {
}
/* Destructively simplify trace by eliminating hit count information
and replacing it with 0x80 or 0x01 depending on whether the tuple
is hit or not. Called on every new crash or timeout, should be
@ -237,8 +237,7 @@ u32 count_non_255_bytes(u8* mem) {
const u8 simplify_lookup[256] = {
[0] = 1,
[1 ... 255] = 128
[0] = 1, [1 ... 255] = 128
};
@ -265,7 +264,9 @@ void simplify_trace(u64* mem) {
mem8[6] = simplify_lookup[mem8[6]];
mem8[7] = simplify_lookup[mem8[7]];
} else *mem = 0x0101010101010101ULL;
} else
*mem = 0x0101010101010101ULL;
++mem;
@ -292,16 +293,18 @@ void simplify_trace(u32* mem) {
mem8[2] = simplify_lookup[mem8[2]];
mem8[3] = simplify_lookup[mem8[3]];
} else *mem = 0x01010101;
} else
*mem = 0x01010101;
++mem;
}
}
#endif /* ^__x86_64__ */
/* Destructively classify execution counts in a trace. This is used as a
preprocessing step for any newly acquired traces. Called on every exec,
must be fast. */
@ -322,7 +325,6 @@ static const u8 count_class_lookup8[256] = {
static u16 count_class_lookup16[65536];
void init_count_class16(void) {
u32 b1, b2;
@ -330,12 +332,10 @@ void init_count_class16(void) {
for (b1 = 0; b1 < 256; b1++)
for (b2 = 0; b2 < 256; b2++)
count_class_lookup16[(b1 << 8) + b2] =
(count_class_lookup8[b1] << 8) |
count_class_lookup8[b2];
(count_class_lookup8[b1] << 8) | count_class_lookup8[b2];
}
#ifdef __x86_64__
void classify_counts(u64* mem) {
@ -390,7 +390,6 @@ void classify_counts(u32* mem) {
#endif /* ^__x86_64__ */
/* Compact trace bytes into a smaller bitmap. We effectively just drop the
count information here. This is called only sporadically, for some
new paths. */
@ -408,7 +407,6 @@ void minimize_bits(u8* dst, u8* src) {
}
#ifndef SIMPLE_FILES
/* Construct a file name for a new test case, capturing the operation
@ -428,8 +426,7 @@ u8* describe_op(u8 hnb) {
sprintf(ret + strlen(ret), ",time:%llu", get_cur_time() - start_time);
if (splicing_with >= 0)
sprintf(ret + strlen(ret), "+%06d", splicing_with);
if (splicing_with >= 0) sprintf(ret + strlen(ret), "+%06d", splicing_with);
sprintf(ret + strlen(ret), ",op:%s", stage_short);
@ -439,10 +436,11 @@ u8* describe_op(u8 hnb) {
if (stage_val_type != STAGE_VAL_NONE)
sprintf(ret + strlen(ret), ",val:%s%+d",
(stage_val_type == STAGE_VAL_BE) ? "be:" : "",
stage_cur_val);
(stage_val_type == STAGE_VAL_BE) ? "be:" : "", stage_cur_val);
} else sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val);
} else
sprintf(ret + strlen(ret), ",rep:%d", stage_cur_val);
}
@ -454,7 +452,6 @@ u8* describe_op(u8 hnb) {
#endif /* !SIMPLE_FILES */
/* Write a message accompanying the crash directory :-) */
static void write_crash_readme(void) {
@ -473,21 +470,28 @@ static void write_crash_readme(void) {
f = fdopen(fd, "w");
if (!f) {
close(fd);
return;
}
fprintf(f, "Command line used to find this crash:\n\n"
fprintf(
f,
"Command line used to find this crash:\n\n"
"%s\n\n"
"If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n"
"If you can't reproduce a bug outside of afl-fuzz, be sure to set the "
"same\n"
"memory limit. The limit used for this fuzzing session was %s.\n\n"
"Need a tool to minimize test cases before investigating the crashes or sending\n"
"Need a tool to minimize test cases before investigating the crashes or "
"sending\n"
"them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"
"Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n"
"Found any cool bugs in open-source tools using afl-fuzz? If yes, please "
"drop\n"
"an mail at <afl-users@googlegroups.com> once the issues are fixed\n\n"
" https://github.com/vanhauser-thc/AFLplusplus\n\n",
@ -498,7 +502,6 @@ static void write_crash_readme(void) {
}
/* Check if the result of an execve() during routine fuzzing is interesting,
save or queue the input test case for further analysis if so. Returns 1 if
entry is saved, 0 otherwise. */
@ -517,8 +520,8 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
struct queue_entry* q = queue;
while (q) {
if (q->exec_cksum == cksum)
q->n_fuzz = q->n_fuzz + 1;
if (q->exec_cksum == cksum) q->n_fuzz = q->n_fuzz + 1;
q = q->next;
@ -530,8 +533,10 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
future fuzzing, etc. */
if (!(hnb = has_new_bits(virgin_bits))) {
if (crash_mode) ++total_crashes;
return 0;
}
#ifndef SIMPLE_FILES
@ -548,8 +553,10 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
add_to_queue(fn, len, 0);
if (hnb == 2) {
queue_top->has_new_cov = 1;
++queued_with_cov;
}
queue_top->exec_cksum = cksum;
@ -559,8 +566,7 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0);
if (res == FAULT_ERROR)
FATAL("Unable to execute target application");
if (res == FAULT_ERROR) FATAL("Unable to execute target application");
fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600);
if (fd < 0) PFATAL("Unable to create '%s'", fn);
@ -620,13 +626,12 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
#ifndef SIMPLE_FILES
fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir,
unique_hangs, describe_op(0));
fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, unique_hangs,
describe_op(0));
#else
fn = alloc_printf("%s/hangs/id_%06llu", out_dir,
unique_hangs);
fn = alloc_printf("%s/hangs/id_%06llu", out_dir, unique_hangs);
#endif /* ^!SIMPLE_FILES */
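Editor's note: classify_counts() above squashes raw hit counts into power-of-two buckets so tiny fluctuations do not register as "new" behavior; count_class_lookup16 is just the 8-bit table applied to both bytes at once. A standalone sketch of the 8-bit bucketing, with boundaries following the classic afl table (treat them as illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t lookup8[256];

static void init_lookup8(void) {

  lookup8[0] = 0; lookup8[1] = 1; lookup8[2] = 2; lookup8[3] = 4;
  memset(lookup8 + 4,   8,   4);                 /* 4..7     -> 8              */
  memset(lookup8 + 8,   16,  8);                 /* 8..15    -> 16             */
  memset(lookup8 + 16,  32,  16);                /* 16..31   -> 32             */
  memset(lookup8 + 32,  64,  96);                /* 32..127  -> 64             */
  memset(lookup8 + 128, 128, 128);               /* 128..255 -> 128            */

}

int main(void) {

  init_lookup8();
  uint8_t trace[4] = {1, 3, 20, 200};
  for (int i = 0; i < 4; i++) trace[i] = lookup8[trace[i]];
  printf("%d %d %d %d\n", trace[0], trace[1], trace[2], trace[3]);  /* 1 4 32 128 */
  return 0;

}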

View File

@ -22,23 +22,23 @@
#include "afl-fuzz.h"
/* Helper function for load_extras. */
static int compare_extras_len(const void* p1, const void* p2) {
struct extra_data *e1 = (struct extra_data*)p1,
*e2 = (struct extra_data*)p2;
struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2;
return e1->len - e2->len;
}
static int compare_extras_use_d(const void* p1, const void* p2) {
struct extra_data *e1 = (struct extra_data*)p1,
*e2 = (struct extra_data*)p2;
struct extra_data *e1 = (struct extra_data*)p1, *e2 = (struct extra_data*)p2;
return e2->hit_cnt - e1->hit_cnt;
}
}
/* Read extras from a file, sort by size. */
@ -62,10 +62,12 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
/* Trim on left and right. */
while (isspace(*lptr)) ++lptr;
while (isspace(*lptr))
++lptr;
rptr = lptr + strlen(lptr) - 1;
while (rptr >= lptr && isspace(*rptr)) --rptr;
while (rptr >= lptr && isspace(*rptr))
--rptr;
++rptr;
*rptr = 0;
@ -84,7 +86,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
/* Skip alphanumerics and dashes (label). */
while (isalnum(*lptr) || *lptr == '_') ++lptr;
while (isalnum(*lptr) || *lptr == '_')
++lptr;
/* If @number follows, parse that. */
@ -92,13 +95,15 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
++lptr;
if (atoi(lptr) > dict_level) continue;
while (isdigit(*lptr)) ++lptr;
while (isdigit(*lptr))
++lptr;
}
/* Skip whitespace and = signs. */
while (isspace(*lptr) || *lptr == '=') ++lptr;
while (isspace(*lptr) || *lptr == '=')
++lptr;
/* Consume opening '"'. */
@ -112,8 +117,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
/* Okay, let's allocate memory and copy data between "...", handling
\xNN escaping, \\, and \". */
extras = ck_realloc_block(extras, (extras_cnt + 1) *
sizeof(struct extra_data));
extras =
ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr);
@ -132,16 +137,17 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
++lptr;
if (*lptr == '\\' || *lptr == '"') {
*(wptr++) = *(lptr++);
klen++;
break;
}
if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2]))
FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line);
*(wptr++) =
((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
*(wptr++) = ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) |
(strchr(hexdigits, tolower(lptr[2])) - hexdigits);
lptr += 3;
@ -149,10 +155,7 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
break;
default:
*(wptr++) = *(lptr++);
++klen;
default: *(wptr++) = *(lptr++); ++klen;
}
@ -161,8 +164,8 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
extras[extras_cnt].len = klen;
if (extras[extras_cnt].len > MAX_DICT_FILE)
FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line,
DMS(klen), DMS(MAX_DICT_FILE));
FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, DMS(klen),
DMS(MAX_DICT_FILE));
if (*min_len > klen) *min_len = klen;
if (*max_len < klen) *max_len = klen;
@ -175,7 +178,6 @@ void load_extras_file(u8* fname, u32* min_len, u32* max_len, u32 dict_level) {
}
/* Read extras from the extras directory and sort them by size. */
void load_extras(u8* dir) {
@ -201,8 +203,10 @@ void load_extras(u8* dir) {
if (!d) {
if (errno == ENOTDIR) {
load_extras_file(dir, &min_len, &max_len, dict_level);
goto check_and_sort;
}
PFATAL("Unable to open '%s'", dir);
@ -217,8 +221,7 @@ void load_extras(u8* dir) {
u8* fn = alloc_printf("%s/%s", dir, de->d_name);
s32 fd;
if (lstat(fn, &st) || access(fn, R_OK))
PFATAL("Unable to access '%s'", fn);
if (lstat(fn, &st) || access(fn, R_OK)) PFATAL("Unable to access '%s'", fn);
/* This also takes care of . and .. */
if (!S_ISREG(st.st_mode) || !st.st_size) {
@ -229,14 +232,14 @@ void load_extras(u8* dir) {
}
if (st.st_size > MAX_DICT_FILE)
FATAL("Extra '%s' is too big (%s, limit is %s)", fn,
DMS(st.st_size), DMS(MAX_DICT_FILE));
FATAL("Extra '%s' is too big (%s, limit is %s)", fn, DMS(st.st_size),
DMS(MAX_DICT_FILE));
if (min_len > st.st_size) min_len = st.st_size;
if (max_len < st.st_size) max_len = st.st_size;
extras = ck_realloc_block(extras, (extras_cnt + 1) *
sizeof(struct extra_data));
extras =
ck_realloc_block(extras, (extras_cnt + 1) * sizeof(struct extra_data));
extras[extras_cnt].data = ck_alloc(st.st_size);
extras[extras_cnt].len = st.st_size;
@ -262,8 +265,8 @@ check_and_sort:
qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len);
OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt,
DMS(min_len), DMS(max_len));
OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, DMS(min_len),
DMS(max_len));
if (max_len > 32)
WARNF("Some tokens are relatively large (%s) - consider trimming.",
@ -275,18 +278,16 @@ check_and_sort:
}
/* Helper function for maybe_add_auto() */
static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) {
while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
while (len--)
if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1;
return 0;
}
/* Maybe add automatic extra. */
void maybe_add_auto(u8* mem, u32 len) {
@ -312,7 +313,8 @@ void maybe_add_auto(u8* mem, u32 len) {
while (i--)
if (*((u16*)mem) == interesting_16[i] ||
*((u16*)mem) == SWAP16(interesting_16[i])) return;
*((u16*)mem) == SWAP16(interesting_16[i]))
return;
}
@ -322,7 +324,8 @@ void maybe_add_auto(u8* mem, u32 len) {
while (i--)
if (*((u32*)mem) == interesting_32[i] ||
*((u32*)mem) == SWAP32(interesting_32[i])) return;
*((u32*)mem) == SWAP32(interesting_32[i]))
return;
}
@ -358,8 +361,8 @@ void maybe_add_auto(u8* mem, u32 len) {
if (a_extras_cnt < MAX_AUTO_EXTRAS) {
a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) *
sizeof(struct extra_data));
a_extras = ck_realloc_block(a_extras,
(a_extras_cnt + 1) * sizeof(struct extra_data));
a_extras[a_extras_cnt].data = ck_memdup(mem, len);
a_extras[a_extras_cnt].len = len;
@ -367,8 +370,7 @@ void maybe_add_auto(u8* mem, u32 len) {
} else {
i = MAX_AUTO_EXTRAS / 2 +
UR((MAX_AUTO_EXTRAS + 1) / 2);
i = MAX_AUTO_EXTRAS / 2 + UR((MAX_AUTO_EXTRAS + 1) / 2);
ck_free(a_extras[i].data);
@ -387,12 +389,11 @@ sort_a_extras:
/* Then, sort the top USE_AUTO_EXTRAS entries by size. */
qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt),
sizeof(struct extra_data), compare_extras_len);
qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), sizeof(struct extra_data),
compare_extras_len);
}
/* Save automatically generated extras. */
void save_auto(void) {
@ -420,7 +421,6 @@ void save_auto(void) {
}
/* Load automatically generated extras. */
void load_auto(void) {
@ -458,12 +458,13 @@ void load_auto(void) {
}
if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i);
else OKF("No auto-generated dictionary tokens to reuse.");
if (i)
OKF("Loaded %u auto-discovered dictionary tokens.", i);
else
OKF("No auto-generated dictionary tokens to reuse.");
}
/* Destroy extras. */
void destroy_extras(void) {

View File

@ -25,27 +25,13 @@
/* MOpt:
Lots of globals, but mostly for the status UI and other things where it
really makes no sense to haul them around as function parameters. */
u64 limit_time_puppet,
orig_hit_cnt_puppet,
last_limit_time_start,
tmp_pilot_time,
total_pacemaker_time,
total_puppet_find,
temp_puppet_find,
most_time_key,
most_time,
most_execs_key,
most_execs,
old_hit_count;
u64 limit_time_puppet, orig_hit_cnt_puppet, last_limit_time_start,
tmp_pilot_time, total_pacemaker_time, total_puppet_find, temp_puppet_find,
most_time_key, most_time, most_execs_key, most_execs, old_hit_count;
s32 SPLICE_CYCLES_puppet,
limit_time_sig,
key_puppet,
key_module;
s32 SPLICE_CYCLES_puppet, limit_time_sig, key_puppet, key_module;
double w_init = 0.9,
w_end = 0.3,
w_now;
double w_init = 0.9, w_end = 0.3, w_now;
s32 g_now;
s32 g_max = 5000;
@ -53,15 +39,13 @@ s32 g_max = 5000;
u64 tmp_core_time;
s32 swarm_now;
double x_now[swarm_num][operator_num],
L_best[swarm_num][operator_num],
eff_best[swarm_num][operator_num],
G_best[operator_num],
v_now[swarm_num][operator_num],
probability_now[swarm_num][operator_num],
double x_now[swarm_num][operator_num], L_best[swarm_num][operator_num],
eff_best[swarm_num][operator_num], G_best[operator_num],
v_now[swarm_num][operator_num], probability_now[swarm_num][operator_num],
swarm_fitness[swarm_num];
u64 stage_finds_puppet[swarm_num][operator_num], /* Patterns found per fuzz stage */
u64 stage_finds_puppet[swarm_num]
[operator_num], /* Patterns found per fuzz stage */
stage_finds_puppet_v2[swarm_num][operator_num],
stage_cycles_puppet_v2[swarm_num][operator_num],
stage_cycles_puppet_v3[swarm_num][operator_num],
@ -96,20 +80,13 @@ u32 hang_tmout = EXEC_TIMEOUT; /* Timeout used for hang det (ms) */
u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */
u8 cal_cycles = CAL_CYCLES, /* Calibration cycles defaults */
cal_cycles_long = CAL_CYCLES_LONG,
debug, /* Debug mode */
cal_cycles_long = CAL_CYCLES_LONG, debug, /* Debug mode */
python_only; /* Python-only mode */
u32 stats_update_freq = 1; /* Stats update frequency (execs) */
char *power_names[POWER_SCHEDULES_NUM] = {
"explore",
"fast",
"coe",
"lin",
"quad",
"exploit"
};
char *power_names[POWER_SCHEDULES_NUM] = {"explore", "fast", "coe",
"lin", "quad", "exploit"};
u8 schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/
u8 havoc_max_mult = HAVOC_MAX_MULT;
@ -244,15 +221,12 @@ s32 cpu_aff = -1; /* Selected CPU core */
FILE *plot_file; /* Gnuplot output file */
struct queue_entry *queue, /* Fuzzing queue (linked list) */
*queue_cur, /* Current offset within the queue */
*queue_top, /* Top of the list */
*q_prev100; /* Previous 100 marker */
struct queue_entry*
top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */
struct queue_entry *top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */
struct extra_data *extras; /* Extra tokens to fuzz with */
u32 extras_cnt; /* Total number of tokens read */
@ -263,10 +237,10 @@ u32 a_extras_cnt; /* Total number of tokens available */
u8 *(*post_handler)(u8 *buf, u32 *len);
/* hooks for the custom mutator function */
size_t (*custom_mutator)(u8 *data, size_t size, u8* mutated_out, size_t max_size, unsigned int seed);
size_t (*custom_mutator)(u8 *data, size_t size, u8 *mutated_out,
size_t max_size, unsigned int seed);
size_t (*pre_save_handler)(u8 *data, size_t size, u8 **new_data);
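/* Illustration (not from this commit): two functions matching the hook
   signatures above. "u8" is assumed to be AFL's byte typedef; the names
   example_mutator() and example_pre_save() are hypothetical stand-ins for
   whatever a custom-mutator library would export. */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint8_t u8;

/* Matches custom_mutator: emit up to max_size mutated bytes into mutated_out
   and return how many were produced. */
size_t example_mutator(u8 *data, size_t size, u8 *mutated_out, size_t max_size,
                       unsigned int seed) {

  size_t out_len = size < max_size ? size : max_size;
  srandom(seed);
  memcpy(mutated_out, data, out_len);
  if (out_len) mutated_out[random() % out_len] ^= 0xFF; /* flip one byte */
  return out_len;

}

/* Matches pre_save_handler: hand back the buffer that should be written to
   the test case file in place of the raw fuzzed data. */
size_t example_pre_save(u8 *data, size_t size, u8 **new_data) {

  *new_data = data; /* no transformation in this sketch */
  return size;

}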
/* Interesting values, as per config.h */
s8 interesting_8[] = {INTERESTING_8};

File diff suppressed because it is too large

View File

@ -33,11 +33,16 @@ u8* DI(u64 val) {
cur = (cur + 1) % 12;
#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \
#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) \
do { \
\
if (val < (_divisor) * (_limit_mult)) { \
\
sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \
return tmp[cur]; \
\
} \
\
} while (0)
/* 0-9999 */
@ -79,7 +84,6 @@ u8* DI(u64 val) {
}
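/* Illustration (not from this commit): a self-contained sketch of how a
   CHK_FORMAT-style cascade is typically driven to pick a human-readable
   suffix; describe_int() and the two-slot buffer are hypothetical
   simplifications of the real DI() shown above. */

#include <stdint.h>
#include <stdio.h>

static const char *describe_int(uint64_t val) {

  static char tmp[2][32];
  static int  cur;
  cur = (cur + 1) % 2;

#define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast)  \
  do {                                                  \
                                                        \
    if (val < (_divisor) * (_limit_mult)) {             \
                                                        \
      snprintf(tmp[cur], sizeof(tmp[cur]), _fmt,        \
               ((_cast)val) / (_divisor));              \
      return tmp[cur];                                   \
                                                        \
    }                                                   \
                                                        \
  } while (0)

  CHK_FORMAT(1, 10000, "%llu", unsigned long long);    /* 0-9999      */
  CHK_FORMAT(1000, 99.95, "%0.01fk", double);          /* 10.0k-99.9k */
  CHK_FORMAT(1000, 1000, "%lluk", unsigned long long); /* 100k-999k   */

#undef CHK_FORMAT

  return "a lot";

}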
/* Describe float. Similar to the above, except with a single
static buffer. */
@ -88,20 +92,23 @@ u8* DF(double val) {
static u8 tmp[16];
if (val < 99.995) {
sprintf(tmp, "%0.02f", val);
return tmp;
}
if (val < 999.95) {
sprintf(tmp, "%0.01f", val);
return tmp;
}
return DI((u64)val);
}
/* Describe integer as memory size. */
u8* DMS(u64 val) {
@ -152,7 +159,6 @@ u8* DMS(u64 val) {
}
/* Describe time delta. Returns one static buffer, 34 chars or less. */
u8* DTD(u64 cur_ms, u64 event_ms) {
@ -174,3 +180,4 @@ u8* DTD(u64 cur_ms, u64 event_ms) {
return tmp;
}

File diff suppressed because it is too large

View File

@ -26,45 +26,62 @@
#ifdef USE_PYTHON
int init_py() {
Py_Initialize();
u8* module_name = getenv("AFL_PYTHON_MODULE");
if (module_name) {
PyObject* py_name = PyString_FromString(module_name);
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
if (py_module != NULL) {
u8 py_notrim = 0;
py_functions[PY_FUNC_INIT] = PyObject_GetAttrString(py_module, "init");
py_functions[PY_FUNC_FUZZ] = PyObject_GetAttrString(py_module, "fuzz");
py_functions[PY_FUNC_INIT_TRIM] = PyObject_GetAttrString(py_module, "init_trim");
py_functions[PY_FUNC_POST_TRIM] = PyObject_GetAttrString(py_module, "post_trim");
py_functions[PY_FUNC_INIT_TRIM] =
PyObject_GetAttrString(py_module, "init_trim");
py_functions[PY_FUNC_POST_TRIM] =
PyObject_GetAttrString(py_module, "post_trim");
py_functions[PY_FUNC_TRIM] = PyObject_GetAttrString(py_module, "trim");
for (u8 py_idx = 0; py_idx < PY_FUNC_COUNT; ++py_idx) {
if (!py_functions[py_idx] || !PyCallable_Check(py_functions[py_idx])) {
if (py_idx >= PY_FUNC_INIT_TRIM && py_idx <= PY_FUNC_TRIM) {
// Implementing the trim API is optional for now
if (PyErr_Occurred())
PyErr_Print();
if (PyErr_Occurred()) PyErr_Print();
py_notrim = 1;
} else {
if (PyErr_Occurred())
PyErr_Print();
fprintf(stderr, "Cannot find/call function with index %d in external Python module.\n", py_idx);
if (PyErr_Occurred()) PyErr_Print();
fprintf(stderr,
"Cannot find/call function with index %d in external "
"Python module.\n",
py_idx);
return 1;
}
}
}
if (py_notrim) {
py_functions[PY_FUNC_INIT_TRIM] = NULL;
py_functions[PY_FUNC_POST_TRIM] = NULL;
py_functions[PY_FUNC_TRIM] = NULL;
WARNF("Python module does not implement trim API, standard trimming will be used.");
WARNF(
"Python module does not implement trim API, standard trimming will "
"be used.");
}
PyObject *py_args, *py_value;
@ -73,9 +90,11 @@ int init_py() {
py_args = PyTuple_New(1);
py_value = PyInt_FromLong(UR(0xFFFFFFFF));
if (!py_value) {
Py_DECREF(py_args);
fprintf(stderr, "Cannot convert argument\n");
return 1;
}
PyTuple_SetItem(py_args, 0, py_value);
@ -85,51 +104,68 @@ int init_py() {
Py_DECREF(py_args);
if (py_value == NULL) {
PyErr_Print();
fprintf(stderr, "Call failed\n");
return 1;
}
} else {
PyErr_Print();
fprintf(stderr, "Failed to load \"%s\"\n", module_name);
return 1;
}
}
return 0;
}
void finalize_py() {
if (py_module != NULL) {
u32 i;
for (i = 0; i < PY_FUNC_COUNT; ++i)
Py_XDECREF(py_functions[i]);
Py_DECREF(py_module);
}
Py_Finalize();
}
void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char** ret, size_t* retlen) {
void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen,
char** ret, size_t* retlen) {
if (py_module != NULL) {
PyObject *py_args, *py_value;
py_args = PyTuple_New(2);
py_value = PyByteArray_FromStringAndSize(buf, buflen);
if (!py_value) {
Py_DECREF(py_args);
fprintf(stderr, "Cannot convert argument\n");
return;
}
PyTuple_SetItem(py_args, 0, py_value);
py_value = PyByteArray_FromStringAndSize(add_buf, add_buflen);
if (!py_value) {
Py_DECREF(py_args);
fprintf(stderr, "Cannot convert argument\n");
return;
}
PyTuple_SetItem(py_args, 1, py_value);
@ -139,26 +175,35 @@ void fuzz_py(char* buf, size_t buflen, char* add_buf, size_t add_buflen, char**
Py_DECREF(py_args);
if (py_value != NULL) {
*retlen = PyByteArray_Size(py_value);
*ret = malloc(*retlen);
memcpy(*ret, PyByteArray_AsString(py_value), *retlen);
Py_DECREF(py_value);
} else {
PyErr_Print();
fprintf(stderr, "Call failed\n");
return;
}
}
}
u32 init_trim_py(char* buf, size_t buflen) {
PyObject *py_args, *py_value;
py_args = PyTuple_New(1);
py_value = PyByteArray_FromStringAndSize(buf, buflen);
if (!py_value) {
Py_DECREF(py_args);
FATAL("Failed to convert arguments");
}
PyTuple_SetItem(py_args, 0, py_value);
@ -167,24 +212,32 @@ u32 init_trim_py(char* buf, size_t buflen) {
Py_DECREF(py_args);
if (py_value != NULL) {
u32 retcnt = PyInt_AsLong(py_value);
Py_DECREF(py_value);
return retcnt;
} else {
PyErr_Print();
FATAL("Call failed");
}
}
u32 post_trim_py(char success) {
PyObject *py_args, *py_value;
py_args = PyTuple_New(1);
py_value = PyBool_FromLong(success);
if (!py_value) {
Py_DECREF(py_args);
FATAL("Failed to convert arguments");
}
PyTuple_SetItem(py_args, 0, py_value);
@ -193,16 +246,22 @@ u32 post_trim_py(char success) {
Py_DECREF(py_args);
if (py_value != NULL) {
u32 retcnt = PyInt_AsLong(py_value);
Py_DECREF(py_value);
return retcnt;
} else {
PyErr_Print();
FATAL("Call failed");
}
}
void trim_py(char** ret, size_t* retlen) {
PyObject *py_args, *py_value;
py_args = PyTuple_New(0);
@ -210,14 +269,19 @@ void trim_py(char** ret, size_t* retlen) {
Py_DECREF(py_args);
if (py_value != NULL) {
*retlen = PyByteArray_Size(py_value);
*ret = malloc(*retlen);
memcpy(*ret, PyByteArray_AsString(py_value), *retlen);
Py_DECREF(py_value);
} else {
PyErr_Print();
FATAL("Call failed");
}
}
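/* Illustration (not from this commit): taken together, the three wrappers
   above implement the optional trim protocol used by trim_case_python() --
   init_trim(buf) tells AFL how many trimming steps the module plans to run,
   each step calls trim() to fetch a candidate reduced buffer, and
   post_trim(success) reports whether the target still behaved identically
   and returns the index of the next step. */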
u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
@ -237,9 +301,11 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
stage_max = init_trim_py(in_buf, q->len);
if (not_on_tty && debug)
SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max, q->len);
SAYF("[Python Trimming] START: Max %d iterations, %u bytes", stage_max,
q->len);
while (stage_cur < stage_max) {
sprintf(tmp, "ptrim %s", DI(trim_exec));
u32 cksum;
@ -250,7 +316,9 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
trim_py(&retbuf, &retlen);
if (retlen > orig_len)
FATAL("Trimmed data returned by Python module is larger than original data");
FATAL(
"Trimmed data returned by Python module is larger than original "
"data");
write_to_testcase(retbuf, retlen);
@ -280,17 +348,23 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
stage_cur = post_trim_py(1);
if (not_on_tty && debug)
SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)", stage_cur, stage_max, q->len);
SAYF("[Python Trimming] SUCCESS: %d/%d iterations (now at %u bytes)",
stage_cur, stage_max, q->len);
} else {
/* Tell the Python module that the trimming was unsuccessful */
stage_cur = post_trim_py(0);
if (not_on_tty && debug)
SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur, stage_max);
SAYF("[Python Trimming] FAILURE: %d/%d iterations", stage_cur,
stage_max);
}
/* Since this can be slow, update the screen every now and then. */
if (!(trim_exec++ % stats_update_freq)) show_stats();
}
if (not_on_tty && debug)
@ -317,8 +391,6 @@ u8 trim_case_python(char** argv, struct queue_entry* q, u8* in_buf) {
}
abort_trimming:
bytes_trim_out += q->len;
@ -327,3 +399,4 @@ abort_trimming:
}
#endif /* USE_PYTHON */

View File

@ -43,7 +43,6 @@ void mark_as_det_done(struct queue_entry* q) {
}
/* Mark as variable. Create symlinks if possible to make it easier to examine
the files. */
@ -69,7 +68,6 @@ void mark_as_variable(struct queue_entry* q) {
}
/* Mark / unmark as redundant (edge-only). This is not used for restoring state,
but may be useful for post-processing datasets. */
@ -102,7 +100,6 @@ void mark_as_redundant(struct queue_entry* q, u8 state) {
}
/* Append new test case to the queue. */
void add_to_queue(u8* fname, u32 len, u8 passed_det) {
@ -122,7 +119,9 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
queue_top->next = q;
queue_top = q;
} else q_prev100 = queue = queue_top = q;
} else
q_prev100 = queue = queue_top = q;
++queued_paths;
++pending_not_fuzzed;
@ -140,7 +139,6 @@ void add_to_queue(u8* fname, u32 len, u8 passed_det) {
}
/* Destroy the entire queue. */
void destroy_queue(void) {
@ -159,7 +157,6 @@ void destroy_queue(void) {
}
/* When we bump into a new path, we call this to see if the path appears
more "favorable" than any of the existing ones. The purpose of the
"favorables" is to have a minimal set of paths that trigger all the bits
@ -170,7 +167,6 @@ void destroy_queue(void) {
for every byte in the bitmap. We win that slot if there is no previous
contender, or if the contender has a more favorable speed x size factor. */
void update_bitmap_score(struct queue_entry* q) {
u32 i;
@ -191,10 +187,13 @@ void update_bitmap_score(struct queue_entry* q) {
u64 top_rated_fav_factor = top_rated[i]->exec_us * top_rated[i]->len;
if (fuzz_p2 > top_rated_fuzz_p2) {
continue;
} else if (fuzz_p2 == top_rated_fuzz_p2) {
if (fav_factor > top_rated_fav_factor)
continue;
if (fav_factor > top_rated_fav_factor) continue;
}
if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue;
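/* Illustration (not from this commit): the "favorable speed x size factor"
   above is literally exec_us * len, so e.g. a 300-byte case that executes in
   500 us (factor 150,000) will take the slot from a 1,024-byte case that
   executes in 400 us (factor 409,600) for any bitmap byte both of them hit,
   provided the fuzz_p2 comparison above does not rule it out first. */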
@ -203,8 +202,10 @@ void update_bitmap_score(struct queue_entry* q) {
previous winner, discard its trace_bits[] if necessary. */
if (!--top_rated[i]->tc_ref) {
ck_free(top_rated[i]->trace_mini);
top_rated[i]->trace_mini = 0;
}
}
@ -215,8 +216,10 @@ void update_bitmap_score(struct queue_entry* q) {
++q->tc_ref;
if (!q->trace_mini) {
q->trace_mini = ck_alloc(MAP_SIZE >> 3);
minimize_bits(q->trace_mini, trace_bits);
}
score_changed = 1;
@ -225,7 +228,6 @@ void update_bitmap_score(struct queue_entry* q) {
}
/* The second part of the mechanism discussed above is a routine that
goes over top_rated[] entries, and then sequentially grabs winners for
previously-unseen bytes (temp_v) and marks them as favored, at least
@ -250,8 +252,10 @@ void cull_queue(void) {
q = queue;
while (q) {
q->favored = 0;
q = q->next;
}
/* Let's see if anything in the bitmap isn't captured in temp_v.
@ -271,19 +275,21 @@ void cull_queue(void) {
top_rated[i]->favored = 1;
++queued_favored;
if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed) ++pending_favored;
if (top_rated[i]->fuzz_level == 0 || !top_rated[i]->was_fuzzed)
++pending_favored;
}
q = queue;
while (q) {
mark_as_redundant(q, !q->favored);
q = q->next;
}
}
}
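/* Illustration (not from this commit): a self-contained rendering of the
   winner-grabbing pass described above. The entry struct, map_size and
   grab_winners() are simplified stand-ins for the real MAP_SIZE / trace_mini
   machinery. */

#include <stddef.h>
#include <stdint.h>

struct entry {

  uint8_t *trace_mini; /* map_size/8 bitmask of bytes this entry covers */
  uint8_t  favored;

};

static void grab_winners(struct entry **top_rated, uint8_t *temp_v,
                         size_t map_size) {

  for (size_t i = 0; i < map_size; ++i)
    if (top_rated[i] && (temp_v[i >> 3] & (1u << (i & 7)))) {

      /* Knock out every bitmap byte this winner already covers, then mark
         the winner as favored so the cull keeps it. */
      for (size_t j = 0; j < map_size >> 3; ++j)
        temp_v[j] &= (uint8_t)~top_rated[i]->trace_mini[j];

      top_rated[i]->favored = 1;

    }

}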
/* Calculate case desirability score to adjust the length of havoc fuzzing.
A helper function for fuzz_one(). Maybe some of these constants should
@ -305,34 +311,51 @@ u32 calculate_score(struct queue_entry* q) {
// Longer execution time means longer work on the input, the deeper in
// coverage, the better the fuzzing, right? -mh
if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10;
else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25;
else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50;
else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75;
else if (q->exec_us * 4 < avg_exec_us) perf_score = 300;
else if (q->exec_us * 3 < avg_exec_us) perf_score = 200;
else if (q->exec_us * 2 < avg_exec_us) perf_score = 150;
if (q->exec_us * 0.1 > avg_exec_us)
perf_score = 10;
else if (q->exec_us * 0.25 > avg_exec_us)
perf_score = 25;
else if (q->exec_us * 0.5 > avg_exec_us)
perf_score = 50;
else if (q->exec_us * 0.75 > avg_exec_us)
perf_score = 75;
else if (q->exec_us * 4 < avg_exec_us)
perf_score = 300;
else if (q->exec_us * 3 < avg_exec_us)
perf_score = 200;
else if (q->exec_us * 2 < avg_exec_us)
perf_score = 150;
/* Adjust score based on bitmap size. The working theory is that better
coverage translates to better targets. Multiplier from 0.25x to 3x. */
if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3;
else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2;
else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5;
else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25;
else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5;
else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75;
if (q->bitmap_size * 0.3 > avg_bitmap_size)
perf_score *= 3;
else if (q->bitmap_size * 0.5 > avg_bitmap_size)
perf_score *= 2;
else if (q->bitmap_size * 0.75 > avg_bitmap_size)
perf_score *= 1.5;
else if (q->bitmap_size * 3 < avg_bitmap_size)
perf_score *= 0.25;
else if (q->bitmap_size * 2 < avg_bitmap_size)
perf_score *= 0.5;
else if (q->bitmap_size * 1.5 < avg_bitmap_size)
perf_score *= 0.75;
/* Adjust score based on handicap. Handicap is proportional to how late
in the game we learned about this path. Latecomers are allowed to run
for a bit longer until they catch up with the rest. */
if (q->handicap >= 4) {
perf_score *= 4;
q->handicap -= 4;
} else if (q->handicap) {
perf_score *= 2;
--q->handicap;
}
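/* Illustration (not from this commit) -- worked example, assuming the usual
   starting perf_score of 100: a case running 4x faster than average is bumped
   to 300, a bitmap more than twice the average size doubles that to 600, and
   a handicap of 4 or more quadruples it to 2400, before the depth and
   power-schedule adjustments below and the final cap are applied. */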
/* Final adjustment based on input depth, under the assumption that fuzzing
@ -357,12 +380,9 @@ u32 calculate_score(struct queue_entry* q) {
switch (schedule) {
case EXPLORE:
break;
case EXPLORE: break;
case EXPLOIT:
factor = MAX_FACTOR;
break;
case EXPLOIT: factor = MAX_FACTOR; break;
case COE:
fuzz_total = 0;
@ -370,48 +390,59 @@ u32 calculate_score(struct queue_entry* q) {
struct queue_entry* queue_it = queue;
while (queue_it) {
fuzz_total += queue_it->n_fuzz;
n_paths++;
queue_it = queue_it->next;
}
fuzz_mu = fuzz_total / n_paths;
if (fuzz <= fuzz_mu) {
if (q->fuzz_level < 16)
factor = ((u32)(1 << q->fuzz_level));
else
factor = MAX_FACTOR;
} else {
factor = 0;
}
break;
case FAST:
if (q->fuzz_level < 16) {
factor = ((u32)(1 << q->fuzz_level)) / (fuzz == 0 ? 1 : fuzz);
} else
factor = MAX_FACTOR / (fuzz == 0 ? 1 : next_p2(fuzz));
break;
case LIN:
factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
break;
case LIN: factor = q->fuzz_level / (fuzz == 0 ? 1 : fuzz); break;
case QUAD:
factor = q->fuzz_level * q->fuzz_level / (fuzz == 0 ? 1 : fuzz);
break;
default:
PFATAL ("Unknown Power Schedule");
default: PFATAL("Unknown Power Schedule");
}
if (factor > MAX_FACTOR)
factor = MAX_FACTOR;
if (factor > MAX_FACTOR) factor = MAX_FACTOR;
perf_score *= factor / POWER_BETA;
// MOpt mode
if (limit_time_sig != 0 && max_depth - q->depth < 3) perf_score *= 2;
else if (perf_score < 1) perf_score = 1; // Add a lower bound to AFLFast's energy assignment strategies
if (limit_time_sig != 0 && max_depth - q->depth < 3)
perf_score *= 2;
else if (perf_score < 1)
perf_score =
1; // Add a lower bound to AFLFast's energy assignment strategies
/* Make sure that we don't go over limit. */
@ -420,3 +451,4 @@ u32 calculate_score(struct queue_entry* q) {
return perf_score;
}
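/* Illustration (not from this commit) -- worked example for the schedule
   switch above: under FAST, a seed at fuzz_level 5 that has been exercised 8
   times (fuzz == 8) gets factor = 2^5 / 8 = 4, so its perf_score is scaled by
   4 / POWER_BETA; under COE the same seed gets factor 2^5 = 32 while its fuzz
   count stays at or below the queue-wide mean, and factor 0 once it rises
   above it, effectively deprioritizing that seed. */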

View File

@ -108,10 +108,12 @@ u8 run_target(char** argv, u32 timeout) {
/* Set sane defaults for ASAN if nothing else specified. */
setenv("ASAN_OPTIONS", "abort_on_error=1:"
setenv("ASAN_OPTIONS",
"abort_on_error=1:"
"detect_leaks=0:"
"symbolize=0:"
"allocator_may_return_null=1", 0);
"allocator_may_return_null=1",
0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
"symbolize=0:"
@ -152,7 +154,8 @@ u8 run_target(char** argv, u32 timeout) {
}
/* Configure timeout, as requested by user, then wait for child to terminate. */
/* Configure timeout, as requested by user, then wait for child to terminate.
*/
it.it_value.tv_sec = (timeout / 1000);
it.it_value.tv_usec = (timeout % 1000) * 1000;
@ -181,7 +184,8 @@ u8 run_target(char** argv, u32 timeout) {
if (!WIFSTOPPED(status)) child_pid = 0;
getitimer(ITIMER_REAL, &it);
exec_ms = (u64) timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
exec_ms =
(u64)timeout - (it.it_value.tv_sec * 1000 + it.it_value.tv_usec / 1000);
if (slowest_exec_ms < exec_ms) slowest_exec_ms = exec_ms;
it.it_value.tv_sec = 0;
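/* Illustration (not from this commit): exec_ms above is recovered from the
   unexpired portion of the ITIMER_REAL timer -- e.g. with a 1000 ms timeout
   and 640 ms still left on the timer when the child stops, the run is
   accounted as 1000 - 640 = 360 ms. */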
@ -223,8 +227,10 @@ u8 run_target(char** argv, u32 timeout) {
must use a special exit code. */
if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) {
kill_signal = 0;
return FAULT_CRASH;
}
if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG)
@ -234,7 +240,6 @@ u8 run_target(char** argv, u32 timeout) {
}
/* Write modified data to file for testing. If out_file is set, the old file
is unlinked and a new one is created. Otherwise, out_fd is rewound and
truncated. */
@ -251,14 +256,20 @@ void write_to_testcase(void* mem, u32 len) {
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
} else lseek(fd, 0, SEEK_SET);
} else
lseek(fd, 0, SEEK_SET);
if (pre_save_handler) {
u8* new_data;
size_t new_size = pre_save_handler(mem, len, &new_data);
ck_write(fd, new_data, new_size, out_file);
} else {
ck_write(fd, mem, len, out_file);
}
if (!out_file) {
@ -266,11 +277,12 @@ void write_to_testcase(void* mem, u32 len) {
if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
} else close(fd);
} else
close(fd);
}
/* The same, but with an adjustable gap. Used for trimming. */
void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
@ -286,7 +298,9 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
} else lseek(fd, 0, SEEK_SET);
} else
lseek(fd, 0, SEEK_SET);
if (skip_at) ck_write(fd, mem, skip_at, out_file);
@ -298,17 +312,18 @@ void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) {
if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
} else close(fd);
} else
close(fd);
}
/* Calibrate a new test case. This is done when processing the input directory
to warn about flaky or otherwise problematic test cases early on; and when
new paths are discovered to detect variable behavior and so on. */
u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
u32 handicap, u8 from_queue) {
u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, u32 handicap,
u8 from_queue) {
static u8 first_trace[MAP_SIZE];
@ -326,8 +341,8 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
to intermittent latency. */
if (!from_queue || resuming_fuzz)
use_tmout = MAX(exec_tmout + CAL_TMOUT_ADD,
exec_tmout * CAL_TMOUT_PERC / 100);
use_tmout =
MAX(exec_tmout + CAL_TMOUT_ADD, exec_tmout * CAL_TMOUT_PERC / 100);
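/* Illustration (not from this commit) -- worked example, assuming the stock
   CAL_TMOUT_ADD / CAL_TMOUT_PERC defaults of +50 ms and 125%: when
   calibrating imported or resumed inputs with -t 1000, use_tmout becomes
   MAX(1000 + 50, 1000 * 125 / 100) = 1250 ms, i.e. calibration tolerates runs
   up to 25% slower than ordinary fuzzing. */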
++q->cal_failed;
@ -337,8 +352,7 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
/* Make sure the forkserver is up before we do anything, and let's not
count its spin-up time toward binary calibration. */
if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
init_forkserver(argv);
if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) init_forkserver(argv);
if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
@ -360,8 +374,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
if (stop_soon || fault != crash_mode) goto abort_calibration;
if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) {
fault = FAULT_NOINST;
goto abort_calibration;
}
cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST);
@ -426,8 +442,10 @@ u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
abort_calibration:
if (new_bits == 2 && !q->has_new_cov) {
q->has_new_cov = 1;
++queued_with_cov;
}
/* Mark variable paths. */
@ -437,8 +455,10 @@ abort_calibration:
var_byte_count = count_bytes(var_bytes);
if (!q->var_behavior) {
mark_as_variable(q);
++queued_variable;
}
}
@ -453,7 +473,6 @@ abort_calibration:
}
/* Grab interesting test cases from other fuzzers. */
void sync_fuzzers(char** argv) {
@ -468,7 +487,8 @@ void sync_fuzzers(char** argv) {
stage_max = stage_cur = 0;
cur_depth = 0;
/* Look at the entries created for every other fuzzer in the sync directory. */
/* Look at the entries created for every other fuzzer in the sync directory.
*/
while ((sd_ent = readdir(sd))) {
@ -490,8 +510,10 @@ void sync_fuzzers(char** argv) {
qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name);
if (!(qd = opendir(qd_path))) {
ck_free(qd_path);
continue;
}
/* Retrieve the ID of the last seen test case. */
@ -502,8 +524,7 @@ void sync_fuzzers(char** argv) {
if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path);
if (read(id_fd, &min_accept, sizeof(u32)) > 0)
lseek(id_fd, 0, SEEK_SET);
if (read(id_fd, &min_accept, sizeof(u32)) > 0) lseek(id_fd, 0, SEEK_SET);
next_min_accept = min_accept;
@ -514,8 +535,8 @@ void sync_fuzzers(char** argv) {
stage_cur = 0;
stage_max = 0;
/* For every file queued by this fuzzer, parse ID and see if we have looked at
it before; exec a test case if not. */
/* For every file queued by this fuzzer, parse ID and see if we have looked
at it before; exec a test case if not. */
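/* Illustration (not from this commit): the "looked at before" bookkeeping is
   a single u32 per peer stored under out_dir/.synced/<peer_id> -- min_accept
   is read from it above, queue entries numbered below min_accept are skipped,
   and next_min_accept (highest imported id + 1) is expected to be written
   back once this directory walk finishes. */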
while ((qd_ent = readdir(qd))) {
@ -525,12 +546,12 @@ void sync_fuzzers(char** argv) {
if (qd_ent->d_name[0] == '.' ||
sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 ||
syncing_case < min_accept) continue;
syncing_case < min_accept)
continue;
/* OK, sounds like a new one. Let's give it a try. */
if (syncing_case >= next_min_accept)
next_min_accept = syncing_case + 1;
if (syncing_case >= next_min_accept) next_min_accept = syncing_case + 1;
path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
@ -539,8 +560,10 @@ void sync_fuzzers(char** argv) {
fd = open(path, O_RDONLY);
if (fd < 0) {
ck_free(path);
continue;
}
if (fstat(fd, &st)) PFATAL("fstat() failed");
@ -591,7 +614,6 @@ void sync_fuzzers(char** argv) {
}
/* Trim all new test cases to save cycles when doing deterministic checks. The
trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
file size, to keep the stage short and sweet. */
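/* Illustration (not from this commit) -- worked example, assuming the stock
   TRIM_START_STEPS / TRIM_END_STEPS / TRIM_MIN_BYTES of 16 / 1024 / 4: a
   6,000-byte case is rounded up to a power-of-two length of 8,192, so
   trimming starts by carving out 8192 / 16 = 512-byte blocks and keeps
   halving the block size down to 8192 / 1024 = 8 bytes, never going below
   4 bytes. */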
@ -599,8 +621,7 @@ void sync_fuzzers(char** argv) {
u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
#ifdef USE_PYTHON
if (py_functions[PY_FUNC_TRIM])
return trim_case_python(argv, q, in_buf);
if (py_functions[PY_FUNC_TRIM]) return trim_case_python(argv, q, in_buf);
#endif
static u8 tmp[64];
@ -679,7 +700,9 @@ u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) {
}
} else remove_pos += remove_len;
} else
remove_pos += remove_len;
/* Since this can be slow, update the screen every now and then. */
@ -720,7 +743,6 @@ abort_trimming:
}
/* Write a modified test case, run program, process results. Handle
error conditions, returning 1 if it's time to bail out. This is
a helper function for fuzz_one(). */
@ -745,11 +767,15 @@ u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) {
if (fault == FAULT_TMOUT) {
if (subseq_tmouts++ > TMOUT_LIMIT) {
++cur_skipped_paths;
return 1;
}
} else subseq_tmouts = 0;
} else
subseq_tmouts = 0;
/* Users can hit us with SIGUSR1 to request the current input
to be abandoned. */

View File

@ -47,16 +47,21 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
where exec/sec stats and such are not readily available. */
if (!bitmap_cvg && !stability && !eps) {
bitmap_cvg = last_bcvg;
stability = last_stab;
eps = last_eps;
} else {
last_bcvg = bitmap_cvg;
last_stab = stability;
last_eps = eps;
}
fprintf(f, "start_time : %llu\n"
fprintf(f,
"start_time : %llu\n"
"last_update : %llu\n"
"fuzzer_pid : %d\n"
"cycles_done : %llu\n"
@ -83,22 +88,26 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
"slowest_exec_ms : %llu\n"
"peak_rss_mb : %lu\n"
"afl_banner : %s\n"
"afl_version : " VERSION "\n"
"afl_version : " VERSION
"\n"
"target_mode : %s%s%s%s%s%s%s%s\n"
"command_line : %s\n",
start_time / 1000, get_cur_time() / 1000, getpid(),
queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
queued_paths, queued_favored, queued_discovered, queued_imported,
max_depth, current_entry, pending_favored, pending_not_fuzzed,
queued_variable, stability, bitmap_cvg, unique_crashes,
unique_hangs, last_path_time / 1000, last_crash_time / 1000,
last_hang_time / 1000, total_execs - last_crash_execs,
exec_tmout, slowest_exec_ms, (unsigned long int)usage.ru_maxrss, use_banner,
unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "", dumb_mode ? " dumb " : "",
no_forkserver ? "no_forksrv " : "", crash_mode ? "crash " : "",
persistent_mode ? "persistent " : "", deferred_mode ? "deferred " : "",
(unicorn_mode || qemu_mode || dumb_mode || no_forkserver || crash_mode ||
persistent_mode || deferred_mode) ? "" : "default",
queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, queued_paths,
queued_favored, queued_discovered, queued_imported, max_depth,
current_entry, pending_favored, pending_not_fuzzed, queued_variable,
stability, bitmap_cvg, unique_crashes, unique_hangs,
last_path_time / 1000, last_crash_time / 1000, last_hang_time / 1000,
total_execs - last_crash_execs, exec_tmout, slowest_exec_ms,
(unsigned long int)usage.ru_maxrss, use_banner,
unicorn_mode ? "unicorn" : "", qemu_mode ? "qemu " : "",
dumb_mode ? " dumb " : "", no_forkserver ? "no_forksrv " : "",
crash_mode ? "crash " : "", persistent_mode ? "persistent " : "",
deferred_mode ? "deferred " : "",
(unicorn_mode || qemu_mode || dumb_mode || no_forkserver ||
crash_mode || persistent_mode || deferred_mode)
? ""
: "default",
orig_cmdline);
/* ignore errors */
@ -106,7 +115,6 @@ void write_stats_file(double bitmap_cvg, double stability, double eps) {
}
/* Update the plot file if there is a reason to. */
void maybe_update_plot_file(double bitmap_cvg, double eps) {
@ -117,7 +125,8 @@ void maybe_update_plot_file(double bitmap_cvg, double eps) {
if (prev_qp == queued_paths && prev_pf == pending_favored &&
prev_pnf == pending_not_fuzzed && prev_ce == current_entry &&
prev_qc == queue_cycle && prev_uc == unique_crashes &&
prev_uh == unique_hangs && prev_md == max_depth) return;
prev_uh == unique_hangs && prev_md == max_depth)
return;
prev_qp = queued_paths;
prev_pf = pending_favored;
@ -144,7 +153,6 @@ void maybe_update_plot_file(double bitmap_cvg, double eps) {
}
/* Check terminal dimensions after resize. */
static void check_term_size(void) {
@ -160,7 +168,6 @@ static void check_term_size(void) {
}
/* A spiffy retro stats screen! This is called every stats_update_freq
execve() calls, plus in several other circumstances. */
@ -194,14 +201,13 @@ void show_stats(void) {
} else {
double cur_avg = ((double)(total_execs - last_execs)) * 1000 /
(cur_ms - last_ms);
double cur_avg =
((double)(total_execs - last_execs)) * 1000 / (cur_ms - last_ms);
/* If there is a dramatic (5x+) jump in speed, reset the indicator
more quickly. */
if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec)
avg_exec = cur_avg;
if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) avg_exec = cur_avg;
avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) +
cur_avg * (1.0 / AVG_SMOOTHING);
@ -249,7 +255,8 @@ void show_stats(void) {
/* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */
if (!dumb_mode && cycles_wo_finds > 100 && !pending_not_fuzzed &&
getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2;
getenv("AFL_EXIT_WHEN_DONE"))
stop_soon = 2;
if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2;
@ -276,7 +283,8 @@ void show_stats(void) {
if (term_too_small) {
SAYF(cBRI "Your terminal is too small to display the UI.\n"
SAYF(cBRI
"Your terminal is too small to display the UI.\n"
"Please resize terminal window to at least 79x24.\n" cRST);
return;
@ -285,18 +293,20 @@ void show_stats(void) {
/* Let's start by drawing a centered banner. */
banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) + strlen(power_name) + 3 + 5;
banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner) +
strlen(power_name) + 3 + 5;
banner_pad = (79 - banner_len) / 2;
memset(tmp, ' ', banner_pad);
#ifdef HAVE_AFFINITY
sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
" (%s) " cPIN "[%s]" cBLU " {%d}", crash_mode ? cPIN "peruvian were-rabbit" :
cYEL "american fuzzy lop", use_banner, power_name, cpu_aff);
sprintf(tmp + banner_pad,
"%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]" cBLU " {%d}",
crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
use_banner, power_name, cpu_aff);
#else
sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN
" (%s) " cPIN "[%s]", crash_mode ? cPIN "peruvian were-rabbit" :
cYEL "american fuzzy lop", use_banner, power_name);
sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN " (%s) " cPIN "[%s]",
crash_mode ? cPIN "peruvian were-rabbit" : cYEL "american fuzzy lop",
use_banner, power_name);
#endif /* HAVE_AFFINITY */
SAYF("\n%s\n", tmp);
@ -315,8 +325,9 @@ void show_stats(void) {
/* Lord, forgive me this. */
SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH bHB
bH bSTOP cCYA " overall results " bSTG bH2 bH2 bRT "\n");
SAYF(SET_G1 bSTG bLT bH bSTOP cCYA
" process timing " bSTG bH30 bH5 bH bHB bH bSTOP cCYA
" overall results " bSTG bH2 bH2 bRT "\n");
if (dumb_mode) {
@ -327,17 +338,22 @@ void show_stats(void) {
u64 min_wo_finds = (cur_ms - last_path_time) / 1000 / 60;
/* First queue cycle: don't stop now! */
if (queue_cycle == 1 || min_wo_finds < 15) strcpy(tmp, cMGN); else
if (queue_cycle == 1 || min_wo_finds < 15)
strcpy(tmp, cMGN);
else
/* Subsequent cycles, but we're still making finds. */
if (cycles_wo_finds < 25 || min_wo_finds < 30) strcpy(tmp, cYEL); else
if (cycles_wo_finds < 25 || min_wo_finds < 30)
strcpy(tmp, cYEL);
else
/* No finds for a long time and no test cases to try. */
if (cycles_wo_finds > 100 && !pending_not_fuzzed && min_wo_finds > 120)
strcpy(tmp, cLGN);
/* Default: cautiously OK to stop? */
else strcpy(tmp, cLBL);
else
strcpy(tmp, cLBL);
}
@ -379,8 +395,7 @@ void show_stats(void) {
SAYF(bV bSTOP " last uniq crash : " cRST "%-33s " bSTG bV bSTOP
" uniq crashes : %s%-6s" bSTG bV "\n",
DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST,
tmp);
DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, tmp);
sprintf(tmp, "%s%s", DI(unique_hangs),
(unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : "");
@ -389,7 +404,8 @@ void show_stats(void) {
" uniq hangs : " cRST "%-6s" bSTG bV "\n",
DTD(cur_ms, last_hang_time), tmp);
SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
SAYF(bVR bH bSTOP cCYA
" cycle progress " bSTG bH10 bH5 bH2 bH2 bHB bH bSTOP cCYA
" map coverage " bSTG bH bHT bH20 bH2 bVL "\n");
/* This gets funny because we want to print several variable-length variables
@ -402,23 +418,24 @@ void show_stats(void) {
SAYF(bV bSTOP " now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) *
100 / MAP_SIZE, t_byte_ratio);
sprintf(tmp, "%0.02f%% / %0.02f%%",
((double)queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
SAYF(" map density : %s%-21s" bSTG bV "\n", t_byte_ratio > 70 ? cLRD :
((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp);
SAYF(" map density : %s%-21s" bSTG bV "\n",
t_byte_ratio > 70 ? cLRD : ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST),
tmp);
sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths),
((double)cur_skipped_paths * 100) / queued_paths);
SAYF(bV bSTOP " paths timed out : " cRST "%-16s " bSTG bV, tmp);
sprintf(tmp, "%0.02f bits/tuple",
t_bytes ? (((double)t_bits) / t_bytes) : 0);
sprintf(tmp, "%0.02f bits/tuple", t_bytes ? (((double)t_bits) / t_bytes) : 0);
SAYF(bSTOP " count coverage : " cRST "%-21s" bSTG bV "\n", tmp);
SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
SAYF(bVR bH bSTOP cCYA
" stage progress " bSTG bH10 bH5 bH2 bH2 bX bH bSTOP cCYA
" findings in depth " bSTG bH10 bH5 bH2 bH2 bVL "\n");
sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored),
@ -427,7 +444,8 @@ void show_stats(void) {
/* Yeah... it's still going on... halp? */
SAYF(bV bSTOP " now trying : " cRST "%-20s " bSTG bV bSTOP
" favored paths : " cRST "%-22s" bSTG bV "\n", stage_name, tmp);
" favored paths : " cRST "%-22s" bSTG bV "\n",
stage_name, tmp);
if (!stage_max) {
@ -453,14 +471,14 @@ void show_stats(void) {
if (crash_mode) {
SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
" new crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
unique_crashes ? cLRD : cRST, tmp);
" new crashes : %s%-22s" bSTG bV "\n",
DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
} else {
SAYF(bV bSTOP " total execs : " cRST "%-20s " bSTG bV bSTOP
" total crashes : %s%-22s" bSTG bV "\n", DI(total_execs),
unique_crashes ? cLRD : cRST, tmp);
" total crashes : %s%-22s" bSTG bV "\n",
DI(total_execs), unique_crashes ? cLRD : cRST, tmp);
}
@ -468,8 +486,8 @@ void show_stats(void) {
if (avg_exec < 100) {
sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ?
"zzzz..." : "slow!");
sprintf(tmp, "%s/sec (%s)", DF(avg_exec),
avg_exec < 20 ? "zzzz..." : "slow!");
SAYF(bV bSTOP " exec speed : " cLRD "%-20s ", tmp);
@ -487,8 +505,9 @@ void show_stats(void) {
/* Aaaalmost there... hold on! */
SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bHT bH10
bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bVL "\n");
SAYF(bVR bH cCYA bSTOP
" fuzzing strategy yields " bSTG bH10 bHT bH10 bH5 bHB bH bSTOP cCYA
" path geometry " bSTG bH5 bH2 bVL "\n");
if (skip_deterministic) {
@ -496,66 +515,77 @@ void show_stats(void) {
} else {
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]),
DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]),
DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4]));
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP1]),
DI(stage_cycles[STAGE_FLIP1]), DI(stage_finds[STAGE_FLIP2]),
DI(stage_cycles[STAGE_FLIP2]), DI(stage_finds[STAGE_FLIP4]),
DI(stage_cycles[STAGE_FLIP4]));
}
SAYF(bV bSTOP " bit flips : " cRST "%-36s " bSTG bV bSTOP " levels : "
cRST "%-10s" bSTG bV "\n", tmp, DI(max_depth));
SAYF(bV bSTOP " bit flips : " cRST "%-36s " bSTG bV bSTOP
" levels : " cRST "%-10s" bSTG bV "\n",
tmp, DI(max_depth));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]),
DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]),
DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32]));
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_FLIP8]),
DI(stage_cycles[STAGE_FLIP8]), DI(stage_finds[STAGE_FLIP16]),
DI(stage_cycles[STAGE_FLIP16]), DI(stage_finds[STAGE_FLIP32]),
DI(stage_cycles[STAGE_FLIP32]));
SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP " pending : "
cRST "%-10s" bSTG bV "\n", tmp, DI(pending_not_fuzzed));
SAYF(bV bSTOP " byte flips : " cRST "%-36s " bSTG bV bSTOP
" pending : " cRST "%-10s" bSTG bV "\n",
tmp, DI(pending_not_fuzzed));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]),
DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]),
DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32]));
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_ARITH8]),
DI(stage_cycles[STAGE_ARITH8]), DI(stage_finds[STAGE_ARITH16]),
DI(stage_cycles[STAGE_ARITH16]), DI(stage_finds[STAGE_ARITH32]),
DI(stage_cycles[STAGE_ARITH32]));
SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP " pend fav : "
cRST "%-10s" bSTG bV "\n", tmp, DI(pending_favored));
SAYF(bV bSTOP " arithmetics : " cRST "%-36s " bSTG bV bSTOP
" pend fav : " cRST "%-10s" bSTG bV "\n",
tmp, DI(pending_favored));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]),
DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]),
DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32]));
sprintf(
tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_INTEREST8]),
DI(stage_cycles[STAGE_INTEREST8]), DI(stage_finds[STAGE_INTEREST16]),
DI(stage_cycles[STAGE_INTEREST16]), DI(stage_finds[STAGE_INTEREST32]),
DI(stage_cycles[STAGE_INTEREST32]));
SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP " own finds : "
cRST "%-10s" bSTG bV "\n", tmp, DI(queued_discovered));
SAYF(bV bSTOP " known ints : " cRST "%-36s " bSTG bV bSTOP
" own finds : " cRST "%-10s" bSTG bV "\n",
tmp, DI(queued_discovered));
if (!skip_deterministic)
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]),
DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]),
DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO]));
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_EXTRAS_UO]),
DI(stage_cycles[STAGE_EXTRAS_UO]), DI(stage_finds[STAGE_EXTRAS_UI]),
DI(stage_cycles[STAGE_EXTRAS_UI]), DI(stage_finds[STAGE_EXTRAS_AO]),
DI(stage_cycles[STAGE_EXTRAS_AO]));
SAYF(bV bSTOP " dictionary : " cRST "%-36s " bSTG bV bSTOP
" imported : " cRST "%-10s" bSTG bV "\n", tmp,
sync_id ? DI(queued_imported) : (u8*)"n/a");
" imported : " cRST "%-10s" bSTG bV "\n",
tmp, sync_id ? DI(queued_imported) : (u8*)"n/a");
sprintf(tmp, "%s/%s, %s/%s, %s/%s",
DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]),
DI(stage_finds[STAGE_PYTHON]), DI(stage_cycles[STAGE_PYTHON]));
sprintf(tmp, "%s/%s, %s/%s, %s/%s", DI(stage_finds[STAGE_HAVOC]),
DI(stage_cycles[STAGE_HAVOC]), DI(stage_finds[STAGE_SPLICE]),
DI(stage_cycles[STAGE_SPLICE]), DI(stage_finds[STAGE_PYTHON]),
DI(stage_cycles[STAGE_PYTHON]));
SAYF(bV bSTOP " havoc : " cRST "%-36s " bSTG bV bSTOP, tmp);
if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio);
else strcpy(tmp, "n/a");
if (t_bytes)
sprintf(tmp, "%0.02f%%", stab_ratio);
else
strcpy(tmp, "n/a");
SAYF(" stability : %s%-10s" bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40)
? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20))
? cMGN : cRST), tmp);
SAYF(" stability : %s%-10s" bSTG bV "\n",
(stab_ratio < 85 && var_byte_count > 40)
? cLRD
: ((queued_variable && (!persistent_mode || var_byte_count > 20))
? cMGN
: cRST),
tmp);
if (!bytes_trim_out) {
@ -587,13 +617,21 @@ void show_stats(void) {
strcat(tmp, tmp2);
}
if (custom_mutator) {
sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]), DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp);
sprintf(tmp, "%s/%s", DI(stage_finds[STAGE_CUSTOM_MUTATOR]),
DI(stage_cycles[STAGE_CUSTOM_MUTATOR]));
SAYF(bV bSTOP " custom mut. : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB
"\n" bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1,
tmp);
} else {
SAYF(bV bSTOP " trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB "\n"
bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1, tmp);
SAYF(bV bSTOP " trim : " cRST "%-36s " bSTG bVR bH20 bH2 bH bRB
"\n" bLB bH30 bH20 bH2 bRB bSTOP cRST RESET_G1,
tmp);
}
/* Provide some CPU utilization stats. */
@ -618,25 +656,26 @@ void show_stats(void) {
if (cpu_aff >= 0) {
SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST,
MIN(cpu_aff, 999), cpu_color,
MIN(cur_utilization, 999));
SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, MIN(cpu_aff, 999),
cpu_color, MIN(cur_utilization, 999));
} else {
SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST,
cpu_color, MIN(cur_utilization, 999));
SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color,
MIN(cur_utilization, 999));
}
#else
SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST,
cpu_color, MIN(cur_utilization, 999));
SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, cpu_color,
MIN(cur_utilization, 999));
#endif /* ^HAVE_AFFINITY */
} else SAYF("\r");
} else
SAYF("\r");
/* Hallelujah! */
@ -644,7 +683,6 @@ void show_stats(void) {
}
/* Display quick statistics at the end of processing the input directory,
plus a bunch of warnings. Some calibration stuff also ended up here,
along with several hardcoded constants. Maybe clean up eventually. */
@ -681,9 +719,12 @@ void show_init_stats(void) {
/* Let's keep things moving with slow binaries. */
if (avg_us > 50000) havoc_div = 10; /* 0-19 execs/sec */
else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec */
else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */
if (avg_us > 50000)
havoc_div = 10; /* 0-19 execs/sec */
else if (avg_us > 20000)
havoc_div = 5; /* 20-49 execs/sec */
else if (avg_us > 10000)
havoc_div = 2; /* 50-100 execs/sec */
if (!resuming_fuzz) {
@ -698,7 +739,9 @@ void show_init_stats(void) {
WARNF(cLRD "Some test cases look useless. Consider using a smaller set.");
if (queued_paths > 100)
WARNF(cLRD "You probably have far too many input files! Consider trimming down.");
WARNF(cLRD
"You probably have far too many input files! Consider trimming "
"down.");
else if (queued_paths > 20)
WARNF("You have lots of input files; try starting small.");
@ -706,11 +749,13 @@ void show_init_stats(void) {
OKF("Here are some useful stats:\n\n"
cGRA " Test case count : " cRST "%u favored, %u variable, %u total\n"
cGRA " Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n"
cGRA " Exec timing : " cRST "%s to %s us (average: %s us)\n",
cGRA " Test case count : " cRST
"%u favored, %u variable, %u total\n" cGRA " Bitmap range : " cRST
"%u to %u bits (average: %0.02f bits)\n" cGRA
" Exec timing : " cRST "%s to %s us (average: %s us)\n",
queued_favored, queued_variable, queued_paths, min_bits, max_bits,
((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1),
((double)total_bitmap_size) /
(total_bitmap_entries ? total_bitmap_entries : 1),
DI(min_us), DI(max_us), DI(avg_us));
if (!timeout_given) {
@ -722,9 +767,12 @@ void show_init_stats(void) {
random scheduler jitter is less likely to have any impact, and because
our patience is wearing thin =) */
if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000;
else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000;
else exec_tmout = avg_us * 5 / 1000;
if (avg_us > 50000)
exec_tmout = avg_us * 2 / 1000;
else if (avg_us > 10000)
exec_tmout = avg_us * 3 / 1000;
else
exec_tmout = avg_us * 5 / 1000;
exec_tmout = MAX(exec_tmout, max_us / 1000);
exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND;
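/* Illustration (not from this commit) -- worked example for the auto-scaling
   above: with an average execution time of 30,000 us the middle branch gives
   3 * 30 = 90 ms; if the slowest seed took 114,000 us, the MAX() raises that
   to 114 ms, and the last line rounds it up to the next EXEC_TM_ROUND
   boundary (120 ms for the usual 20 ms rounding). */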

View File

@ -33,23 +33,28 @@ static void usage(u8* argv0) {
# define PHYTON_SUPPORT ""
#endif
SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
SAYF(
"\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n"
"Required parameters:\n"
" -i dir - input directory with test cases\n"
" -o dir - output directory for fuzzer findings\n\n"
"Execution control settings:\n"
" -p schedule - power schedules recompute a seed's performance score.\n"
" <explore (default), fast, coe, lin, quad, or exploit>\n"
" -p schedule - power schedules recompute a seed's performance "
"score.\n"
" <explore (default), fast, coe, lin, quad, or "
"exploit>\n"
" see docs/power_schedules.txt\n"
" -f file - location read by the fuzzed program (stdin)\n"
" -t msec - timeout for each run (auto-scaled, 50-%d ms)\n"
" -m megs - memory limit for child process (%d MB)\n"
" -Q - use binary-only instrumentation (QEMU mode)\n"
" -U - use Unicorn-based instrumentation (Unicorn mode)\n\n"
" -L minutes - use MOpt(imize) mode and set the limit time for entering the\n"
" pacemaker mode (minutes of no new paths, 0 = immediately).\n"
" -L minutes - use MOpt(imize) mode and set the limit time for "
"entering the\n"
" pacemaker mode (minutes of no new paths, 0 = "
"immediately).\n"
" a recommended value is 10-60. see docs/README.MOpt\n\n"
"Fuzzing behavior settings:\n"
@ -59,15 +64,19 @@ static void usage(u8* argv0) {
"Testing settings:\n"
" -s seed - use a fixed seed for the RNG\n"
" -V seconds - fuzz for a maximum total time of seconds then terminate\n"
" -E execs - fuzz for a maximum number of total executions then terminate\n\n"
" -V seconds - fuzz for a maximum total time of seconds then "
"terminate\n"
" -E execs - fuzz for a maximum number of total executions then "
"terminate\n\n"
"Other stuff:\n"
" -T text - text banner to show on the screen\n"
" -M / -S id - distributed mode (see parallel_fuzzing.txt)\n"
" -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap file\n"
" -B bitmap.txt - mutate a specific test case, use the out/fuzz_bitmap "
"file\n"
" -C - crash exploration mode (the peruvian rabbit thing)\n"
" -e ext - File extension for the temporarily generated test case\n\n"
" -e ext - File extension for the temporarily generated test "
"case\n\n"
PHYTON_SUPPORT
@ -83,12 +92,15 @@ static void usage(u8* argv0) {
#ifndef AFL_LIB
static int stricmp(char const* a, char const* b) {
for (;; ++a, ++b) {
int d;
d = tolower(*a) - tolower(*b);
if (d != 0 || !*a)
return d;
if (d != 0 || !*a) return d;
}
}
/* Main entry point */
@ -107,40 +119,62 @@ int main(int argc, char** argv) {
struct timeval tv;
struct timezone tz;
SAYF(cCYA "afl-fuzz" VERSION cRST " based on afl by <lcamtuf@google.com> and a big online community\n");
SAYF(cCYA
"afl-fuzz" VERSION cRST
" based on afl by <lcamtuf@google.com> and a big online community\n");
doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
gettimeofday(&tv, &tz);
init_seed = tv.tv_sec ^ tv.tv_usec ^ getpid();
while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) > 0)
while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QUe:p:s:V:E:L:")) >
0)
switch (opt) {
case 's': {
init_seed = strtoul(optarg, 0L, 10);
fixed_seed = 1;
break;
}
case 'p': /* Power schedule */
if (!stricmp(optarg, "fast")) {
schedule = FAST;
} else if (!stricmp(optarg, "coe")) {
schedule = COE;
} else if (!stricmp(optarg, "exploit")) {
schedule = EXPLOIT;
} else if (!stricmp(optarg, "lin")) {
schedule = LIN;
} else if (!stricmp(optarg, "quad")) {
schedule = QUAD;
} else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") || !stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
} else if (!stricmp(optarg, "explore") || !stricmp(optarg, "default") ||
!stricmp(optarg, "normal") || !stricmp(optarg, "afl")) {
schedule = EXPLORE;
} else {
FATAL("Unknown -p power schedule");
}
break;
case 'e':
@ -179,7 +213,8 @@ int main(int argc, char** argv) {
if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
!master_id || !master_max || master_id > master_max ||
master_max > 1000000) FATAL("Bogus master ID passed to -M");
master_max > 1000000)
FATAL("Bogus master ID passed to -M");
}
@ -214,11 +249,15 @@ int main(int argc, char** argv) {
if (timeout_given) FATAL("Multiple -t options not supported");
if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 ||
optarg[0] == '-') FATAL("Bad syntax used for -t");
optarg[0] == '-')
FATAL("Bad syntax used for -t");
if (exec_tmout < 5) FATAL("Dangerously low value of -t");
if (suffix == '+') timeout_given = 2; else timeout_given = 1;
if (suffix == '+')
timeout_given = 2;
else
timeout_given = 1;
break;
@ -239,7 +278,8 @@ int main(int argc, char** argv) {
}
if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
optarg[0] == '-') FATAL("Bad syntax used for -m");
optarg[0] == '-')
FATAL("Bad syntax used for -m");
switch (suffix) {
@ -296,7 +336,10 @@ int main(int argc, char** argv) {
case 'n': /* dumb mode */
if (dumb_mode) FATAL("Multiple -n options not supported");
if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1;
if (getenv("AFL_DUMB_FORKSRV"))
dumb_mode = 2;
else
dumb_mode = 1;
break;
@ -325,18 +368,20 @@ int main(int argc, char** argv) {
break;
case 'V': {
most_time_key = 1;
if (sscanf(optarg, "%llu", &most_time) < 1 || optarg[0] == '-')
FATAL("Bad syntax used for -V");
}
break;
} break;
case 'E': {
most_execs_key = 1;
if (sscanf(optarg, "%llu", &most_execs) < 1 || optarg[0] == '-')
FATAL("Bad syntax used for -E");
}
break;
} break;
case 'L': { /* MOpt mode */
@ -344,19 +389,19 @@ int main(int argc, char** argv) {
limit_time_sig = 1;
havoc_max_mult = HAVOC_MAX_MULT_MOPT;
if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 ||
optarg[0] == '-') FATAL("Bad syntax used for -L");
if (sscanf(optarg, "%llu", &limit_time_puppet) < 1 || optarg[0] == '-')
FATAL("Bad syntax used for -L");
u64 limit_time_puppet2 = limit_time_puppet * 60 * 1000;
if (limit_time_puppet2 < limit_time_puppet ) FATAL("limit_time overflow");
if (limit_time_puppet2 < limit_time_puppet)
FATAL("limit_time overflow");
limit_time_puppet = limit_time_puppet2;
SAYF("limit_time_puppet %llu\n", limit_time_puppet);
swarm_now = 0;
if (limit_time_puppet == 0 )
key_puppet = 1;
if (limit_time_puppet == 0) key_puppet = 1;
int i;
int tmp_swarm = 0;
@ -365,10 +410,12 @@ int main(int argc, char** argv) {
w_now = (w_init - w_end) * (g_max - g_now) / (g_max) + w_end;
for (tmp_swarm = 0; tmp_swarm < swarm_num; ++tmp_swarm) {
double total_puppet_temp = 0.0;
swarm_fitness[tmp_swarm] = 0.0;
for (i = 0; i < operator_num; ++i) {
stage_finds_puppet[tmp_swarm][i] = 0;
probability_now[tmp_swarm][i] = 0.0;
x_now[tmp_swarm][i] = ((double)(random() % 7000) * 0.0001 + 0.1);
@ -381,16 +428,24 @@ int main(int argc, char** argv) {
}
for (i = 0; i < operator_num; ++i) {
stage_cycles_puppet_v2[tmp_swarm][i] = stage_cycles_puppet[tmp_swarm][i];
stage_finds_puppet_v2[tmp_swarm][i] = stage_finds_puppet[tmp_swarm][i];
stage_cycles_puppet_v2[tmp_swarm][i] =
stage_cycles_puppet[tmp_swarm][i];
stage_finds_puppet_v2[tmp_swarm][i] =
stage_finds_puppet[tmp_swarm][i];
x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / total_puppet_temp;
}
double x_temp = 0.0;
for (i = 0; i < operator_num; ++i) {
probability_now[tmp_swarm][i] = 0.0;
v_now[tmp_swarm][i] = w_now * v_now[tmp_swarm][i] + RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) + RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
v_now[tmp_swarm][i] =
w_now * v_now[tmp_swarm][i] +
RAND_C * (L_best[tmp_swarm][i] - x_now[tmp_swarm][i]) +
RAND_C * (G_best[i] - x_now[tmp_swarm][i]);
x_now[tmp_swarm][i] += v_now[tmp_swarm][i];
@ -400,40 +455,45 @@ int main(int argc, char** argv) {
x_now[tmp_swarm][i] = v_min;
x_temp += x_now[tmp_swarm][i];
}
for (i = 0; i < operator_num; ++i) {
x_now[tmp_swarm][i] = x_now[tmp_swarm][i] / x_temp;
if (likely(i != 0))
probability_now[tmp_swarm][i] = probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
probability_now[tmp_swarm][i] =
probability_now[tmp_swarm][i - 1] + x_now[tmp_swarm][i];
else
probability_now[tmp_swarm][i] = x_now[tmp_swarm][i];
}
if (probability_now[tmp_swarm][operator_num - 1] < 0.99 || probability_now[tmp_swarm][operator_num - 1] > 1.01)
if (probability_now[tmp_swarm][operator_num - 1] < 0.99 ||
probability_now[tmp_swarm][operator_num - 1] > 1.01)
FATAL("ERROR probability");
}
for (i = 0; i < operator_num; ++i) {
core_operator_finds_puppet[i] = 0;
core_operator_finds_puppet_v2[i] = 0;
core_operator_cycles_puppet[i] = 0;
core_operator_cycles_puppet_v2[i] = 0;
core_operator_cycles_puppet_v3[i] = 0;
}
}
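/* Illustration (not from this commit): the loop above is a particle-swarm
   initialization. For every swarm and mutation operator the velocity is
   updated as

       v_now = w_now * v_now
               + RAND_C * (L_best - x_now)    (pull toward the particle's best)
               + RAND_C * (G_best - x_now)    (pull toward the global best)

   the position x_now is then advanced by v_now and clamped into
   [v_min, v_max], the operator weights are renormalized to sum to 1, and
   probability_now[] accumulates them into a cumulative distribution, which is
   why its final entry must land between 0.99 and 1.01. */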
break;
default:
} break;
usage(argv[0]);
default: usage(argv[0]);
}
if (optind == argc || !in_dir || !out_dir) usage(argv[0]);
if (fixed_seed)
OKF("Running with fixed seed: %u", (u32)init_seed);
if (fixed_seed) OKF("Running with fixed seed: %u", (u32)init_seed);
srandom((u32)init_seed);
setup_signal_handlers();
check_asan_opts();
@ -446,11 +506,16 @@ int main(int argc, char** argv) {
FATAL("Input and output directories can't be the same");
if ((tmp_dir = getenv("AFL_TMPDIR")) != NULL) {
    char tmpfile[strlen(tmp_dir) + 16];
sprintf(tmpfile, "%s/%s", tmp_dir, ".cur_input");
if (access(tmpfile, F_OK) != -1) // there is still a race condition here, but well ...
FATAL("TMP_DIR already has an existing temporary input file: %s", tmpfile);
if (access(tmpfile, F_OK) !=
-1) // there is still a race condition here, but well ...
FATAL("TMP_DIR already has an existing temporary input file: %s",
tmpfile);
} else
tmp_dir = out_dir;
if (dumb_mode) {
@ -464,10 +529,16 @@ int main(int argc, char** argv) {
if (getenv("AFL_NO_UI") && getenv("AFL_FORCE_UI"))
FATAL("AFL_NO_UI and AFL_FORCE_UI are mutually exclusive");
if (strchr(argv[optind], '/') == NULL) WARNF(cLRD "Target binary called without a prefixed path, make sure you are fuzzing the right binary: " cRST "%s", argv[optind]);
if (strchr(argv[optind], '/') == NULL)
WARNF(cLRD
"Target binary called without a prefixed path, make sure you are "
"fuzzing the right binary: " cRST "%s",
argv[optind]);
OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" Eissfeldt and Andrea Fioraldi");
OKF("afl++ is open source, get it at https://github.com/vanhauser-thc/AFLplusplus");
OKF("afl++ is maintained by Marc \"van Hauser\" Heuse, Heiko \"hexcoder\" "
"Eissfeldt and Andrea Fioraldi");
OKF("afl++ is open source, get it at "
"https://github.com/vanhauser-thc/AFLplusplus");
OKF("Power schedules from github.com/mboehme/aflfast");
OKF("Python Mutator and llvm_mode whitelisting from github.com/choller/afl");
OKF("afl-tmin fork server patch from github.com/nccgroup/TriforceAFL");
@ -475,13 +546,19 @@ int main(int argc, char** argv) {
ACTF("Getting to work...");
switch (schedule) {
case FAST: OKF("Using exponential power schedule (FAST)"); break;
case COE: OKF("Using cut-off exponential power schedule (COE)"); break;
case EXPLOIT: OKF ("Using exploitation-based constant power schedule (EXPLOIT)"); break;
case EXPLOIT:
OKF("Using exploitation-based constant power schedule (EXPLOIT)");
break;
case LIN: OKF("Using linear power schedule (LIN)"); break;
case QUAD: OKF("Using quadratic power schedule (QUAD)"); break;
case EXPLORE: OKF ("Using exploration-based constant power schedule (EXPLORE)"); break;
case EXPLORE:
OKF("Using exploration-based constant power schedule (EXPLORE)");
break;
default: FATAL("Unknown power schedule"); break;
}
if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1;
@ -491,16 +568,20 @@ int main(int argc, char** argv) {
if (getenv("AFL_FAST_CAL")) fast_cal = 1;
if (getenv("AFL_HANG_TMOUT")) {
hang_tmout = atoi(getenv("AFL_HANG_TMOUT"));
if (!hang_tmout) FATAL("Invalid value of AFL_HANG_TMOUT");
}
if (dumb_mode == 2 && no_forkserver)
FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");
if (getenv("AFL_PRELOAD")) {
setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
}
if (getenv("AFL_LD_PRELOAD"))
@ -511,24 +592,26 @@ int main(int argc, char** argv) {
fix_up_banner(argv[optind]);
check_if_tty();
if (getenv("AFL_FORCE_UI"))
not_on_tty = 0;
if (getenv("AFL_FORCE_UI")) not_on_tty = 0;
if (getenv("AFL_CAL_FAST")) {
/* Use less calibration cycles, for slow applications */
cal_cycles = 3;
cal_cycles_long = 5;
}
if (getenv("AFL_DEBUG"))
debug = 1;
if (getenv("AFL_DEBUG")) debug = 1;
if (getenv("AFL_PYTHON_ONLY")) {
/* This ensures we don't proceed to havoc/splice */
python_only = 1;
/* Ensure we also skip all deterministic steps */
skip_deterministic = 1;
}
get_core_count();
@ -553,8 +636,7 @@ int main(int argc, char** argv) {
setup_dirs_fds();
# ifdef USE_PYTHON
if (init_py())
FATAL("Failed to initialize Python module");
if (init_py()) FATAL("Failed to initialize Python module");
# else
if (getenv("AFL_PYTHON_MODULE"))
FATAL("Your AFL binary was built without Python support");
@ -574,24 +656,33 @@ int main(int argc, char** argv) {
/* If we don't have a file name chosen yet, use a safe default. */
if (!out_file) {
u32 i = optind + 1;
while (argv[i]) {
u8* aa_loc = strstr(argv[i], "@@");
if (aa_loc && !out_file) {
if (file_extension) {
out_file = alloc_printf("%s/.cur_input.%s", out_dir, file_extension);
} else {
out_file = alloc_printf("%s/.cur_input", out_dir);
}
detect_file_args(argv + optind + 1, out_file);
break;
}
++i;
}
}
if (!out_file) setup_stdio_file();
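
The loop above only chooses where the fuzzed input will live; the actual '@@' rewriting in the target's command line is delegated to detect_file_args(). A simplified stand-alone version of that substitution (the real helper also resolves relative paths, which is omitted here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Replace the first "@@" token in each argument with out_file, so that
   "./target @@" ends up as "./target <out_dir>/.cur_input". Sketch only. */
static void replace_at_at(char **argv, const char *out_file) {

  int i;

  for (i = 0; argv[i]; ++i) {

    char *aa = strstr(argv[i], "@@");
    if (!aa) continue;

    size_t pre = (size_t)(aa - argv[i]);
    char * buf = malloc(strlen(argv[i]) - 2 + strlen(out_file) + 1);

    if (!buf) exit(1);
    sprintf(buf, "%.*s%s%s", (int)pre, argv[i], out_file, aa + 2);
    argv[i] = buf;

  }

}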
@ -621,9 +712,11 @@ int main(int argc, char** argv) {
/* Woop woop woop */
if (!not_on_tty) {
sleep(4);
start_time += 4000;
if (stop_soon) goto stop_fuzzing;
}
// real start time, we reset, so this works correctly with -V
@ -643,16 +736,20 @@ int main(int argc, char** argv) {
queue_cur = queue;
while (seek_to) {
++current_entry;
--seek_to;
queue_cur = queue_cur->next;
}
show_stats();
if (not_on_tty) {
ACTF("Entering queue cycle %llu.", queue_cycle);
fflush(stdout);
}
/* If we had a full queue cycle with no new finds, try
@ -660,9 +757,14 @@ int main(int argc, char** argv) {
if (queued_paths == prev_queued) {
if (use_splicing) ++cycles_wo_finds; else use_splicing = 1;
if (use_splicing)
++cycles_wo_finds;
else
use_splicing = 1;
} else cycles_wo_finds = 0;
} else
cycles_wo_finds = 0;
prev_queued = queued_paths;
@ -675,8 +777,7 @@ int main(int argc, char** argv) {
if (!stop_soon && sync_id && !skipped_fuzz) {
if (!(sync_interval_cnt++ % SYNC_INTERVAL))
sync_fuzzers(use_argv);
if (!(sync_interval_cnt++ % SYNC_INTERVAL)) sync_fuzzers(use_argv);
}
@ -688,18 +789,28 @@ int main(int argc, char** argv) {
++current_entry;
if (most_time_key == 1) {
u64 cur_ms_lv = get_cur_time();
if (most_time * 1000 < cur_ms_lv - start_time) {
most_time_key = 2;
break;
}
}
if (most_execs_key == 1) {
if (most_execs <= total_execs) {
most_execs_key = 2;
break;
}
}
}
if (queue_cur) show_stats();
@ -712,15 +823,16 @@ int main(int argc, char** argv) {
* AFL_EXIT_WHEN_DONE or AFL_BENCH_UNTIL_CRASH) the child and forkserver
 * were not killed?
*/
/* if we stopped programmatically, we kill the forkserver and the current runner.
if we stopped manually, this is done by the signal handler */
/* if we stopped programmatically, we kill the forkserver and the current
runner. if we stopped manually, this is done by the signal handler */
if (stop_soon == 2) {
if (child_pid > 0) kill(child_pid, SIGKILL);
if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL);
/* Now that we've killed the forkserver, we wait for it to be able to get rusage stats. */
if (waitpid(forksrv_pid, NULL, 0) <= 0) {
WARNF("error waitpid\n");
}
/* Now that we've killed the forkserver, we wait for it to be able to get
* rusage stats. */
if (waitpid(forksrv_pid, NULL, 0) <= 0) { WARNF("error waitpid\n"); }
}
write_bitmap();
@ -732,8 +844,7 @@ stop_fuzzing:
SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
stop_soon == 2 ? "programmatically" : "by user");
if (most_time_key == 2)
SAYF(cYEL "[!] " cRST "Time limit was reached\n");
if (most_time_key == 2) SAYF(cYEL "[!] " cRST "Time limit was reached\n");
if (most_execs_key == 2)
SAYF(cYEL "[!] " cRST "Execution limit was reached\n");
@ -743,7 +854,8 @@ stop_fuzzing:
SAYF("\n" cYEL "[!] " cRST
"Stopped during the first cycle, results may be incomplete.\n"
" (For info on resuming, see %s/README)\n", doc_path);
" (For info on resuming, see %s/README)\n",
doc_path);
}
@ -766,3 +878,4 @@ stop_fuzzing:
}
#endif /* !AFL_LIB */

View File

@ -49,7 +49,6 @@ static u32 cc_par_cnt = 1; /* Param count, including argv0 */
static u8 be_quiet, /* Quiet mode */
clang_mode; /* Invoked as afl-clang*? */
/* Try to find our "fake" GNU assembler in AFL_PATH or at the location derived
from argv[0]. If that fails, abort. */
@ -63,9 +62,11 @@ static void find_as(u8* argv0) {
tmp = alloc_printf("%s/as", afl_path);
if (!access(tmp, X_OK)) {
as_path = afl_path;
ck_free(tmp);
return;
}
ck_free(tmp);
@ -85,9 +86,11 @@ static void find_as(u8* argv0) {
tmp = alloc_printf("%s/afl-as", dir);
if (!access(tmp, X_OK)) {
as_path = dir;
ck_free(tmp);
return;
}
ck_free(tmp);
@ -96,15 +99,16 @@ static void find_as(u8* argv0) {
}
if (!access(AFL_PATH "/as", X_OK)) {
as_path = AFL_PATH;
return;
}
FATAL("Unable to find AFL wrapper binary for 'as'. Please set AFL_PATH");
}
/* Copy argv to cc_params, making the necessary edits. */
static void edit_params(u32 argc, char** argv) {
@ -119,7 +123,10 @@ static void edit_params(u32 argc, char** argv) {
cc_params = ck_alloc((argc + 128) * sizeof(u8*));
name = strrchr(argv[0], '/');
if (!name) name = argv[0]; else name++;
if (!name)
name = argv[0];
else
name++;
if (!strncmp(name, "afl-clang", 9)) {
@ -128,11 +135,15 @@ static void edit_params(u32 argc, char** argv) {
setenv(CLANG_ENV_VAR, "1", 1);
if (!strcmp(name, "afl-clang++")) {
u8* alt_cxx = getenv("AFL_CXX");
cc_params[0] = alt_cxx ? alt_cxx : (u8*)"clang++";
} else {
u8* alt_cc = getenv("AFL_CC");
cc_params[0] = alt_cc ? alt_cc : (u8*)"clang";
}
} else {
@ -145,16 +156,22 @@ static void edit_params(u32 argc, char** argv) {
#ifdef __APPLE__
if (!strcmp(name, "afl-g++")) cc_params[0] = getenv("AFL_CXX");
else if (!strcmp(name, "afl-gcj")) cc_params[0] = getenv("AFL_GCJ");
else cc_params[0] = getenv("AFL_CC");
if (!strcmp(name, "afl-g++"))
cc_params[0] = getenv("AFL_CXX");
else if (!strcmp(name, "afl-gcj"))
cc_params[0] = getenv("AFL_GCJ");
else
cc_params[0] = getenv("AFL_CC");
if (!cc_params[0]) {
SAYF("\n" cLRD "[-] " cRST
"On Apple systems, 'gcc' is usually just a wrapper for clang. Please use the\n"
" 'afl-clang' utility instead of 'afl-gcc'. If you really have GCC installed,\n"
" set AFL_CC or AFL_CXX to specify the correct path to that compiler.\n");
"On Apple systems, 'gcc' is usually just a wrapper for clang. "
"Please use the\n"
" 'afl-clang' utility instead of 'afl-gcc'. If you really have "
"GCC installed,\n"
" set AFL_CC or AFL_CXX to specify the correct path to that "
"compiler.\n");
FATAL("AFL_CC or AFL_CXX required on MacOS X");
@ -163,14 +180,20 @@ static void edit_params(u32 argc, char** argv) {
#else
if (!strcmp(name, "afl-g++")) {
u8* alt_cxx = getenv("AFL_CXX");
cc_params[0] = alt_cxx ? alt_cxx : (u8*)"g++";
} else if (!strcmp(name, "afl-gcj")) {
u8* alt_cc = getenv("AFL_GCJ");
cc_params[0] = alt_cc ? alt_cc : (u8*)"gcj";
} else {
u8* alt_cc = getenv("AFL_CC");
cc_params[0] = alt_cc ? alt_cc : (u8*)"gcc";
}
#endif /* __APPLE__ */
@ -178,13 +201,20 @@ static void edit_params(u32 argc, char** argv) {
}
while (--argc) {
u8* cur = *(++argv);
if (!strncmp(cur, "-B", 2)) {
if (!be_quiet) WARNF("-B is already set, overriding");
if (!cur[2] && argc > 1) { argc--; argv++; }
if (!cur[2] && argc > 1) {
argc--;
argv++;
}
continue;
}
@ -197,8 +227,8 @@ static void edit_params(u32 argc, char** argv) {
if (!strcmp(cur, "-m32")) m32_set = 1;
#endif
if (!strcmp(cur, "-fsanitize=address") ||
!strcmp(cur, "-fsanitize=memory")) asan_set = 1;
if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory"))
asan_set = 1;
if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1;
@ -209,15 +239,13 @@ static void edit_params(u32 argc, char** argv) {
cc_params[cc_par_cnt++] = "-B";
cc_params[cc_par_cnt++] = as_path;
if (clang_mode)
cc_params[cc_par_cnt++] = "-no-integrated-as";
if (clang_mode) cc_params[cc_par_cnt++] = "-no-integrated-as";
if (getenv("AFL_HARDEN")) {
cc_params[cc_par_cnt++] = "-fstack-protector-all";
if (!fortify_set)
cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
if (!fortify_set) cc_params[cc_par_cnt++] = "-D_FORTIFY_SOURCE=2";
}
@ -229,8 +257,7 @@ static void edit_params(u32 argc, char** argv) {
} else if (getenv("AFL_USE_ASAN")) {
if (getenv("AFL_USE_MSAN"))
FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_USE_MSAN")) FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_HARDEN"))
FATAL("ASAN and AFL_HARDEN are mutually exclusive");
@ -240,8 +267,7 @@ static void edit_params(u32 argc, char** argv) {
} else if (getenv("AFL_USE_MSAN")) {
if (getenv("AFL_USE_ASAN"))
FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_USE_ASAN")) FATAL("ASAN and MSAN are mutually exclusive");
if (getenv("AFL_HARDEN"))
FATAL("MSAN and AFL_HARDEN are mutually exclusive");
@ -249,7 +275,6 @@ static void edit_params(u32 argc, char** argv) {
cc_params[cc_par_cnt++] = "-U_FORTIFY_SOURCE";
cc_params[cc_par_cnt++] = "-fsanitize=memory";
}
#ifdef USEMMAP
@ -264,8 +289,7 @@ static void edit_params(u32 argc, char** argv) {
works OK. This has nothing to do with us, but let's avoid triggering
that bug. */
if (!clang_mode || !m32_set)
cc_params[cc_par_cnt++] = "-g";
if (!clang_mode || !m32_set) cc_params[cc_par_cnt++] = "-g";
#else
@ -300,7 +324,6 @@ static void edit_params(u32 argc, char** argv) {
}
/* Main entry point */
int main(int argc, char** argv) {
@ -308,22 +331,32 @@ int main(int argc, char** argv) {
if (isatty(2) && !getenv("AFL_QUIET")) {
SAYF(cCYA "afl-cc" VERSION cRST " by <lcamtuf@google.com>\n");
SAYF(cYEL "[!] " cBRI "NOTE: " cRST "afl-gcc is deprecated, llvm_mode is much faster and has more options\n");
SAYF(cYEL "[!] " cBRI "NOTE: " cRST
"afl-gcc is deprecated, llvm_mode is much faster and has more "
"options\n");
} else be_quiet = 1;
} else
be_quiet = 1;
if (argc < 2) {
SAYF("\n"
"This is a helper application for afl-fuzz. It serves as a drop-in replacement\n"
"for gcc or clang, letting you recompile third-party code with the required\n"
"runtime instrumentation. A common use pattern would be one of the following:\n\n"
SAYF(
"\n"
"This is a helper application for afl-fuzz. It serves as a drop-in "
"replacement\n"
"for gcc or clang, letting you recompile third-party code with the "
"required\n"
"runtime instrumentation. A common use pattern would be one of the "
"following:\n\n"
" CC=%s/afl-gcc ./configure\n"
" CXX=%s/afl-g++ ./configure\n\n"
"You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and AFL_AS.\n"
"Setting AFL_HARDEN enables hardening optimizations in the compiled code.\n\n",
"You can specify custom next-stage toolchain via AFL_CC, AFL_CXX, and "
"AFL_AS.\n"
"Setting AFL_HARDEN enables hardening optimizations in the compiled "
"code.\n\n",
BIN_PATH, BIN_PATH);
exit(1);
@ -341,3 +374,4 @@ int main(int argc, char** argv) {
return 0;
}

View File

@ -51,7 +51,6 @@
# define HAVE_AFFINITY 1
#endif /* __linux__ */
/* Get unix time in microseconds. */
static u64 get_cur_time_us(void) {
@ -65,7 +64,6 @@ static u64 get_cur_time_us(void) {
}
/* Get CPU usage in microseconds. */
static u64 get_cpu_usage_us(void) {
@ -79,7 +77,6 @@ static u64 get_cpu_usage_us(void) {
}
/* Measure preemption rate. */
static u32 measure_preemption(u32 target_ms) {
@ -96,14 +93,17 @@ repeat_loop:
v1 = CTEST_BUSY_CYCLES;
while (v1--) v2++;
while (v1--)
v2++;
sched_yield();
en_t = get_cur_time_us();
if (en_t - st_t < target_ms * 1000) {
loop_repeats++;
goto repeat_loop;
}
/* Let's see what percentage of this time we actually had a chance to
@ -118,15 +118,13 @@ repeat_loop:
}
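
measure_preemption() spins on the CPU in short bursts, yielding after each one, and then compares consumed CPU time against elapsed wall time; a core that is already busy keeps getting preempted and scores well below 100%. A compact sketch of the same measurement using getrusage() (burst size and helpers are illustrative, not the CTEST_* constants of the real tool):

#include <sched.h>
#include <stdint.h>
#include <sys/resource.h>
#include <sys/time.h>

static uint64_t wall_us(void) {

  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec * 1000000ULL + tv.tv_usec;

}

static uint64_t cpu_us(void) {

  struct rusage ru;
  getrusage(RUSAGE_SELF, &ru);
  return (ru.ru_utime.tv_sec + ru.ru_stime.tv_sec) * 1000000ULL +
         ru.ru_utime.tv_usec + ru.ru_stime.tv_usec;

}

/* Burn CPU for roughly target_ms of wall time, yielding after every burst,
   and report what percentage of that time we were actually scheduled. */
static unsigned preemption_pct(unsigned target_ms) {

  uint64_t st_w = wall_us(), st_c = cpu_us();
  volatile unsigned long burn = 0;

  while (wall_us() - st_w < target_ms * 1000ULL) {

    unsigned long i;
    for (i = 0; i < 1000000UL; ++i) burn++;  /* busy burst */
    sched_yield();

  }

  return (unsigned)((cpu_us() - st_c) * 100 / (wall_us() - st_w));

}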
/* Do the benchmark thing. */
int main(int argc, char** argv) {
#ifdef HAVE_AFFINITY
u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN),
idle_cpus = 0, maybe_cpus = 0, i;
u32 cpu_cnt = sysconf(_SC_NPROCESSORS_ONLN), idle_cpus = 0, maybe_cpus = 0, i;
SAYF(cCYA "afl-gotcpu" VERSION cRST " by <lcamtuf@google.com>\n");
@ -255,3 +253,4 @@ int main(int argc, char** argv) {
#endif /* ^HAVE_AFFINITY */
}

View File

@ -51,49 +51,60 @@ static s32 shm_id; /* ID of the SHM region */
/* Get rid of shared memory (atexit handler). */
void remove_shm(void) {
#ifdef USEMMAP
if (g_shm_base != NULL) {
munmap(g_shm_base, MAP_SIZE);
g_shm_base = NULL;
}
if (g_shm_fd != -1) {
close(g_shm_fd);
g_shm_fd = -1;
}
#else
shmctl(shm_id, IPC_RMID, NULL);
#endif
}
}
/* Configure shared memory. */
void setup_shm(unsigned char dumb_mode) {
#ifdef USEMMAP
/* generate random file name for multi instance */
/* thanks to f*cking glibc we can not use tmpnam securely, it generates a security warning that cannot be suppressed */
/* thanks to f*cking glibc we can not use tmpnam securely, it generates a
* security warning that cannot be suppressed */
/* so we do this worse workaround */
snprintf(g_shm_file_path, L_tmpnam, "/afl_%d_%ld", getpid(), random());
/* create the shared memory segment as if it was a file */
g_shm_fd = shm_open(g_shm_file_path, O_CREAT | O_RDWR | O_EXCL, 0600);
if (g_shm_fd == -1) {
PFATAL("shm_open() failed");
}
if (g_shm_fd == -1) { PFATAL("shm_open() failed"); }
/* configure the size of the shared memory segment */
if (ftruncate(g_shm_fd, MAP_SIZE)) {
PFATAL("setup_shm(): ftruncate() failed");
}
/* map the shared memory segment to the address space of the process */
g_shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0);
g_shm_base =
mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, g_shm_fd, 0);
if (g_shm_base == MAP_FAILED) {
close(g_shm_fd);
g_shm_fd = -1;
PFATAL("mmap() failed");
}
atexit(remove_shm);
@ -136,5 +147,6 @@ void setup_shm(unsigned char dumb_mode) {
if (!trace_bits) PFATAL("shmat() failed");
#endif
}

View File

@ -73,8 +73,7 @@ static u8 quiet_mode, /* Hide non-essential messages? */
binary_mode, /* Write output as a binary map */
keep_cores; /* Allow coredumps? */
static volatile u8
stop_soon, /* Ctrl-C pressed? */
static volatile u8 stop_soon, /* Ctrl-C pressed? */
child_timed_out, /* Child timed out? */
child_crashed; /* Child crashed? */
@ -83,14 +82,8 @@ static volatile u8
static const u8 count_class_human[256] = {
[0] = 0,
[1] = 1,
[2] = 2,
[3] = 3,
[4 ... 7] = 4,
[8 ... 15] = 5,
[16 ... 31] = 6,
[32 ... 127] = 7,
[0] = 0, [1] = 1, [2] = 2, [3] = 3,
[4 ... 7] = 4, [8 ... 15] = 5, [16 ... 31] = 6, [32 ... 127] = 7,
[128 ... 255] = 8
};
@ -116,21 +109,24 @@ static void classify_counts(u8* mem, const u8* map) {
if (edges_only) {
while (i--) {
if (*mem) *mem = 1;
mem++;
}
} else if (!raw_instr_output) {
while (i--) {
*mem = map[*mem];
mem++;
}
}
}
}
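
count_class_human collapses raw hit counts into the coarse buckets 0, 1, 2, 3, 4-7, 8-15, 16-31, 32-127 and 128-255, so small run-to-run jitter in execution counts does not masquerade as new behavior; classify_counts() simply rewrites every bitmap byte through this table (or clamps it to 0/1 with -e). An equivalent table built at runtime, for reference:

#include <stdint.h>

/* Build the human-readable count-class table used above:
   index = raw hit count, value = bucket id. */
static void build_count_class_human(uint8_t table[256]) {

  int i;

  for (i = 0; i < 256; ++i) {

    if (i == 0)
      table[i] = 0;
    else if (i == 1)
      table[i] = 1;
    else if (i == 2)
      table[i] = 2;
    else if (i == 3)
      table[i] = 3;
    else if (i <= 7)
      table[i] = 4;
    else if (i <= 15)
      table[i] = 5;
    else if (i <= 31)
      table[i] = 6;
    else if (i <= 127)
      table[i] = 7;
    else
      table[i] = 8;

  }

}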
/* Write results. */
@ -180,8 +176,7 @@ static u32 write_results(void) {
ret++;
total += trace_bits[i];
if (highest < trace_bits[i])
highest = trace_bits[i];
if (highest < trace_bits[i]) highest = trace_bits[i];
if (cmin_mode) {
@ -190,7 +185,9 @@ static u32 write_results(void) {
fprintf(f, "%u%u\n", trace_bits[i], i);
} else fprintf(f, "%06u:%u\n", i, trace_bits[i]);
} else
fprintf(f, "%06u:%u\n", i, trace_bits[i]);
}
@ -202,7 +199,6 @@ static u32 write_results(void) {
}
/* Handle timeout signal. */
static void handle_timeout(int sig) {
@ -212,7 +208,6 @@ static void handle_timeout(int sig) {
}
/* Execute target application. */
static void run_target(char** argv) {
@ -220,8 +215,7 @@ static void run_target(char** argv) {
static struct itimerval it;
int status = 0;
if (!quiet_mode)
SAYF("-- Program output begins --\n" cRST);
if (!quiet_mode) SAYF("-- Program output begins --\n" cRST);
MEM_BARRIER();
@ -238,8 +232,10 @@ static void run_target(char** argv) {
s32 fd = open("/dev/null", O_RDWR);
if (fd < 0 || dup2(fd, 1) < 0 || dup2(fd, 2) < 0) {
*(u32*)trace_bits = EXEC_FAIL_SIG;
PFATAL("Descriptor initialization failed");
}
close(fd);
@ -262,8 +258,10 @@ static void run_target(char** argv) {
}
if (!keep_cores) r.rlim_max = r.rlim_cur = 0;
else r.rlim_max = r.rlim_cur = RLIM_INFINITY;
if (!keep_cores)
r.rlim_max = r.rlim_cur = 0;
else
r.rlim_max = r.rlim_cur = RLIM_INFINITY;
setrlimit(RLIMIT_CORE, &r); /* Ignore errors */
@ -304,14 +302,12 @@ static void run_target(char** argv) {
if (*(u32*)trace_bits == EXEC_FAIL_SIG)
FATAL("Unable to execute '%s'", argv[0]);
classify_counts(trace_bits, binary_mode ?
count_class_binary : count_class_human);
classify_counts(trace_bits,
binary_mode ? count_class_binary : count_class_human);
if (!quiet_mode)
SAYF(cRST "-- Program output ends --\n");
if (!quiet_mode) SAYF(cRST "-- Program output ends --\n");
if (!child_timed_out && !stop_soon && WIFSIGNALED(status))
child_crashed = 1;
if (!child_timed_out && !stop_soon && WIFSIGNALED(status)) child_crashed = 1;
if (!quiet_mode) {
@ -320,14 +316,13 @@ static void run_target(char** argv) {
else if (stop_soon)
SAYF(cLRD "\n+++ Program aborted by user +++\n" cRST);
else if (child_crashed)
SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status));
SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST,
WTERMSIG(status));
}
}
/* Handle Ctrl-C and the like. */
static void handle_stop_sig(int sig) {
@ -338,15 +333,16 @@ static void handle_stop_sig(int sig) {
}
/* Do basic preparations - persistent fds, filenames, etc. */
static void set_up_environment(void) {
setenv("ASAN_OPTIONS", "abort_on_error=1:"
setenv("ASAN_OPTIONS",
"abort_on_error=1:"
"detect_leaks=0:"
"symbolize=0:"
"allocator_may_return_null=1", 0);
"allocator_may_return_null=1",
0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
"symbolize=0:"
@ -355,12 +351,13 @@ static void set_up_environment(void) {
"msan_track_origins=0", 0);
if (getenv("AFL_PRELOAD")) {
setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
}
}
}
/* Setup signal handlers, duh. */
@ -388,7 +385,6 @@ static void setup_signal_handlers(void) {
}
/* Show banner. */
static void show_banner(void) {
@ -403,7 +399,8 @@ static void usage(u8* argv0) {
show_banner();
SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
SAYF(
"\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
"Required parameters:\n\n"
@ -415,7 +412,8 @@ static void usage(u8* argv0) {
" -m megs - memory limit for child process (%d MB)\n"
" -Q - use binary-only instrumentation (QEMU mode)\n"
" -U - use Unicorn-based instrumentation (Unicorn mode)\n"
" (Not necessary, here for consistency with other afl-* tools)\n\n"
" (Not necessary, here for consistency with other afl-* "
"tools)\n\n"
"Other settings:\n\n"
@ -433,7 +431,6 @@ static void usage(u8* argv0) {
}
/* Find binary. */
static void find_binary(u8* fname) {
@ -461,7 +458,9 @@ static void find_binary(u8* fname) {
memcpy(cur_elem, env_path, delim - env_path);
delim++;
} else cur_elem = ck_strdup(env_path);
} else
cur_elem = ck_strdup(env_path);
env_path = delim;
@ -473,7 +472,8 @@ static void find_binary(u8* fname) {
ck_free(cur_elem);
if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
(st.st_mode & 0111) && st.st_size >= 4) break;
(st.st_mode & 0111) && st.st_size >= 4)
break;
ck_free(target_path);
target_path = 0;
@ -486,7 +486,6 @@ static void find_binary(u8* fname) {
}
/* Fix up argv for QEMU. */
static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
@ -507,8 +506,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
cp = alloc_printf("%s/afl-qemu-trace", tmp);
if (access(cp, X_OK))
FATAL("Unable to find '%s'", tmp);
if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
target_path = new_argv[0] = cp;
return new_argv;
@ -532,7 +530,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
}
} else ck_free(own_copy);
} else
ck_free(own_copy);
if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
@ -581,7 +581,8 @@ int main(int argc, char** argv) {
}
if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
optarg[0] == '-') FATAL("Bad syntax used for -m");
optarg[0] == '-')
FATAL("Bad syntax used for -m");
switch (suffix) {
@ -609,6 +610,7 @@ int main(int argc, char** argv) {
timeout_given = 1;
if (strcmp(optarg, "none")) {
exec_tmout = atoi(optarg);
if (exec_tmout < 20 || optarg[0] == '-')
@ -683,9 +685,7 @@ int main(int argc, char** argv) {
raw_instr_output = 1;
break;
default:
usage(argv[0]);
default: usage(argv[0]);
}
@ -699,8 +699,10 @@ int main(int argc, char** argv) {
find_binary(argv[optind]);
if (!quiet_mode) {
show_banner();
ACTF("Executing '%s'...\n", target_path);
}
detect_file_args(argv + optind, at_file);
@ -717,7 +719,8 @@ int main(int argc, char** argv) {
if (!quiet_mode) {
if (!tcnt) FATAL("No instrumentation detected" cRST);
OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST, tcnt, highest, total, out_file);
OKF("Captured %u tuples (highest value %u, total values %u) in '%s'." cRST,
tcnt, highest, total, out_file);
}

View File

@ -88,8 +88,7 @@ static u8 crash_mode, /* Crash-centric mode? */
exact_mode, /* Require path match for crashes? */
use_stdin = 1; /* Use stdin for program input? */
static volatile u8
stop_soon; /* Ctrl-C pressed? */
static volatile u8 stop_soon; /* Ctrl-C pressed? */
/*
* forkserver section
@ -103,8 +102,8 @@ s32 out_fd = -1, out_dir_fd = -1, dev_urandom_fd = -1;
/* we import this as we need this information */
extern u8 child_timed_out;
/* Classify tuple counts. This is a slow & naive version, but good enough here. */
/* Classify tuple counts. This is a slow & naive version, but good enough here.
*/
static const u8 count_class_lookup[256] = {
@ -127,21 +126,24 @@ static void classify_counts(u8* mem) {
if (edges_only) {
while (i--) {
if (*mem) *mem = 1;
mem++;
}
} else {
while (i--) {
*mem = count_class_lookup[*mem];
mem++;
}
}
}
}
/* Apply mask to classified bitmap (if set). */
@ -161,7 +163,6 @@ static void apply_mask(u32* mem, u32* mask) {
}
/* See if any bytes are set in the bitmap. */
static inline u8 anything_set(void) {
@ -169,17 +170,19 @@ static inline u8 anything_set(void) {
u32* ptr = (u32*)trace_bits;
u32 i = (MAP_SIZE >> 2);
while (i--) if (*(ptr++)) return 1;
while (i--)
if (*(ptr++)) return 1;
return 0;
}
/* Get rid of temp files (atexit handler). */
static void at_exit_handler(void) {
if (out_file) unlink(out_file); /* Ignore errors */
}
/* Read initial file. */
@ -191,8 +194,7 @@ static void read_initial_file(void) {
if (fd < 0) PFATAL("Unable to open '%s'", in_file);
if (fstat(fd, &st) || !st.st_size)
FATAL("Zero-sized input file.");
if (fstat(fd, &st) || !st.st_size) FATAL("Zero-sized input file.");
if (st.st_size >= TMIN_MAX_FILE)
FATAL("Input file is too large (%u MB max)", TMIN_MAX_FILE / 1024 / 1024);
@ -208,7 +210,6 @@ static void read_initial_file(void) {
}
/* Write output file. */
static s32 write_to_file(u8* path, u8* mem, u32 len) {
@ -245,7 +246,9 @@ static void write_to_testcase(void* mem, u32 len) {
if (fd < 0) PFATAL("Unable to create '%s'", out_file);
} else lseek(fd, 0, SEEK_SET);
} else
lseek(fd, 0, SEEK_SET);
ck_write(fd, mem, len, out_file);
@ -254,12 +257,12 @@ static void write_to_testcase(void* mem, u32 len) {
if (ftruncate(fd, len)) PFATAL("ftruncate() failed");
lseek(fd, 0, SEEK_SET);
} else close(fd);
} else
close(fd);
}
/* Handle timeout signal. */
/*
static void handle_timeout(int sig) {
@ -277,11 +280,13 @@ static void handle_timeout(int sig) {
}
}
*/
/* start the app and its forkserver */
/*
static void init_forkserver(char **argv) {
static struct itimerval it;
int st_pipe[2], ctl_pipe[2];
int status = 0;
@ -378,8 +383,10 @@ static void init_forkserver(char **argv) {
// Otherwise, try to figure out what went wrong.
if (rlen == 4) {
ACTF("All right - fork server is up.");
return;
}
if (waitpid(forksrv_pid, &status, 0) <= 0)
@ -398,6 +405,7 @@ static void init_forkserver(char **argv) {
SAYF(cLRD "\n+++ Program killed by signal %u +++\n" cRST, WTERMSIG(status));
}
*/
/* Execute target application. Returns 0 if the changes are a dud, or
@ -440,8 +448,10 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
/* Configure timeout, wait for child, cancel timeout. */
if (exec_tmout) {
it.it_value.tv_sec = (exec_tmout / 1000);
it.it_value.tv_usec = (exec_tmout % 1000) * 1000;
}
setitimer(ITIMER_REAL, &it, NULL);
@ -528,18 +538,17 @@ static u8 run_target(char** argv, u8* mem, u32 len, u8 first_run) {
}
/* Find first power of two greater or equal to val. */
static u32 next_p2(u32 val) {
u32 ret = 1;
while (val > ret) ret <<= 1;
while (val > ret)
ret <<= 1;
return ret;
}
/* Actually minimize! */
static void minimize(char** argv) {
@ -618,8 +627,8 @@ next_del_blksize:
del_pos = 0;
prev_del = 1;
SAYF(cGRA " Block length = %u, remaining size = %u\n" cRST,
del_len, in_len);
SAYF(cGRA " Block length = %u, remaining size = %u\n" cRST, del_len,
in_len);
while (del_pos < in_len) {
@ -634,8 +643,8 @@ next_del_blksize:
very end of the buffer (tail_len > 0), and the current block is the same
as the previous one... skip this step as a no-op. */
if (!prev_del && tail_len && !memcmp(in_data + del_pos - del_len,
in_data + del_pos, del_len)) {
if (!prev_del && tail_len &&
!memcmp(in_data + del_pos - del_len, in_data + del_pos, del_len)) {
del_pos += del_len;
continue;
@ -660,7 +669,9 @@ next_del_blksize:
changed_any = 1;
} else del_pos += del_len;
} else
del_pos += del_len;
}
@ -674,7 +685,8 @@ next_del_blksize:
OKF("Block removal complete, %u bytes deleted.", stage_o_len - in_len);
if (!in_len && changed_any)
WARNF(cLRD "Down to zero bytes - check the command line and mem limit!" cRST);
WARNF(cLRD
"Down to zero bytes - check the command line and mem limit!" cRST);
if (cur_pass > 1 && !changed_any) goto finalize_all;
@ -689,8 +701,10 @@ next_del_blksize:
memset(alpha_map, 0, sizeof(alpha_map));
for (i = 0; i < in_len; i++) {
if (!alpha_map[in_data[i]]) alpha_size++;
alpha_map[in_data[i]]++;
}
ACTF(cBRI "Stage #2: " cRST "Minimizing symbols (%u code point%s)...",
@ -724,8 +738,8 @@ next_del_blksize:
alpha_d_total += alpha_del1;
OKF("Symbol minimization finished, %u symbol%s (%u byte%s) replaced.",
syms_removed, syms_removed == 1 ? "" : "s",
alpha_del1, alpha_del1 == 1 ? "" : "s");
syms_removed, syms_removed == 1 ? "" : "s", alpha_del1,
alpha_del1 == 1 ? "" : "s");
/**************************
* CHARACTER MINIMIZATION *
@ -752,36 +766,34 @@ next_del_blksize:
alpha_del2++;
changed_any = 1;
} else tmp_buf[i] = orig;
} else
tmp_buf[i] = orig;
}
alpha_d_total += alpha_del2;
OKF("Character minimization done, %u byte%s replaced.",
alpha_del2, alpha_del2 == 1 ? "" : "s");
OKF("Character minimization done, %u byte%s replaced.", alpha_del2,
alpha_del2 == 1 ? "" : "s");
if (changed_any) goto next_pass;
finalize_all:
SAYF("\n"
cGRA " File size reduced by : " cRST "%0.02f%% (to %u byte%s)\n"
cGRA " Characters simplified : " cRST "%0.02f%%\n"
cGRA " Number of execs done : " cRST "%u\n"
cGRA " Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n",
SAYF("\n" cGRA " File size reduced by : " cRST
"%0.02f%% (to %u byte%s)\n" cGRA " Characters simplified : " cRST
"%0.02f%%\n" cGRA " Number of execs done : " cRST "%u\n" cGRA
" Fruitless execs : " cRST "path=%u crash=%u hang=%s%u\n\n",
100 - ((double)in_len) * 100 / orig_len, in_len, in_len == 1 ? "" : "s",
((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1),
total_execs, missed_paths, missed_crashes, missed_hangs ? cLRD : "",
missed_hangs);
((double)(alpha_d_total)) * 100 / (in_len ? in_len : 1), total_execs,
missed_paths, missed_crashes, missed_hangs ? cLRD : "", missed_hangs);
if (total_execs > 50 && missed_hangs * 10 > total_execs)
WARNF(cLRD "Frequent timeouts - results may be skewed." cRST);
}
/* Handle Ctrl-C and the like. */
static void handle_stop_sig(int sig) {
@ -792,7 +804,6 @@ static void handle_stop_sig(int sig) {
}
/* Do basic preparations - persistent fds, filenames, etc. */
static void set_up_environment(void) {
@ -823,7 +834,6 @@ static void set_up_environment(void) {
if (out_fd < 0) PFATAL("Unable to create '%s'", out_file);
/* Set sane defaults... */
x = getenv("ASAN_OPTIONS");
@ -843,18 +853,20 @@ static void set_up_environment(void) {
if (x) {
if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR)))
FATAL("Custom MSAN_OPTIONS set without exit_code="
STRINGIFY(MSAN_ERROR) " - please fix!");
FATAL("Custom MSAN_OPTIONS set without exit_code=" STRINGIFY(
MSAN_ERROR) " - please fix!");
if (!strstr(x, "symbolize=0"))
FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!");
}
setenv("ASAN_OPTIONS", "abort_on_error=1:"
setenv("ASAN_OPTIONS",
"abort_on_error=1:"
"detect_leaks=0:"
"symbolize=0:"
"allocator_may_return_null=1", 0);
"allocator_may_return_null=1",
0);
setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":"
"symbolize=0:"
@ -863,12 +875,13 @@ static void set_up_environment(void) {
"msan_track_origins=0", 0);
if (getenv("AFL_PRELOAD")) {
setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
}
}
}
/* Setup signal handlers, duh. */
@ -896,12 +909,12 @@ static void setup_signal_handlers(void) {
}
/* Display usage hints. */
static void usage(u8* argv0) {
SAYF("\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
SAYF(
"\n%s [ options ] -- /path/to/target_app [ ... ]\n\n"
"Required parameters:\n\n"
@ -915,7 +928,8 @@ static void usage(u8* argv0) {
" -m megs - memory limit for child process (%d MB)\n"
" -Q - use binary-only instrumentation (QEMU mode)\n"
" -U - use Unicorn-based instrumentation (Unicorn mode)\n\n"
" (Not necessary, here for consistency with other afl-* tools)\n\n"
" (Not necessary, here for consistency with other afl-* "
"tools)\n\n"
"Minimization settings:\n\n"
@ -930,7 +944,6 @@ static void usage(u8* argv0) {
}
/* Find binary. */
static void find_binary(u8* fname) {
@ -958,7 +971,9 @@ static void find_binary(u8* fname) {
memcpy(cur_elem, env_path, delim - env_path);
delim++;
} else cur_elem = ck_strdup(env_path);
} else
cur_elem = ck_strdup(env_path);
env_path = delim;
@ -970,7 +985,8 @@ static void find_binary(u8* fname) {
ck_free(cur_elem);
if (!stat(target_path, &st) && S_ISREG(st.st_mode) &&
(st.st_mode & 0111) && st.st_size >= 4) break;
(st.st_mode & 0111) && st.st_size >= 4)
break;
ck_free(target_path);
target_path = 0;
@ -983,7 +999,6 @@ static void find_binary(u8* fname) {
}
/* Fix up argv for QEMU. */
static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
@ -1004,8 +1019,7 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
cp = alloc_printf("%s/afl-qemu-trace", tmp);
if (access(cp, X_OK))
FATAL("Unable to find '%s'", tmp);
if (access(cp, X_OK)) FATAL("Unable to find '%s'", tmp);
target_path = new_argv[0] = cp;
return new_argv;
@ -1029,7 +1043,9 @@ static char** get_qemu_argv(u8* own_loc, char** argv, int argc) {
}
} else ck_free(own_copy);
} else
ck_free(own_copy);
if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) {
@ -1056,8 +1072,6 @@ static void read_bitmap(u8* fname) {
}
/* Main entry point */
int main(int argc, char** argv) {
@ -1120,7 +1134,8 @@ int main(int argc, char** argv) {
}
if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 ||
optarg[0] == '-') FATAL("Bad syntax used for -m");
optarg[0] == '-')
FATAL("Bad syntax used for -m");
switch (suffix) {
@ -1190,9 +1205,7 @@ int main(int argc, char** argv) {
read_bitmap(optarg);
break;
default:
usage(argv[0]);
default: usage(argv[0]);
}
@ -1230,15 +1243,16 @@ int main(int argc, char** argv) {
if (!crash_mode) {
OKF("Program terminates normally, minimizing in "
cCYA "instrumented" cRST " mode.");
OKF("Program terminates normally, minimizing in " cCYA "instrumented" cRST
" mode.");
if (!anything_set()) FATAL("No instrumentation detected.");
} else {
OKF("Program exits with a signal, minimizing in " cMGN "%scrash" cRST
" mode.", exact_mode ? "EXACT " : "");
" mode.",
exact_mode ? "EXACT " : "");
}

View File

@ -26,8 +26,10 @@ int main(int argc, char** argv) {
if (argc > 1)
buf = argv[1];
else if (read(0, buf, sizeof(buf)) < 1) {
printf("Hum?\n");
exit(1);
}
if (buf[0] == '0')
@ -40,3 +42,4 @@ int main(int argc, char** argv) {
exit(0);
}

View File

@ -41,10 +41,8 @@
"adcb $0, (%0, %1, 1)\n" \
: /* no out */ \
: "r"(afl_area_ptr), "r"(loc) \
: "memory", "eax" \
)
: "memory", "eax")
#else
# define INC_AFL_AREA(loc) \
afl_area_ptr[loc]++
# define INC_AFL_AREA(loc) afl_area_ptr[loc]++
#endif

View File

@ -44,21 +44,29 @@
it to translate within its own context, too (this avoids translation
overhead in the next forked-off copy). */
#define AFL_UNICORN_CPU_SNIPPET1 do { \
#define AFL_UNICORN_CPU_SNIPPET1 \
do { \
\
afl_request_tsl(pc, cs_base, flags); \
\
} while (0)
/* This snippet kicks in when the instruction pointer is positioned at
_start and does the usual forkserver stuff, not very different from
regular instrumentation injected via afl-as.h. */
#define AFL_UNICORN_CPU_SNIPPET2 do { \
#define AFL_UNICORN_CPU_SNIPPET2 \
do { \
\
if (unlikely(afl_first_instr == 0)) { \
\
afl_setup(env->uc); \
afl_forkserver(env); \
afl_first_instr = 1; \
\
} \
afl_maybe_log(env->uc, tb->pc); \
\
} while (0)
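
AFL_UNICORN_CPU_SNIPPET2 defers the interesting work to afl_setup() and afl_forkserver(), which are not part of this hunk. For orientation, a hedged skeleton of the classic AFL fork-server loop that afl_forkserver() implements (FORKSRV_FD matches afl's config.h; run_one_input is a placeholder for executing the emulated target once):

#include <stdint.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define FORKSRV_FD 198  /* control pipe fds expected by afl-fuzz */

/* Announce ourselves to afl-fuzz, then fork one child per request and
   report its pid and exit status back over the status pipe. */
static void forkserver_loop(void (*run_one_input)(void)) {

  uint32_t msg = 0;

  /* Phase 1: tell afl-fuzz we are alive (any 4 bytes will do). */
  if (write(FORKSRV_FD + 1, &msg, 4) != 4) return;  /* not run under afl-fuzz */

  while (1) {

    int status;
    int child_pid;

    /* Phase 2: block until afl-fuzz asks for a new run. */
    if (read(FORKSRV_FD, &msg, 4) != 4) _exit(1);

    child_pid = fork();
    if (!child_pid) {

      /* Child: drop the control pipes and execute the test case. */
      close(FORKSRV_FD);
      close(FORKSRV_FD + 1);
      run_one_input();
      _exit(0);

    }

    /* Parent: report pid, wait for the child, report its status. */
    if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) _exit(1);
    if (waitpid(child_pid, &status, 0) < 0) _exit(1);
    if (write(FORKSRV_FD + 1, &status, 4) != 4) _exit(1);

  }

}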
/* We use one additional file descriptor to relay "needs translation"
@ -80,15 +88,17 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long);
static void afl_wait_tsl(CPUArchState*, int);
static void afl_request_tsl(target_ulong, target_ulong, uint64_t);
static TranslationBlock *tb_find_slow(CPUArchState*, target_ulong,
target_ulong, uint64_t);
static TranslationBlock* tb_find_slow(CPUArchState*, target_ulong, target_ulong,
uint64_t);
/* Data structure passed around by the translate handlers: */
struct afl_tsl {
target_ulong pc;
target_ulong cs_base;
uint64_t flags;
};
/*************************
@ -99,8 +109,7 @@ struct afl_tsl {
static void afl_setup(struct uc_struct* uc) {
char *id_str = getenv(SHM_ENV_VAR),
*inst_r = getenv("AFL_INST_RATIO");
char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO");
int shm_id;
@ -132,17 +141,17 @@ static void afl_setup(struct uc_struct* uc) {
so that the parent doesn't give up on us. */
if (inst_r) uc->afl_area_ptr[0] = 1;
}
/* Maintain for compatibility */
if (getenv("AFL_QEMU_COMPCOV")) {
uc->afl_compcov_level = 1;
}
if (getenv("AFL_QEMU_COMPCOV")) { uc->afl_compcov_level = 1; }
if (getenv("AFL_COMPCOV_LEVEL")) {
uc->afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL"));
}
}
/* Fork server logic, invoked once we hit first emulated instruction. */
@ -211,7 +220,6 @@ static void afl_forkserver(CPUArchState *env) {
}
/* The equivalent of the tuple logging routine from afl-as.h. */
static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) {
@ -220,8 +228,7 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) {
u8* afl_area_ptr = uc->afl_area_ptr;
if(!afl_area_ptr)
return;
if (!afl_area_ptr) return;
/* Looks like QEMU always maps to fixed locations, so ASAN is not a
concern. Phew. But instruction addresses may be aligned. Let's mangle
@ -243,7 +250,6 @@ static inline void afl_maybe_log(struct uc_struct* uc, unsigned long cur_loc) {
}
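
The body of afl_maybe_log() is elided by this hunk. Conceptually it is the standard AFL edge-coverage update: mangle the block address (emulated code is aligned), bound it by the AFL_INST_RATIO cutoff, then bump the map byte indexed by the XOR of the current and previous (shifted) locations. A rough reconstruction under those assumptions, with prev_loc kept in a static for brevity:

#include <stdint.h>

static unsigned long prev_loc;          /* previous block, already shifted */

/* Hedged sketch of the classic AFL/QEMU edge-logging step. */
static inline void log_edge(uint8_t *area, unsigned long cur_loc,
                            unsigned long map_size, unsigned long inst_rms) {

  /* Spread aligned instruction addresses over the whole map. */
  cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
  cur_loc &= map_size - 1;

  /* Honour the AFL_INST_RATIO-derived cutoff. */
  if (cur_loc >= inst_rms) return;

  /* One byte per (prev, cur) edge; the shift keeps A->B distinct from B->A. */
  area[cur_loc ^ prev_loc]++;
  prev_loc = cur_loc >> 1;

}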
/* This code is invoked whenever QEMU decides that it doesn't have a
translation of a particular block and needs to compute it. When this happens,
we tell the parent to mirror the operation, so that the next fork() has a
@ -264,7 +270,6 @@ static void afl_request_tsl(target_ulong pc, target_ulong cb, uint64_t flags) {
}
/* This is the other side of the same channel. Since timeouts are handled by
afl-fuzz simply killing the child, we can just wait until the pipe breaks. */
@ -276,12 +281,13 @@ static void afl_wait_tsl(CPUArchState *env, int fd) {
/* Broken pipe means it's time to return to the fork server routine. */
if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
break;
if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break;
tb_find_slow(env, t.pc, t.cs_base, t.flags);
}
close(fd);
}
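
afl_wait_tsl() above is the parent's side of the channel; its counterpart afl_request_tsl() (body not shown in this hunk) runs in the forked child and simply serializes the block description into the pipe. A hedged reconstruction, reusing struct afl_tsl from this file and assuming the TSL_FD / afl_fork_child conventions of afl's qemu_mode:

/* Child side of the "needs translation" channel: pack the block
   description and hand it to the parent, which pre-translates the same
   block so future fork() children inherit the cached TB. */
static void request_tsl(target_ulong pc, target_ulong cs_base, uint64_t flags) {

  struct afl_tsl t;

  if (!afl_fork_child) return;            /* only meaningful in the child */

  t.pc = pc;
  t.cs_base = cs_base;
  t.flags = flags;

  if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
    return;                               /* parent is gone; nothing to do */

}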

View File

@ -35,11 +35,9 @@
static void afl_gen_compcov(TCGContext *s, uint64_t cur_loc, TCGv_i64 arg1,
TCGv_i64 arg2, TCGMemOp ot, int is_imm) {
if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr)
return;
if (!s->uc->afl_compcov_level || !s->uc->afl_area_ptr) return;
if (!is_imm && s->uc->afl_compcov_level < 2)
return;
if (!is_imm && s->uc->afl_compcov_level < 2) return;
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= MAP_SIZE - 7;
@ -47,16 +45,13 @@ static void afl_gen_compcov(TCGContext *s, uint64_t cur_loc, TCGv_i64 arg1,
if (cur_loc >= s->uc->afl_inst_rms) return;
switch (ot) {
case MO_64:
gen_afl_compcov_log_64(s, cur_loc, arg1, arg2);
break;
case MO_32:
gen_afl_compcov_log_32(s, cur_loc, arg1, arg2);
break;
case MO_16:
gen_afl_compcov_log_16(s, cur_loc, arg1, arg2);
break;
default:
return;
case MO_64: gen_afl_compcov_log_64(s, cur_loc, arg1, arg2); break;
case MO_32: gen_afl_compcov_log_32(s, cur_loc, arg1, arg2); break;
case MO_16: gen_afl_compcov_log_16(s, cur_loc, arg1, arg2); break;
default: return;
}
}

View File

@ -31,26 +31,29 @@
*/
static inline void gen_afl_compcov_log_16(TCGContext *tcg_ctx, uint64_t cur_loc,
TCGv_i64 arg1, TCGv_i64 arg2)
{
TCGv_i64 arg1, TCGv_i64 arg2) {
TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
gen_helper_afl_compcov_log_16(tcg_ctx, tuc, tcur_loc, arg1, arg2);
}
static inline void gen_afl_compcov_log_32(TCGContext *tcg_ctx, uint64_t cur_loc,
TCGv_i64 arg1, TCGv_i64 arg2)
{
TCGv_i64 arg1, TCGv_i64 arg2) {
TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
gen_helper_afl_compcov_log_32(tcg_ctx, tuc, tcur_loc, arg1, arg2);
}
static inline void gen_afl_compcov_log_64(TCGContext *tcg_ctx, uint64_t cur_loc,
TCGv_i64 arg1, TCGv_i64 arg2)
{
TCGv_i64 arg1, TCGv_i64 arg2) {
TCGv_ptr tuc = tcg_const_ptr(tcg_ctx, tcg_ctx->uc);
TCGv_i64 tcur_loc = tcg_const_i64(tcg_ctx, cur_loc);
gen_helper_afl_compcov_log_64(tcg_ctx, tuc, tcur_loc, arg1, arg2);
}

View File

@ -38,9 +38,8 @@ void HELPER(afl_compcov_log_16)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr;
if ((arg1 & 0xff) == (arg2 & 0xff)) {
INC_AFL_AREA(cur_loc);
}
if ((arg1 & 0xff) == (arg2 & 0xff)) { INC_AFL_AREA(cur_loc); }
}
void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
@ -49,14 +48,17 @@ void HELPER(afl_compcov_log_32)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr;
if ((arg1 & 0xff) == (arg2 & 0xff)) {
INC_AFL_AREA(cur_loc);
if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
INC_AFL_AREA(cur_loc + 1);
if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
INC_AFL_AREA(cur_loc +2);
}
if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) { INC_AFL_AREA(cur_loc + 2); }
}
}
}
void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
@ -65,25 +67,40 @@ void HELPER(afl_compcov_log_64)(void* uc_ptr, uint64_t cur_loc, uint64_t arg1,
u8* afl_area_ptr = ((struct uc_struct*)uc_ptr)->afl_area_ptr;
if ((arg1 & 0xff) == (arg2 & 0xff)) {
INC_AFL_AREA(cur_loc);
if ((arg1 & 0xffff) == (arg2 & 0xffff)) {
INC_AFL_AREA(cur_loc + 1);
if ((arg1 & 0xffffff) == (arg2 & 0xffffff)) {
INC_AFL_AREA(cur_loc + 2);
if ((arg1 & 0xffffffff) == (arg2 & 0xffffffff)) {
INC_AFL_AREA(cur_loc + 3);
if ((arg1 & 0xffffffffff) == (arg2 & 0xffffffffff)) {
INC_AFL_AREA(cur_loc + 4);
if ((arg1 & 0xffffffffffff) == (arg2 & 0xffffffffffff)) {
INC_AFL_AREA(cur_loc + 5);
if ((arg1 & 0xffffffffffffff) == (arg2 & 0xffffffffffffff)) {
INC_AFL_AREA(cur_loc + 6);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
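
The nested conditionals above award one extra map hit for every additional matching low-order byte of the two comparison operands, which is what lets CompCov surface gradual progress toward multi-byte magic values. The same logic written as a loop (operand width in bytes; the array increment stands in for INC_AFL_AREA):

#include <stdint.h>

/* Loop form of the prefix-match logic in the afl_compcov_log_* helpers:
   credit one map byte per matching low-order byte of the two operands,
   stopping at the first mismatch. size is the operand width in bytes
   (2, 4 or 8). */
static void compcov_log(uint8_t *afl_area_ptr, uint64_t cur_loc,
                        uint64_t arg1, uint64_t arg2, int size) {

  uint64_t mask = 0;
  int      i;

  for (i = 0; i < size - 1; ++i) {

    mask = (mask << 8) | 0xff;            /* low i + 1 bytes */
    if ((arg1 & mask) != (arg2 & mask)) break;
    afl_area_ptr[cur_loc + i]++;          /* INC_AFL_AREA(cur_loc + i) */

  }

}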