Android PR integration

This commit is contained in:
van Hauser
2019-11-11 23:50:31 +01:00
parent 613ab3ba2a
commit 760d4991f3
11 changed files with 33 additions and 26 deletions

View File

@ -61,7 +61,7 @@ fi
CUR_TIME=`date +%s` CUR_TIME=`date +%s`
TMP=`mktemp -t .afl-whatsup-XXXXXXXX` || TMP=`mktemp -p /data/local/tmp .afl-whatsup-XXXXXXXX` || exit 1 TMP=`mktemp -t .afl-whatsup-XXXXXXXX` || TMP=`mktemp -p /data/local/tmp .afl-whatsup-XXXXXXXX` || TMP=`mktemp -p /data/local/tmp .afl-whatsup-XXXXXXXX` || exit 1
ALIVE_CNT=0 ALIVE_CNT=0
DEAD_CNT=0 DEAD_CNT=0

View File

@ -27,6 +27,7 @@ Version ++2.58d (dev):
- ripped regex.dictionary from Google afl PR - ripped regex.dictionary from Google afl PR
- qemu and unicorn download scripts now try to download until the full - qemu and unicorn download scripts now try to download until the full
download succeeded. f*ckin travis fails downloading 40% of the time! download succeeded. f*ckin travis fails downloading 40% of the time!
- added the few Android things we didn't have already from the Google afl repository
- removed unnecessary warnings - removed unnecessary warnings
- added the radamsa stage - added the radamsa stage

View File

@ -552,7 +552,7 @@ u8 has_new_bits(u8*);
u32 count_bits(u8*); u32 count_bits(u8*);
u32 count_bytes(u8*); u32 count_bytes(u8*);
u32 count_non_255_bytes(u8*); u32 count_non_255_bytes(u8*);
#ifdef __x86_64__ #ifdef WORD_SIZE_64
void simplify_trace(u64*); void simplify_trace(u64*);
void classify_counts(u64*); void classify_counts(u64*);
#else #else

View File

@ -63,7 +63,7 @@ static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
} }
static inline int shmget(key_t __key, size_t __size, int __shmflg) { static inline int shmget(key_t __key, size_t __size, int __shmflg) {
(void) __shmflg;
int fd, ret; int fd, ret;
char ourkey[11]; char ourkey[11];
@ -86,7 +86,7 @@ error:
} }
static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) { static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) {
(void) __shmflg;
int size; int size;
void *ptr; void *ptr;

View File

@ -52,13 +52,18 @@
#define EXEC_TM_ROUND 20 #define EXEC_TM_ROUND 20
/* 64bit arch MACRO */
#if (defined (__x86_64__) || defined (__arm64__) || defined (__aarch64__))
#define WORD_SIZE_64 1
#endif
/* Default memory limit for child process (MB): */ /* Default memory limit for child process (MB): */
#ifndef __x86_64__ #ifndef WORD_SIZE_64
#define MEM_LIMIT 25 #define MEM_LIMIT 25
#else #else
#define MEM_LIMIT 50 #define MEM_LIMIT 50
#endif /* ^!__x86_64__ */ #endif /* ^!WORD_SIZE_64 */
/* Default memory limit when running in QEMU mode (MB): */ /* Default memory limit when running in QEMU mode (MB): */

View File

@ -205,6 +205,7 @@ static void edit_params(u32 argc, char** argv) {
u8* cur = *(++argv); u8* cur = *(++argv);
if (!strcmp(cur, "-m32")) bit_mode = 32; if (!strcmp(cur, "-m32")) bit_mode = 32;
if (!strcmp(cur, "armv7a-linux-androideabi")) bit_mode = 32;
if (!strcmp(cur, "-m64")) bit_mode = 64; if (!strcmp(cur, "-m64")) bit_mode = 64;
if (!strcmp(cur, "-x")) x_set = 1; if (!strcmp(cur, "-x")) x_set = 1;

View File

@ -71,7 +71,7 @@ static u32 inst_ratio = 100, /* Instrumentation probability (%) */
instrumentation for whichever mode we were compiled with. This is not instrumentation for whichever mode we were compiled with. This is not
perfect, but should do the trick for almost all use cases. */ perfect, but should do the trick for almost all use cases. */
#ifdef __x86_64__ #ifdef WORD_SIZE_64
static u8 use_64bit = 1; static u8 use_64bit = 1;
@ -83,7 +83,7 @@ static u8 use_64bit = 0;
#error "Sorry, 32-bit Apple platforms are not supported." #error "Sorry, 32-bit Apple platforms are not supported."
#endif /* __APPLE__ */ #endif /* __APPLE__ */
#endif /* ^__x86_64__ */ #endif /* ^WORD_SIZE_64 */
/* Examine and modify parameters to pass to 'as'. Note that the file name /* Examine and modify parameters to pass to 'as'. Note that the file name
is always the last parameter passed by GCC, so we exploit this property is always the last parameter passed by GCC, so we exploit this property

View File

@ -73,7 +73,7 @@ void read_bitmap(u8* fname) {
u8 has_new_bits(u8* virgin_map) { u8 has_new_bits(u8* virgin_map) {
#ifdef __x86_64__ #ifdef WORD_SIZE_64
u64* current = (u64*)trace_bits; u64* current = (u64*)trace_bits;
u64* virgin = (u64*)virgin_map; u64* virgin = (u64*)virgin_map;
@ -87,7 +87,7 @@ u8 has_new_bits(u8* virgin_map) {
u32 i = (MAP_SIZE >> 2); u32 i = (MAP_SIZE >> 2);
#endif /* ^__x86_64__ */ #endif /* ^WORD_SIZE_64 */
u8 ret = 0; u8 ret = 0;
@ -107,7 +107,7 @@ u8 has_new_bits(u8* virgin_map) {
/* Looks like we have not found any new bytes yet; see if any non-zero /* Looks like we have not found any new bytes yet; see if any non-zero
bytes in current[] are pristine in virgin[]. */ bytes in current[] are pristine in virgin[]. */
#ifdef __x86_64__ #ifdef WORD_SIZE_64
if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
(cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) || (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
@ -125,7 +125,7 @@ u8 has_new_bits(u8* virgin_map) {
else else
ret = 1; ret = 1;
#endif /* ^__x86_64__ */ #endif /* ^WORD_SIZE_64 */
} }
@ -244,7 +244,7 @@ const u8 simplify_lookup[256] = {
}; };
#ifdef __x86_64__ #ifdef WORD_SIZE_64
void simplify_trace(u64* mem) { void simplify_trace(u64* mem) {
@ -306,7 +306,7 @@ void simplify_trace(u32* mem) {
} }
#endif /* ^__x86_64__ */ #endif /* ^WORD_SIZE_64 */
/* Destructively classify execution counts in a trace. This is used as a /* Destructively classify execution counts in a trace. This is used as a
preprocessing step for any newly acquired traces. Called on every exec, preprocessing step for any newly acquired traces. Called on every exec,
@ -339,7 +339,7 @@ void init_count_class16(void) {
} }
#ifdef __x86_64__ #ifdef WORD_SIZE_64
void classify_counts(u64* mem) { void classify_counts(u64* mem) {
@ -391,7 +391,7 @@ void classify_counts(u32* mem) {
} }
#endif /* ^__x86_64__ */ #endif /* ^WORD_SIZE_64 */
/* Compact trace bytes into a smaller bitmap. We effectively just drop the /* Compact trace bytes into a smaller bitmap. We effectively just drop the
count information here. This is called only sporadically, for some count information here. This is called only sporadically, for some
@ -595,11 +595,11 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
if (!dumb_mode) { if (!dumb_mode) {
#ifdef __x86_64__ #ifdef WORD_SIZE_64
simplify_trace((u64*)trace_bits); simplify_trace((u64*)trace_bits);
#else #else
simplify_trace((u32*)trace_bits); simplify_trace((u32*)trace_bits);
#endif /* ^__x86_64__ */ #endif /* ^WORD_SIZE_64 */
if (!has_new_bits(virgin_tmout)) return keeping; if (!has_new_bits(virgin_tmout)) return keeping;
@ -658,11 +658,11 @@ u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
if (!dumb_mode) { if (!dumb_mode) {
#ifdef __x86_64__ #ifdef WORD_SIZE_64
simplify_trace((u64*)trace_bits); simplify_trace((u64*)trace_bits);
#else #else
simplify_trace((u32*)trace_bits); simplify_trace((u32*)trace_bits);
#endif /* ^__x86_64__ */ #endif /* ^WORD_SIZE_64 */
if (!has_new_bits(virgin_crash)) return keeping; if (!has_new_bits(virgin_crash)) return keeping;

View File

@ -221,11 +221,11 @@ u8 run_target(char** argv, u32 timeout) {
tb4 = *(u32*)trace_bits; tb4 = *(u32*)trace_bits;
#ifdef __x86_64__ #ifdef WORD_SIZE_64
classify_counts((u64*)trace_bits); classify_counts((u64*)trace_bits);
#else #else
classify_counts((u32*)trace_bits); classify_counts((u32*)trace_bits);
#endif /* ^__x86_64__ */ #endif /* ^WORD_SIZE_64 */
prev_timed_out = child_timed_out; prev_timed_out = child_timed_out;

View File

@ -121,7 +121,7 @@ static void edit_params(u32 argc, char** argv) {
u8 fortify_set = 0, asan_set = 0; u8 fortify_set = 0, asan_set = 0;
u8* name; u8* name;
#if defined(__FreeBSD__) && defined(__x86_64__) #if defined(__FreeBSD__) && defined(WORD_SIZE_64)
u8 m32_set = 0; u8 m32_set = 0;
#endif #endif
@ -228,7 +228,7 @@ static void edit_params(u32 argc, char** argv) {
if (!strcmp(cur, "-pipe")) continue; if (!strcmp(cur, "-pipe")) continue;
#if defined(__FreeBSD__) && defined(__x86_64__) #if defined(__FreeBSD__) && defined(WORD_SIZE_64)
if (!strcmp(cur, "-m32")) m32_set = 1; if (!strcmp(cur, "-m32")) m32_set = 1;
#endif #endif
@ -288,7 +288,7 @@ static void edit_params(u32 argc, char** argv) {
if (!getenv("AFL_DONT_OPTIMIZE")) { if (!getenv("AFL_DONT_OPTIMIZE")) {
#if defined(__FreeBSD__) && defined(__x86_64__) #if defined(__FreeBSD__) && defined(WORD_SIZE_64)
/* On 64-bit FreeBSD systems, clang -g -m32 is broken, but -m32 itself /* On 64-bit FreeBSD systems, clang -g -m32 is broken, but -m32 itself
works OK. This has nothing to do with us, but let's avoid triggering works OK. This has nothing to do with us, but let's avoid triggering

View File

@ -204,7 +204,7 @@ int main(int argc, char** argv) {
#if defined(__linux__) #if defined(__linux__)
if (sched_setaffinity(0, sizeof(c), &c)) if (sched_setaffinity(0, sizeof(c), &c))
PFATAL("sched_setaffinity failed"); PFATAL("sched_setaffinity failed for cpu %d", i);
#endif #endif
util_perc = measure_preemption(CTEST_CORE_TRG_MS); util_perc = measure_preemption(CTEST_CORE_TRG_MS);