mirror of https://github.com/AFLplusplus/AFLplusplus.git (synced 2025-06-16 20:08:07 +00:00)

Commit: code format
@@ -604,11 +604,7 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
 
   }
 
-  if (afl->afl_env.afl_pizza_mode) {
-
-    afl->pizza_is_served = 1;
-
-  }
+  if (afl->afl_env.afl_pizza_mode) { afl->pizza_is_served = 1; }
 
   if (issue_detected) { sleep(2); }
 
@@ -1241,11 +1241,15 @@ int main(int argc, char **argv_orig, char **envp) {
   u32 save_be_quiet = be_quiet;
   be_quiet = !debug;
   if (map_size > 4194304) {
-    fsrv->map_size = map_size;
-  }
-  else {
-    fsrv->map_size = 4194304; // dummy temporary value
+
+    fsrv->map_size = map_size;
+
+  } else {
+
+    fsrv->map_size = 4194304; // dummy temporary value
+
   }
+
   u32 new_map_size =
       afl_fsrv_get_mapsize(fsrv, use_argv, &stop_soon,
                            (get_afl_env("AFL_DEBUG_CHILD") ||
@@ -47,6 +47,7 @@
 #ifdef __NR_getrandom
   #define arc4random_buf(p, l) \
     do { \
+      \
       ssize_t rd = syscall(__NR_getrandom, p, l, 0); \
      if (rd != l) DEBUGF("getrandom failed"); \
       \

@@ -56,6 +57,7 @@
   #include <time.h>
   #define arc4random_buf(p, l) \
     do { \
+      \
       srand(time(NULL)); \
       u32 i; \
       u8 *ptr = (u8 *)p; \
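The two hunks above define fallback implementations of arc4random_buf() for platforms that lack it: a getrandom(2) raw-syscall wrapper when __NR_getrandom exists, and a much weaker srand()/rand() loop otherwise. A minimal standalone sketch of the syscall-based variant follows (assumes Linux; the original's DEBUGF() is replaced by a plain fprintf(), and main() is only for illustration):

#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifdef __NR_getrandom
  /* Fill p with l random bytes via the raw syscall, as in the hunk above. */
  #define arc4random_buf(p, l)                                        \
    do {                                                              \
                                                                      \
      ssize_t rd = syscall(__NR_getrandom, p, l, 0);                  \
      if (rd != (ssize_t)(l)) fprintf(stderr, "getrandom failed\n");  \
                                                                      \
    } while (0)
#endif

int main(void) {

  unsigned char buf[16];
  memset(buf, 0, sizeof(buf));
  arc4random_buf(buf, sizeof(buf));
  for (size_t i = 0; i < sizeof(buf); i++)
    printf("%02x", buf[i]);
  printf("\n");
  return 0;

}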
@@ -78,6 +80,7 @@
     (defined(__FreeBSD__) && __FreeBSD_version < 1200000)
 // use this hack if not C11
 typedef struct {
+
   long long __ll;
   long double __ld;
 

@@ -89,11 +92,11 @@ typedef struct {
 
 #ifndef PAGE_SIZE
   #define PAGE_SIZE 4096
-#endif /* !PAGE_SIZE */
+#endif /* !PAGE_SIZE */
 
 #ifndef MAP_ANONYMOUS
   #define MAP_ANONYMOUS MAP_ANON
-#endif /* !MAP_ANONYMOUS */
+#endif /* !MAP_ANONYMOUS */
 
 #define SUPER_PAGE_SIZE 1 << 21
 

@@ -101,22 +104,30 @@ typedef struct {
 
 #define DEBUGF(_x...) \
   do { \
+    \
     if (alloc_verbose) { \
+      \
       if (++call_depth == 1) { \
+        \
         fprintf(stderr, "[AFL] " _x); \
         fprintf(stderr, "\n"); \
+        \
       } \
       call_depth--; \
+      \
     } \
+    \
   } while (0)
 
 #define FATAL(_x...) \
   do { \
+    \
     if (++call_depth == 1) { \
+      \
       fprintf(stderr, "*** [AFL] " _x); \
       fprintf(stderr, " ***\n"); \
       abort(); \
+      \
     } \
     call_depth--; \
+    \
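The DEBUGF()/FATAL() macros above gate their output behind ++call_depth == 1: because this library interposes malloc(), fprintf() may itself allocate and re-enter the wrappers, and the thread-local depth counter keeps that recursion from producing nested or endless log output. A sketch of the same guard as a plain function (log_msg() is an illustrative name, not part of libdislocator):

#include <stdarg.h>
#include <stdio.h>

static __thread unsigned int call_depth;  /* to avoid recursion via fprintf() */

static void log_msg(const char *fmt, ...) {

  if (++call_depth == 1) {

    /* Only the outermost call prints; re-entrant calls fall through. */
    va_list ap;
    va_start(ap, fmt);
    fprintf(stderr, "[AFL] ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    va_end(ap);

  }

  call_depth--;

}

int main(void) {

  log_msg("allocated %d bytes", 128);
  return 0;

}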
@@ -138,19 +149,19 @@ typedef struct {
 
 /* Configurable stuff (use AFL_LD_* to set): */
 
-static size_t max_mem = MAX_ALLOC; /* Max heap usage to permit */
-static u8 alloc_verbose, /* Additional debug messages */
-    hard_fail, /* abort() when max_mem exceeded? */
-    no_calloc_over, /* abort() on calloc() overflows? */
-    align_allocations; /* Force alignment to sizeof(void*) */
+static size_t max_mem = MAX_ALLOC; /* Max heap usage to permit */
+static u8 alloc_verbose, /* Additional debug messages */
+    hard_fail, /* abort() when max_mem exceeded? */
+    no_calloc_over, /* abort() on calloc() overflows? */
+    align_allocations; /* Force alignment to sizeof(void*) */
 
 #if defined __OpenBSD__ || defined __APPLE__
   #define __thread
   #warning no thread support available
 #endif
-static _Atomic size_t total_mem; /* Currently allocated mem */
+static _Atomic size_t total_mem; /* Currently allocated mem */
 
-static __thread u32 call_depth; /* To avoid recursion via fprintf() */
+static __thread u32 call_depth; /* To avoid recursion via fprintf() */
 static u32 alloc_canary;
 
 /* This is the main alloc function. It allocates one page more than necessary,
@@ -159,16 +170,19 @@ static u32 alloc_canary;
    the returned memory will be zeroed. */
 
 static void *__dislocator_alloc(size_t len) {
+
   u8 *ret, *base;
   size_t tlen;
   int flags, protflags, fd, sp;
 
   if (total_mem + len > max_mem || total_mem + len < total_mem) {
+
     if (hard_fail) FATAL("total allocs exceed %zu MB", max_mem / 1024 / 1024);
 
     DEBUGF("total allocs exceed %zu MB, returning NULL", max_mem / 1024 / 1024);
 
     return NULL;
+
   }
 
   size_t rlen;

@@ -201,8 +215,10 @@ static void *__dislocator_alloc(size_t len) {
   if (sp) flags |= MAP_ALIGNED_SUPER;
 #elif defined(__sun)
   if (sp) {
+
     base = (void *)(caddr_t)(1 << 21);
     flags |= MAP_ALIGN;
+
   }
 
 #endif

@@ -214,6 +230,7 @@ static void *__dislocator_alloc(size_t len) {
 #if defined(USEHUGEPAGE)
   /* We try one more time with regular call */
   if (ret == MAP_FAILED) {
+
 #if defined(__APPLE__)
     fd = -1;
 #elif defined(__linux__)

@@ -224,16 +241,19 @@ static void *__dislocator_alloc(size_t len) {
     flags &= -MAP_ALIGN;
 #endif
     ret = (u8 *)mmap(NULL, tlen, protflags, flags, fd, 0);
+
   }
 
 #endif
 
   if (ret == MAP_FAILED) {
+
     if (hard_fail) FATAL("mmap() failed on alloc (OOM?)");
 
     DEBUGF("mmap() failed on alloc (OOM?)");
 
     return NULL;
+
   }
 
 #if defined(USENAMEDPAGE)

@@ -242,7 +262,9 @@ static void *__dislocator_alloc(size_t len) {
   // `<start>-<end> ---p 00000000 00:00 0 [anon:libdislocator]`
   if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)ret, tlen,
             (unsigned long)"libdislocator") < 0) {
+
     DEBUGF("prctl() failed");
+
   }
 
 #endif

@@ -268,12 +290,15 @@ static void *__dislocator_alloc(size_t len) {
   total_mem += len;
 
   if (rlen != len) {
+
     size_t i;
     for (i = len; i < rlen; ++i)
       ret[i] = TAIL_ALLOC_CANARY;
+
   }
 
   return ret;
+
 }
 
 /* The "user-facing" wrapper for calloc(). This just checks for overflows and
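Taken together, the hunks above are the heart of libdislocator: every allocation gets its own mmap() region rounded up to whole pages, with one extra PROT_NONE page placed right after the buffer and the returned pointer right-aligned against it, so any access past the end faults immediately; free() later revokes the whole region and never reuses it. A stripped-down sketch of that scheme under those assumptions (guard_alloc()/guard_free() are illustrative names; the real code additionally stores a canary and the length in front of the buffer and handles huge/named pages):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *guard_alloc(size_t len) {

  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  size_t rlen = (len + page - 1) / page * page;  /* round up to whole pages */
  size_t tlen = rlen + page;                     /* plus one guard page     */

  unsigned char *base = mmap(NULL, tlen, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return NULL;

  /* Make the last page inaccessible; any overflow past the buffer faults. */
  if (mprotect(base + rlen, page, PROT_NONE)) return NULL;

  /* Right-align the user buffer so it ends exactly at the guard page. */
  return base + rlen - len;

}

static void guard_free(void *ptr, size_t len) {

  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  size_t rlen = (len + page - 1) / page * page;
  unsigned char *base = (unsigned char *)ptr - (rlen - len);

  /* Revoke access but keep the mapping, so the address is never reused. */
  mprotect(base, rlen + page, PROT_NONE);

}

int main(void) {

  char *p = guard_alloc(100);
  if (!p) return 1;
  memset(p, 'A', 100);  /* fine */
  printf("alloc at %p\n", (void *)p);
  /* p[100] = 'X'; would touch the guard page and crash immediately. */
  guard_free(p, 100);
  return 0;

}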
@@ -281,6 +306,7 @@ static void *__dislocator_alloc(size_t len) {
 
 __attribute__((malloc)) __attribute__((alloc_size(1, 2))) void *calloc(
     size_t elem_len, size_t elem_cnt) {
+
   void *ret;
 
   size_t len = elem_len * elem_cnt;

@@ -288,13 +314,17 @@ __attribute__((malloc)) __attribute__((alloc_size(1, 2))) void *calloc(
   /* Perform some sanity checks to detect obvious issues... */
 
   if (elem_cnt && len / elem_cnt != elem_len) {
+
     if (no_calloc_over) {
+
       DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len,
              elem_cnt);
       return NULL;
+
     }
 
     FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);
+
   }
 
   ret = __dislocator_alloc(len);
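The elem_cnt && len / elem_cnt != elem_len test above is the classic multiplication-overflow check for calloc(): after computing len = elem_len * elem_cnt, dividing back must reproduce elem_len unless the product wrapped around SIZE_MAX. A standalone illustration (helper name is hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int mul_overflows(size_t elem_len, size_t elem_cnt) {

  size_t len = elem_len * elem_cnt;
  return elem_cnt && len / elem_cnt != elem_len;

}

int main(void) {

  printf("%d\n", mul_overflows(16, 4));           /* 0: fits            */
  printf("%d\n", mul_overflows(SIZE_MAX / 2, 4)); /* 1: would overflow  */
  return 0;

}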
@@ -303,6 +333,7 @@ __attribute__((malloc)) __attribute__((alloc_size(1, 2))) void *calloc(
          total_mem);
 
   return ret;
+
 }
 
 /* The wrapper for malloc(). Roughly the same, also clobbers the returned

@@ -311,6 +342,7 @@ __attribute__((malloc)) __attribute__((alloc_size(1, 2))) void *calloc(
 
 __attribute__((malloc)) __attribute__((alloc_size(1))) void *malloc(
     size_t len) {
+
   void *ret;
 
   ret = __dislocator_alloc(len);

@@ -320,6 +352,7 @@ __attribute__((malloc)) __attribute__((alloc_size(1))) void *malloc(
   if (ret && len) memset(ret, ALLOC_CLOBBER, len);
 
   return ret;
+
 }
 
 /* The wrapper for free(). This simply marks the entire region as PROT_NONE.

@@ -327,6 +360,7 @@ __attribute__((malloc)) __attribute__((alloc_size(1))) void *malloc(
    read the canary. Not very graceful, but works, right? */
 
 void free(void *ptr) {
+
   u32 len;
 
   DEBUGF("free(%p)", ptr);

@@ -341,10 +375,12 @@ void free(void *ptr) {
   u8 *ptr_ = ptr;
 
   if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) {
+
     size_t rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
     for (; len < rlen; ++len)
       if (ptr_[len] != TAIL_ALLOC_CANARY)
         FATAL("bad tail allocator canary on free()");
+
   }
 
   /* Protect everything. Note that the extra page at the end is already

@@ -358,27 +394,32 @@ void free(void *ptr) {
   ptr = ptr_;
 
   /* Keep the mapping; this is wasteful, but prevents ptr reuse. */
+
 }
 
 /* Realloc is pretty straightforward, too. We forcibly reallocate the buffer,
    move data, and then free (aka mprotect()) the original one. */
 
 __attribute__((alloc_size(2))) void *realloc(void *ptr, size_t len) {
+
   void *ret;
 
   ret = malloc(len);
 
   if (ret && ptr) {
+
     if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on realloc()");
     // Here the tail canary check is delayed to free()
 
     memcpy(ret, ptr, MIN(len, PTR_L(ptr)));
     free(ptr);
+
   }
 
   DEBUGF("realloc(%p, %zu) = %p [%zu total]", ptr, len, ret, total_mem);
 
   return ret;
+
 }
 
 /* posix_memalign we mainly check the proper alignment argument
 
@@ -386,11 +427,14 @@ __attribute__((alloc_size(2))) void *realloc(void *ptr, size_t len) {
    a normal request */
 
 int posix_memalign(void **ptr, size_t align, size_t len) {
+
   // if (*ptr == NULL) return EINVAL; // (andrea) Why? I comment it out for now
   if ((align % 2) || (align % sizeof(void *))) return EINVAL;
   if (len == 0) {
+
     *ptr = NULL;
     return 0;
+
   }
 
   size_t rem = len % align;
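The rem computed at the end of the hunk above is presumably used to round the request up to a multiple of align before it is handed to the normal allocation path; that continuation is outside the hunk, so the following round-up step is only an illustrative sketch, not the original code:

#include <stddef.h>
#include <stdio.h>

/* Round len up to the next multiple of align (illustrative). */
static size_t round_up(size_t len, size_t align) {

  size_t rem = len % align;
  if (rem) len += align - rem;
  return len;

}

int main(void) {

  printf("%zu\n", round_up(100, 64));  /* 128 */
  printf("%zu\n", round_up(128, 64));  /* 128 */
  return 0;

}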
@@ -403,57 +447,72 @@ int posix_memalign(void **ptr, size_t align, size_t len) {
   DEBUGF("posix_memalign(%p %zu, %zu) [*ptr = %p]", ptr, align, len, *ptr);
 
   return 0;
+
 }
 
 /* just the non-posix fashion */
 
 __attribute__((malloc)) __attribute__((alloc_size(2))) void *memalign(
     size_t align, size_t len) {
+
   void *ret = NULL;
 
   if (posix_memalign(&ret, align, len)) {
+
     DEBUGF("memalign(%zu, %zu) failed", align, len);
+
   }
 
   return ret;
+
 }
 
 /* sort of C11 alias of memalign only more severe, alignment-wise */
 
 __attribute__((malloc)) __attribute__((alloc_size(2))) void *aligned_alloc(
     size_t align, size_t len) {
+
   void *ret = NULL;
 
   if ((len % align)) return NULL;
 
   if (posix_memalign(&ret, align, len)) {
+
     DEBUGF("aligned_alloc(%zu, %zu) failed", align, len);
+
   }
 
   return ret;
+
 }
 
 /* specific BSD api mainly checking possible overflow for the size */
 
 __attribute__((alloc_size(2, 3))) void *reallocarray(void *ptr, size_t elem_len,
                                                      size_t elem_cnt) {
+
   const size_t elem_lim = 1UL << (sizeof(size_t) * 4);
   const size_t elem_tot = elem_len * elem_cnt;
   void *ret = NULL;
 
   if ((elem_len >= elem_lim || elem_cnt >= elem_lim) && elem_len > 0 &&
       elem_cnt > (SIZE_MAX / elem_len)) {
+
     DEBUGF("reallocarray size overflow (%zu)", elem_tot);
+
   } else {
+
     ret = realloc(ptr, elem_tot);
+
   }
 
   return ret;
+
 }
 
 #if defined(__APPLE__)
 size_t malloc_size(const void *ptr) {
 
 #elif !defined(__ANDROID__)
 size_t malloc_usable_size(void *ptr) {
 
@@ -463,24 +522,30 @@ size_t malloc_usable_size(const void *ptr) {
 #endif
 
   return ptr ? PTR_L(ptr) : 0;
+
 }
 
 #if defined(__APPLE__)
 size_t malloc_good_size(size_t len) {
+
   return (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
+
 }
 
 #endif
 
 __attribute__((constructor)) void __dislocator_init(void) {
+
   char *tmp = getenv("AFL_LD_LIMIT_MB");
 
   if (tmp) {
+
     char *tok;
     unsigned long long mmem = strtoull(tmp, &tok, 10);
     if (*tok != '\0' || errno == ERANGE || mmem > SIZE_MAX / 1024 / 1024)
       FATAL("Bad value for AFL_LD_LIMIT_MB");
     max_mem = mmem * 1024 * 1024;
+
   }
 
   alloc_canary = ALLOC_CANARY;
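__dislocator_init() above shows the parsing pattern used for AFL_LD_LIMIT_MB: read the value with strtoull(), reject trailing junk via the end pointer, reject out-of-range input via ERANGE, and cap the result before converting megabytes to bytes. A standalone sketch of the same pattern (unlike the original, it clears errno before the call):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {

  size_t max_mem = 0;
  char *tmp = getenv("AFL_LD_LIMIT_MB");

  if (tmp) {

    char *tok;
    errno = 0;  /* so a stale ERANGE cannot leak into the check below */
    unsigned long long mmem = strtoull(tmp, &tok, 10);
    if (*tok != '\0' || errno == ERANGE || mmem > SIZE_MAX / 1024 / 1024) {

      fprintf(stderr, "Bad value for AFL_LD_LIMIT_MB\n");
      return 1;

    }

    max_mem = mmem * 1024 * 1024;

  }

  printf("max_mem = %zu bytes\n", max_mem);
  return 0;

}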
@@ -492,23 +557,33 @@ __attribute__((constructor)) void __dislocator_init(void) {
   hard_fail = !!getenv("AFL_LD_HARD_FAIL");
   no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
   align_allocations = !!getenv("AFL_ALIGNED_ALLOC");
+
 }
 
 /* NetBSD fault handler specific api subset */
 
 void (*esetfunc(void (*fn)(int, const char *, ...)))(int, const char *, ...) {
+
   /* Might not be meaningful to implement; upper calls already report errors */
   return NULL;
+
 }
 
 void *emalloc(size_t len) {
+
   return malloc(len);
+
 }
 
 void *ecalloc(size_t elem_len, size_t elem_cnt) {
+
   return calloc(elem_len, elem_cnt);
+
 }
 
 void *erealloc(void *ptr, size_t len) {
+
   return realloc(ptr, len);
+
 }
 
@@ -378,7 +378,8 @@ __attribute__((hot)) int strcmp(const char *str1, const char *str2) {
 
 #undef strncmp
 
-__attribute__((hot)) int strncmp(const char *str1, const char *str2, size_t len) {
+__attribute__((hot)) int strncmp(const char *str1, const char *str2,
+                                 size_t len) {
 
   if (__tokencap_is_ro(str1)) __tokencap_dump(str1, len, 1);
   if (__tokencap_is_ro(str2)) __tokencap_dump(str2, len, 1);

@@ -428,7 +429,8 @@ __attribute__((hot)) int strcasecmp(const char *str1, const char *str2) {
 
 #undef strncasecmp
 
-__attribute__((hot)) int strncasecmp(const char *str1, const char *str2, size_t len) {
+__attribute__((hot)) int strncasecmp(const char *str1, const char *str2,
+                                     size_t len) {
 
   if (__tokencap_is_ro(str1)) __tokencap_dump(str1, len, 1);
   if (__tokencap_is_ro(str2)) __tokencap_dump(str2, len, 1);

@@ -454,7 +456,8 @@ __attribute__((hot)) int strncasecmp(const char *str1, const char *str2, size_t
 
 #undef memcmp
 
-__attribute__((hot)) int memcmp(const void *mem1, const void *mem2, size_t len) {
+__attribute__((hot)) int memcmp(const void *mem1, const void *mem2,
+                                size_t len) {
 
   if (__tokencap_is_ro(mem1)) __tokencap_dump(mem1, len, 0);
   if (__tokencap_is_ro(mem2)) __tokencap_dump(mem2, len, 0);

@@ -537,7 +540,8 @@ __attribute__((hot)) char *strstr(const char *haystack, const char *needle) {
 
 #undef strcasestr
 
-__attribute__((hot)) char *strcasestr(const char *haystack, const char *needle) {
+__attribute__((hot)) char *strcasestr(const char *haystack,
+                                      const char *needle) {
 
   if (__tokencap_is_ro(haystack))
     __tokencap_dump(haystack, strlen(haystack), 1);

@@ -566,8 +570,8 @@ __attribute__((hot)) char *strcasestr(const char *haystack, const char *needle)
 
 #undef memmem
 
-__attribute__((hot)) void *memmem(const void *haystack, size_t haystack_len, const void *needle,
-                                  size_t needle_len) {
+__attribute__((hot)) void *memmem(const void *haystack, size_t haystack_len,
+                                  const void *needle, size_t needle_len) {
 
   if (__tokencap_is_ro(haystack)) __tokencap_dump(haystack, haystack_len, 1);
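The libtokencap hunks above only re-wrap the signatures of the interposed comparison functions; the pattern they follow is to check whether each operand lives in read-only memory (__tokencap_is_ro()) and, if so, dump it as a candidate token before doing the comparison. A heavily simplified sketch of that interposition idea as an LD_PRELOAD-able memcmp() (is_ro() and dump_token() are illustrative stubs, not the real __tokencap implementation):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for __tokencap_is_ro() / __tokencap_dump(). */
static int is_ro(const void *ptr) {

  (void)ptr;
  return 1;  /* the real check walks the process memory map */

}

static void dump_token(const void *ptr, size_t len) {

  fprintf(stderr, "[tokencap-sketch] token: %.*s\n", (int)len,
          (const char *)ptr);

}

#undef memcmp
int memcmp(const void *mem1, const void *mem2, size_t len) {

  if (is_ro(mem1)) dump_token(mem1, len);
  if (is_ro(mem2)) dump_token(mem2, len);

  /* Do the comparison ourselves instead of calling back into libc. */
  const unsigned char *a = mem1, *b = mem2;
  for (size_t i = 0; i < len; i++)
    if (a[i] != b[i]) return a[i] < b[i] ? -1 : 1;
  return 0;

}

Built with cc -shared -fPIC and loaded via LD_PRELOAD, dynamically linked memcmp() calls in the target then pass through this wrapper first (build command and library path are examples only).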