LTO optimization, variable map size, autodictionary (#307)

* lto module clean-up

* step 1/3

* step 1/3 completed

* if tmp is ever made non-static

* parts 2 and 3 - autodictionary is complete

* variable map_size support

* variable map size: changed overlooked functions

* remove debug for autodict

* 64 bit alignment of map size

* fix review comments

* force 64 bit alignment on both sides

* typo
van Hauser
2020-04-10 22:33:11 +02:00
committed by GitHub
parent 6dcbc4dff4
commit 3a509c6168
25 changed files with 728 additions and 162 deletions

View File

@ -111,10 +111,15 @@ Then there are a few specific features that are only available in llvm_mode:
instrumentation which is 100% collision free (collisions are a big issue
in afl and afl-like instrumentations). This is performed by using
afl-clang-lto/afl-clang-lto++ instead of afl-clang-fast, but is only
built if LLVM 9 or newer is used.
built if LLVM 11 or newer is used.
None of these options are necessary to be used and are rather for manual
use (which only ever the author of this LTO implementation will use ;-)
- AFL_LLVM_LTO_AUTODICTIONARY will generate a dictionary in the target
binary based on string compare and memory compare functions.
afl-fuzz will automatically get these transmitted when starting to
fuzz.
None of the following options are necessary to be used and are rather for
manual use (which only ever the author of this LTO implementation will use).
These are used if several separate instrumentations are performed which
are then later combined.

View File

@ -138,8 +138,8 @@ static void __afl_map_shm(void) {
static void __afl_start_forkserver(void) {
static u8 tmp[4];
s32 child_pid;
u8 tmp[4] = {0, 0, 0, 0};
s32 child_pid;
u8 child_stopped = 0;

View File

@ -577,7 +577,9 @@ typedef struct afl_state {
u32 document_counter;
#endif
/* statis file */
void *maybe_add_auto;
/* statistics file */
double last_bitmap_cvg, last_stability, last_eps;
/* plot file saves from last run */
@ -840,18 +842,18 @@ u32 calculate_score(afl_state_t *, struct queue_entry *);
void read_bitmap(afl_state_t *, u8 *);
void write_bitmap(afl_state_t *);
u32 count_bits(u8 *);
u32 count_bytes(u8 *);
u32 count_non_255_bytes(u8 *);
u32 count_bits(afl_state_t *, u8 *);
u32 count_bytes(afl_state_t *, u8 *);
u32 count_non_255_bytes(afl_state_t *, u8 *);
#ifdef WORD_SIZE_64
void simplify_trace(u64 *);
void classify_counts(u64 *);
void simplify_trace(afl_state_t *, u64 *);
void classify_counts(afl_state_t *, u64 *);
#else
void simplify_trace(u32 *);
void classify_counts(u32 *);
void simplify_trace(afl_state_t *, u32 *);
void classify_counts(afl_state_t *, u32 *);
#endif
void init_count_class16(void);
void minimize_bits(u8 *, u8 *);
void minimize_bits(afl_state_t *, u8 *, u8 *);
#ifndef SIMPLE_FILES
u8 *describe_op(afl_state_t *, u8);
#endif
@ -862,7 +864,7 @@ u8 has_new_bits(afl_state_t *, u8 *);
void load_extras_file(afl_state_t *, u8 *, u32 *, u32 *, u32);
void load_extras(afl_state_t *, u8 *);
void maybe_add_auto(afl_state_t *, u8 *, u32);
void maybe_add_auto(void *, u8 *, u32);
void save_auto(afl_state_t *);
void load_auto(afl_state_t *);
void destroy_extras(afl_state_t *);

View File

@ -201,8 +201,8 @@
(first value), and to keep in memory as candidates. The latter should be much
higher than the former. */
#define USE_AUTO_EXTRAS 50
#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 10)
#define USE_AUTO_EXTRAS 128
#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 64)
/* Scaling factor for the effector map used to skip some of the more
expensive deterministic steps. The actual divisor is set to
@ -400,5 +400,15 @@
#endif
#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
/* Extended forkserver option values */
#define FS_OPT_ENABLED 0x8f000001
#define FS_OPT_MAPSIZE 0x40000000
#define FS_OPT_SNAPSHOT 0x20000000
#define FS_OPT_AUTODICT 0x10000000
#define FS_OPT_GET_MAPSIZE(x) (((x & 0x00fffffe) >> 1) + 1)
#define FS_OPT_SET_MAPSIZE(x) \
(x <= 1 || x > MAP_SIZE || x > 0x1000000 ? 0 : ((x - 1) << 1))
#endif /* ! _HAVE_CONFIG_H */
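
A minimal stand-alone sketch (MAP_SIZE fixed at the default 64 kB purely for illustration) of how these macros encode the target's map size into the extended forkserver status word and how the fuzzer side decodes it again:

```
#include <assert.h>
#include <stdint.h>

#define MAP_SIZE (1U << 16)
#define FS_OPT_MAPSIZE 0x40000000
#define FS_OPT_GET_MAPSIZE(x) ((((x) & 0x00fffffe) >> 1) + 1)
#define FS_OPT_SET_MAPSIZE(x) \
  ((x) <= 1 || (x) > MAP_SIZE || (x) > 0x1000000 ? 0 : (((x) - 1) << 1))

int main(void) {

  uint32_t map_size = 8192; /* size the target wants to report */
  uint32_t status = FS_OPT_MAPSIZE | FS_OPT_SET_MAPSIZE(map_size);

  /* afl-fuzz recovers the exact size from the status word */
  assert(FS_OPT_GET_MAPSIZE(status) == map_size);
  return 0;

}
```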

View File

@ -51,6 +51,8 @@ typedef struct afl_forkserver {
fsrv_st_fd; /* Fork server status pipe (read) */
u32 exec_tmout; /* Configurable exec timeout (ms) */
u32 map_size; /* map size used by the target */
u32 snapshot; /* is snapshot feature used */
u64 mem_limit; /* Memory cap for child (MB) */
u8 *out_file, /* File to fuzz, if any */
@ -64,6 +66,10 @@ typedef struct afl_forkserver {
u32 prev_timed_out; /* if prev forkserver run timed out */
u8 *function_opt; /* for autodictionary: afl ptr */
void (*function_ptr)(void *afl_tmp, u8 *mem, u32 len);
} afl_forkserver_t;
void afl_fsrv_init(afl_forkserver_t *fsrv);

View File

@ -273,6 +273,7 @@ endif
../afl-llvm-lto-instrumentation.so: afl-llvm-lto-instrumentation.so.cc
ifeq "$(LLVM_LTO)" "1"
$(CXX) $(CLANG_CFL) -Wno-writable-strings -fno-rtti -fPIC -std=$(LLVM_STDCXX) -shared $< -o $@ $(CLANG_LFL)
$(CC) $(CFLAGS) -O0 $(AFL_CLANG_FLTO) -fPIC -c afl-llvm-rt-lto.o.c -o ../afl-llvm-rt-lto.o
endif
# laf
@ -318,7 +319,7 @@ all_done: test_build
install: all
install -d -m 755 $${DESTDIR}$(BIN_PATH) $${DESTDIR}$(HELPER_PATH) $${DESTDIR}$(DOC_PATH) $${DESTDIR}$(MISC_PATH)
if [ -f ../afl-clang-fast -a -f ../libLLVMInsTrim.so -a -f ../afl-llvm-rt.o ]; then set -e; install -m 755 ../afl-clang-fast $${DESTDIR}$(BIN_PATH); ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-fast++; install -m 755 ../libLLVMInsTrim.so ../afl-llvm-pass.so ../afl-llvm-rt.o $${DESTDIR}$(HELPER_PATH); fi
if [ -f ../afl-clang-lto ]; then set -e; ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-lto; ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-lto++; install -m 755 ../afl-llvm-lto-instrumentation.so $${DESTDIR}$(HELPER_PATH); install -m 755 ../afl-llvm-lto-whitelist.so $${DESTDIR}$(HELPER_PATH); fi
if [ -f ../afl-clang-lto ]; then set -e; ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-lto; ln -sf afl-clang-fast $${DESTDIR}$(BIN_PATH)/afl-clang-lto++; install -m 755 ../afl-llvm-lto-instrumentation.so ../afl-llvm-rt-lto.o ../afl-llvm-lto-whitelist.so $${DESTDIR}$(HELPER_PATH); fi
if [ -f ../afl-llvm-rt-32.o ]; then set -e; install -m 755 ../afl-llvm-rt-32.o $${DESTDIR}$(HELPER_PATH); fi
if [ -f ../afl-llvm-rt-64.o ]; then set -e; install -m 755 ../afl-llvm-rt-64.o $${DESTDIR}$(HELPER_PATH); fi
if [ -f ../compare-transform-pass.so ]; then set -e; install -m 755 ../compare-transform-pass.so $${DESTDIR}$(HELPER_PATH); fi

View File

@ -12,6 +12,8 @@ This version requires a current llvm 11 compiled from the github master.
3. It only works with llvm 11 (current github master state)
4. AUTODICTIONARY feature! see below
## Introduction and problem description
A big issue with how afl/afl++ works is that the basic block IDs that are
@ -33,33 +35,22 @@ and many dead ends until we got to this:
* Our compiler (afl-clang-lto/afl-clang-lto++) takes care of setting the
correct LTO options and runs our own afl-ld linker instead of the system
linker
* Our linker collects all LTO files to link and instruments them so that
* The LLVM linker collects all LTO files to link and instruments them so that
we have non-colliding edge coverage (see the sketch below)
* We use a new (for afl) edge coverage - which is the same as in llvm
-fsanitize=coverage edge coverage mode :)
* after inserting our instrumentation in all interesting edges we link
all parts of the program together to our executable
The result:
* 10-15% speed gain compared to llvm_mode
* 10-20% speed gain compared to llvm_mode
* guaranteed non-colliding edge coverage :-)
* The compile time especially for libraries can be longer
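
For illustration only, a toy sketch (not the actual pass) of why the linker-assigned location IDs cannot collide, while the classic hashed edge indices can:

```
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAP_SIZE (1U << 16)

/* classic afl-clang-fast: random block IDs, edge index = (prev >> 1) ^ cur.
   With enough edges, two different edges can land on the same map byte. */
static uint32_t classic_edge(uint32_t prev_loc, uint32_t cur_loc) {

  return ((prev_loc >> 1) ^ cur_loc) % MAP_SIZE;

}

/* LTO scheme: the link-time pass hands every location a unique, increasing
   ID, so no two instrumented locations share a map index. */
static uint32_t afl_global_id = 1;
static uint32_t lto_location(void) { return afl_global_id++; }

int main(void) {

  srand(42);
  uint32_t a = rand() % MAP_SIZE, b = rand() % MAP_SIZE, c = rand() % MAP_SIZE;
  printf("classic edges: %u %u (may collide)\n",
         classic_edge(a, b), classic_edge(c, b));
  printf("lto ids: %u %u (always distinct)\n", lto_location(), lto_location());
  return 0;

}
```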
Example build output from a libtiff build:
```
/bin/bash ../libtool --tag=CC --mode=link afl-clang-lto -g -O2 -Wall -W -o thumbnail thumbnail.o ../libtiff/libtiff.la ../port/libport.la -llzma -ljbig -ljpeg -lz -lm
libtool: link: afl-clang-lto -g -O2 -Wall -W -o thumbnail thumbnail.o ../libtiff/.libs/libtiff.a ../port/.libs/libport.a -llzma -ljbig -ljpeg -lz -lm
afl-clang-lto++2.62d by Marc "vanHauser" Heuse <mh@mh-sec.de>
afl-ld++2.62d by Marc "vanHauser" Heuse <mh@mh-sec.de> (level 0)
[+] Running ar unpacker on /prg/tests/lto/tiff-4.0.4/tools/../libtiff/.libs/libtiff.a into /tmp/.afl-3914343-1583339800.dir
[+] Running ar unpacker on /prg/tests/lto/tiff-4.0.4/tools/../port/.libs/libport.a into /tmp/.afl-3914343-1583339800.dir
[+] Running bitcode linker, creating /tmp/.afl-3914343-1583339800-1.ll
[+] Performing optimization via opt, creating /tmp/.afl-3914343-1583339800-2.bc
[+] Performing instrumentation via opt, creating /tmp/.afl-3914343-1583339800-3.bc
afl-llvm-lto++2.62d by Marc "vanHauser" Heuse <mh@mh-sec.de>
[+] Instrumented 15833 locations with no collisions (on average 1767 collisions would be in afl-gcc/afl-clang-fast) (non-hardened mode).
[+] Running real linker /bin/x86_64-linux-gnu-ld
[+] Linker was successful
afl-clang-lto++2.63d by Marc "vanHauser" Heuse <mh@mh-sec.de> in mode LTO
afl-llvm-lto++2.63d by Marc "vanHauser" Heuse <mh@mh-sec.de>
[+] Instrumented 11836 locations with no collisions (on average 1007 collisions would be in afl-gcc/afl-clang-fast) (non-hardened mode).
```
## Building llvm 11
@ -70,8 +61,8 @@ $ git clone https://github.com/llvm/llvm-project
$ cd llvm-project
$ mkdir build
$ cd build
$ cmake -DLLVM_ENABLE_PROJECTS='clang;clang-tools-extra;compiler-rt;libclc;libcxx;libcxxabi;libunwind;lld' -DLLVM_BINUTILS_INCDIR=/usr/include/ ../llvm/
$ make
$ cmake -DLLVM_ENABLE_PROJECTS='clang;clang-tools-extra;compiler-rt;libclc;libcxx;libcxxabi;libunwind;lld' -DCMAKE_BUILD_TYPE=Release -DLLVM_BINUTILS_INCDIR=/usr/include/ ../llvm/
$ make -j $(nproc)
$ export PATH=`pwd`/bin:$PATH
$ export LLVM_CONFIG=`pwd`/bin/llvm-config
$ cd /path/to/AFLplusplus/
@ -96,6 +87,13 @@ CC=afl-clang-lto CXX=afl-clang-lto++ ./configure
make
```
## AUTODICTIONARY feature
Setting `AFL_LLVM_LTO_AUTODICTIONARY` will generate a dictionary in the
target binary based on string compare and memory compare functions.
afl-fuzz will automatically get these transmitted when starting to fuzz.
This improves coverage on a lot of targets.
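
As a hypothetical example, a target like the following is where the autodictionary pays off: the constant passed to memcmp() ends up in the embedded dictionary, so afl-fuzz starts with the magic value instead of having to discover it byte by byte:

```
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void) {

  char buf[32] = {0};
  if (read(0, buf, sizeof(buf) - 1) <= 0) return 0;

  /* "SECRET_HEADER" is extracted by the autodictionary pass and handed to
     afl-fuzz at startup */
  if (memcmp(buf, "SECRET_HEADER", 13) == 0) puts("magic found");
  return 0;

}
```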
## Potential issues
### compiling libraries fails
@ -121,11 +119,8 @@ Please report issues at:
## Upcoming Work
1. Currently the LTO whitelist feature does not allow to not instrument main, start and init functions
2. Modify the forkserver + afl-fuzz so that only the necessary map size is
loaded and used - and communicated to afl-fuzz too.
Result: faster fork in the target and faster map analysis in afl-fuzz
=> more speed :-)
1. Currently the LTO whitelist feature does not allow skipping instrumentation
of main, start and init functions
## History

View File

@ -477,6 +477,9 @@ static void edit_params(u32 argc, char **argv, char **envp) {
}
if (instrument_mode == INSTRUMENT_LTO)
cc_params[cc_par_cnt++] = alloc_printf("%s/afl-llvm-rt-lto.o", obj_path);
#ifndef __ANDROID__
switch (bit_mode) {

View File

@ -38,17 +38,24 @@
#include <sys/time.h>
#include "llvm/Config/llvm-config.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Pass.h"
#include <set>
using namespace llvm;
@ -145,7 +152,7 @@ class AFLLTOPass : public ModulePass {
bool runOnModule(Module &M) override;
protected:
int afl_global_id = 1, debug = 0;
int afl_global_id = 1, debug = 0, autodictionary = 0;
uint32_t be_quiet = 0, inst_blocks = 0, inst_funcs = 0, total_instr = 0;
};
@ -154,7 +161,9 @@ class AFLLTOPass : public ModulePass {
bool AFLLTOPass::runOnModule(Module &M) {
LLVMContext &C = M.getContext();
LLVMContext & C = M.getContext();
std::vector<std::string> dictionary;
std::vector<CallInst *> calls;
IntegerType *Int8Ty = IntegerType::getInt8Ty(C);
IntegerType *Int32Ty = IntegerType::getInt32Ty(C);
@ -172,6 +181,10 @@ bool AFLLTOPass::runOnModule(Module &M) {
be_quiet = 1;
if (getenv("AFL_LLVM_AUTODICTIONARY") ||
getenv("AFL_LLVM_LTO_AUTODICTIONARY"))
autodictionary = 1;
/* Get globals for the SHM region and the previous location. Note that
__afl_prev_loc is thread-local. */
@ -193,6 +206,110 @@ bool AFLLTOPass::runOnModule(Module &M) {
std::vector<BasicBlock *> InsBlocks;
if (autodictionary) {
for (auto &BB : F) {
for (auto &IN : BB) {
CallInst *callInst = nullptr;
if ((callInst = dyn_cast<CallInst>(&IN))) {
bool isStrcmp = true;
bool isMemcmp = true;
bool isStrncmp = true;
bool isStrcasecmp = true;
bool isStrncasecmp = true;
Function *Callee = callInst->getCalledFunction();
if (!Callee) continue;
if (callInst->getCallingConv() != llvm::CallingConv::C) continue;
StringRef FuncName = Callee->getName();
isStrcmp &= !FuncName.compare(StringRef("strcmp"));
isMemcmp &= !FuncName.compare(StringRef("memcmp"));
isStrncmp &= !FuncName.compare(StringRef("strncmp"));
isStrcasecmp &= !FuncName.compare(StringRef("strcasecmp"));
isStrncasecmp &= !FuncName.compare(StringRef("strncasecmp"));
if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp &&
!isStrncasecmp)
continue;
/* Verify the strcmp/memcmp/strncmp/strcasecmp/strncasecmp function
* prototype */
FunctionType *FT = Callee->getFunctionType();
isStrcmp &= FT->getNumParams() == 2 &&
FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0) == FT->getParamType(1) &&
FT->getParamType(0) ==
IntegerType::getInt8PtrTy(M.getContext());
isStrcasecmp &= FT->getNumParams() == 2 &&
FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0) == FT->getParamType(1) &&
FT->getParamType(0) ==
IntegerType::getInt8PtrTy(M.getContext());
isMemcmp &= FT->getNumParams() == 3 &&
FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0)->isPointerTy() &&
FT->getParamType(1)->isPointerTy() &&
FT->getParamType(2)->isIntegerTy();
isStrncmp &= FT->getNumParams() == 3 &&
FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0) == FT->getParamType(1) &&
FT->getParamType(0) ==
IntegerType::getInt8PtrTy(M.getContext()) &&
FT->getParamType(2)->isIntegerTy();
isStrncasecmp &= FT->getNumParams() == 3 &&
FT->getReturnType()->isIntegerTy(32) &&
FT->getParamType(0) == FT->getParamType(1) &&
FT->getParamType(0) ==
IntegerType::getInt8PtrTy(M.getContext()) &&
FT->getParamType(2)->isIntegerTy();
if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp &&
!isStrncasecmp)
continue;
/* is a str{n,}{case,}cmp/memcmp, check if we have
* str{case,}cmp(x, "const") or str{case,}cmp("const", x)
* strn{case,}cmp(x, "const", ..) or strn{case,}cmp("const", x, ..)
* memcmp(x, "const", ..) or memcmp("const", x, ..) */
Value *Str1P = callInst->getArgOperand(0),
*Str2P = callInst->getArgOperand(1);
StringRef Str1, Str2;
bool HasStr1 = getConstantStringInfo(Str1P, Str1);
bool HasStr2 = getConstantStringInfo(Str2P, Str2);
/* handle cases of one string is const, one string is variable */
if (!(HasStr1 ^ HasStr2)) continue;
if (isMemcmp || isStrncmp || isStrncasecmp) {
/* check if third operand is a constant integer
* strlen("constStr") and sizeof() are treated as constant */
Value * op2 = callInst->getArgOperand(2);
ConstantInt *ilen = dyn_cast<ConstantInt>(op2);
if (!ilen) continue;
/* final precaution: if size of compare is larger than constant
* string skip it*/
uint64_t literalLength =
HasStr1 ? GetStringLength(Str1P) : GetStringLength(Str2P);
if (literalLength < ilen->getZExtValue()) continue;
}
calls.push_back(callInst);
}
}
}
}
for (auto &BB : F) {
uint32_t succ = 0;
@ -282,32 +399,201 @@ bool AFLLTOPass::runOnModule(Module &M) {
}
}
// save highest location ID to global variable
// do this after each function to fail faster
if (afl_global_id > MAP_SIZE) {
// save highest location ID to global variable
uint32_t pow2map = 1, map = afl_global_id;
while ((map = map >> 1))
pow2map++;
FATAL(
"We have %u blocks to instrument but the map size is only %u! Edit "
"config.h and set MAP_SIZE_POW2 from %u to %u, then recompile "
"afl-fuzz and llvm_mode.",
afl_global_id, MAP_SIZE, MAP_SIZE_POW2, pow2map);
if (afl_global_id > MAP_SIZE) {
uint32_t pow2map = 1, map = afl_global_id;
while ((map = map >> 1))
pow2map++;
FATAL(
"We have %u blocks to instrument but the map size is only %u! Edit "
"config.h and set MAP_SIZE_POW2 from %u to %u, then recompile "
"afl-fuzz and llvm_mode.",
afl_global_id, MAP_SIZE, MAP_SIZE_POW2, pow2map);
}
}
if (getenv("AFL_LLVM_LTO_DONTWRITEID") == NULL) {
if (calls.size()) {
GlobalVariable *AFLFinalLoc = new GlobalVariable(
M, Int32Ty, true, GlobalValue::ExternalLinkage, 0, "__afl_final_loc", 0,
GlobalVariable::GeneralDynamicTLSModel, 0, false);
ConstantInt *const_loc = ConstantInt::get(Int32Ty, afl_global_id);
MaybeAlign Align = MaybeAlign(4);
AFLFinalLoc->setAlignment(Align);
AFLFinalLoc->setInitializer(const_loc);
for (auto &callInst : calls) {
Value *Str1P = callInst->getArgOperand(0),
*Str2P = callInst->getArgOperand(1);
StringRef Str1, Str2, ConstStr;
std::string TmpConstStr;
Value * VarStr;
bool HasStr1 = getConstantStringInfo(Str1P, Str1);
getConstantStringInfo(Str2P, Str2);
uint64_t constLen, sizedLen;
bool isMemcmp = !callInst->getCalledFunction()->getName().compare(
StringRef("memcmp"));
bool isSizedcmp = isMemcmp ||
!callInst->getCalledFunction()->getName().compare(
StringRef("strncmp")) ||
!callInst->getCalledFunction()->getName().compare(
StringRef("strncasecmp"));
if (isSizedcmp) {
Value * op2 = callInst->getArgOperand(2);
ConstantInt *ilen = dyn_cast<ConstantInt>(op2);
sizedLen = ilen->getZExtValue();
} else {
sizedLen = 0;
}
if (HasStr1) {
TmpConstStr = Str1.str();
VarStr = Str2P;
constLen = isMemcmp ? sizedLen : GetStringLength(Str1P);
} else {
TmpConstStr = Str2.str();
VarStr = Str1P;
constLen = isMemcmp ? sizedLen : GetStringLength(Str2P);
}
/* properly handle zero terminated C strings by adding the terminating 0
* to the StringRef (in comparison to std::string a StringRef has built-in
* runtime bounds checking, which makes debugging easier) */
TmpConstStr.append("\0", 1);
ConstStr = StringRef(TmpConstStr);
if (isSizedcmp && constLen > sizedLen) { constLen = sizedLen; }
/*
if (!be_quiet)
errs() << callInst->getCalledFunction()->getName() << ": len "
<< constLen << ": " << ConstStr << "\n";
*/
if (constLen && constLen < MAX_DICT_FILE)
dictionary.push_back(ConstStr.str().substr(0, constLen));
}
}
if (getenv("AFL_LLVM_LTO_DONTWRITEID") == NULL || dictionary.size()) {
// yes we could create our own function, insert it into ctors ...
// but this would be a pain in the butt ... so we use afl-llvm-rt-lto.o
Function *f = M.getFunction("__afl_auto_init_globals");
if (!f) {
fprintf(stderr,
"Error: init function could not be found (this should not "
"happen)\n");
exit(-1);
}
BasicBlock *bb = &f->getEntryBlock();
if (!bb) {
fprintf(stderr,
"Error: init function does not have an EntryBlock (this should "
"not happen)\n");
exit(-1);
}
BasicBlock::iterator IP = bb->getFirstInsertionPt();
IRBuilder<> IRB(&(*IP));
if (getenv("AFL_LLVM_LTO_DONTWRITEID") == NULL) {
GlobalVariable *AFLFinalLoc = new GlobalVariable(
M, Int32Ty, true, GlobalValue::ExternalLinkage, 0, "__afl_final_loc",
0, GlobalVariable::GeneralDynamicTLSModel, 0, false);
ConstantInt *const_loc = ConstantInt::get(Int32Ty, (((afl_global_id + 8) >> 3) << 3));
StoreInst * StoreFinalLoc = IRB.CreateStore(const_loc, AFLFinalLoc);
StoreFinalLoc->setMetadata(M.getMDKindID("nosanitize"),
MDNode::get(C, None));
}
if (dictionary.size()) {
size_t memlen = 0, count = 0, offset = 0;
char * ptr;
for (auto token : dictionary) {
memlen += token.length();
count++;
}
if (!be_quiet) printf("AUTODICTIONARY: %lu strings found\n", count);
if (count) {
if ((ptr = (char *)malloc(memlen + count)) == NULL) {
fprintf(stderr, "Error: malloc for %lu bytes failed!\n",
memlen + count);
exit(-1);
}
for (auto token : dictionary) {
if (offset + token.length() < 0xfffff0) {
ptr[offset++] = (uint8_t)token.length();
memcpy(ptr + offset, token.c_str(), token.length());
offset += token.length();
}
}
GlobalVariable *AFLDictionaryLen = new GlobalVariable(
M, Int32Ty, false, GlobalValue::ExternalLinkage, 0,
"__afl_dictionary_len", 0, GlobalVariable::GeneralDynamicTLSModel,
0, false);
ConstantInt *const_len = ConstantInt::get(Int32Ty, offset);
StoreInst *StoreDictLen = IRB.CreateStore(const_len, AFLDictionaryLen);
StoreDictLen->setMetadata(M.getMDKindID("nosanitize"),
MDNode::get(C, None));
ArrayType *ArrayTy = ArrayType::get(IntegerType::get(C, 8), offset);
GlobalVariable *AFLInternalDictionary = new GlobalVariable(
M, ArrayTy, true, GlobalValue::ExternalLinkage,
ConstantDataArray::get(C,
*(new ArrayRef<char>((char *)ptr, offset))),
"__afl_internal_dictionary", 0,
GlobalVariable::GeneralDynamicTLSModel, 0, false);
AFLInternalDictionary->setInitializer(ConstantDataArray::get(
C, *(new ArrayRef<char>((char *)ptr, offset))));
AFLInternalDictionary->setConstant(true);
GlobalVariable *AFLDictionary = new GlobalVariable(
M, PointerType::get(Int8Ty, 0), false, GlobalValue::ExternalLinkage,
0, "__afl_dictionary");
Value *AFLDictOff = IRB.CreateGEP(AFLInternalDictionary, Zero);
Value *AFLDictPtr =
IRB.CreatePointerCast(AFLDictOff, PointerType::get(Int8Ty, 0));
StoreInst *StoreDict = IRB.CreateStore(AFLDictPtr, AFLDictionary);
StoreDict->setMetadata(M.getMDKindID("nosanitize"),
MDNode::get(C, None));
}
}
}
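
The dictionary blob written above uses a simple length-prefixed layout: one length byte per token, followed by the token bytes. A small stand-alone sketch of that packing and of the walk afl-fuzz later performs over the blob (token values are made up):

```
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* append one token as [length byte][token bytes], as the pass does above
   (the terminating \0 of constant C strings is kept) */
static size_t pack_token(uint8_t *buf, const char *tok) {

  size_t len = strlen(tok) + 1;
  buf[0] = (uint8_t)len;
  memcpy(buf + 1, tok, len);
  return len + 1;

}

int main(void) {

  uint8_t dict[64];
  size_t off = 0;
  off += pack_token(dict + off, "GIF89a");
  off += pack_token(dict + off, "%PDF-");

  /* consumer side: walk the blob the same way afl-fuzz does */
  for (size_t pos = 0; pos < off && dict[pos] + pos < off;) {

    printf("token of %u bytes\n", (unsigned)dict[pos]);
    pos += 1u + dict[pos];

  }

  return 0;

}
```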

View File

@ -50,11 +50,7 @@
Basically, we need to make sure that the forkserver is initialized after
the LLVM-generated runtime initialization pass, not before. */
#ifdef USE_TRACE_PC
#define CONST_PRIO 5
#else
#define CONST_PRIO 0
#endif /* ^USE_TRACE_PC */
#include <sys/mman.h>
#include <fcntl.h>
@ -65,17 +61,20 @@
u8 __afl_area_initial[MAP_SIZE];
u8 *__afl_area_ptr = __afl_area_initial;
u8 *__afl_dictionary;
#ifdef __ANDROID__
PREV_LOC_T __afl_prev_loc[NGRAM_SIZE_MAX];
u32 __afl_final_loc;
u32 __afl_prev_ctx;
u32 __afl_cmp_counter;
u32 __afl_dictionary_len;
#else
__thread PREV_LOC_T __afl_prev_loc[NGRAM_SIZE_MAX];
__thread u32 __afl_final_loc;
__thread u32 __afl_prev_ctx;
__thread u32 __afl_cmp_counter;
__thread u32 __afl_dictionary_len;
#endif
struct cmp_map *__afl_cmp_map;
@ -100,6 +99,10 @@ static void __afl_map_shm(void) {
const char * shm_file_path = id_str;
int shm_fd = -1;
unsigned char *shm_base = NULL;
unsigned int map_size = MAP_SIZE;
if (__afl_final_loc > 1 && __afl_final_loc < MAP_SIZE)
  map_size = __afl_final_loc;
/* create the shared memory segment as if it was a file */
shm_fd = shm_open(shm_file_path, O_RDWR, 0600);
@ -111,7 +114,7 @@ static void __afl_map_shm(void) {
}
/* map the shared memory segment to the address space of the process */
shm_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
shm_base = mmap(0, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
if (shm_base == MAP_FAILED) {
close(shm_fd);
@ -187,8 +190,15 @@ static void __afl_map_shm(void) {
#ifdef __linux__
static void __afl_start_snapshots(void) {
static u8 tmp[4];
static u8 tmp[4] = {0, 0, 0, 0};
s32 child_pid;
u32 status = 0;
u32 map_size = MAP_SIZE;
u32 already_read_first = 0;
u32 was_killed;
if (__afl_final_loc > 1 && __afl_final_loc < MAP_SIZE)
map_size = __afl_final_loc;
u8 child_stopped = 0;
@ -197,16 +207,74 @@ static void __afl_start_snapshots(void) {
/* Phone home and tell the parent that we're OK. If parent isn't there,
assume we're not running in forkserver mode and just execute program. */
status |= (FS_OPT_ENABLED | FS_OPT_SNAPSHOT);
if (map_size <= 0x1000000)
status |= (FS_OPT_SET_MAPSIZE(map_size) | FS_OPT_MAPSIZE);
if (__afl_dictionary_len > 0 && __afl_dictionary) status |= FS_OPT_AUTODICT;
memcpy(tmp, &status, 4);
if (write(FORKSRV_FD + 1, tmp, 4) != 4) return;
if (__afl_dictionary_len > 0 && __afl_dictionary) {
if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
if ((was_killed & (FS_OPT_ENABLED | FS_OPT_AUTODICT)) ==
(FS_OPT_ENABLED | FS_OPT_AUTODICT)) {
// great lets pass the dictionary through the forkserver FD
u32 len = __afl_dictionary_len, offset = 0;
s32 ret;
if (write(FORKSRV_FD + 1, &len, 4) != 4) {
write(2, "Error: could not send dictionary len\n",
strlen("Error: could not send dictionary len\n"));
_exit(1);
}
while (len != 0) {
ret = write(FORKSRV_FD + 1, __afl_dictionary + offset, len);
if (ret < 1) {
write(2, "Error: could not send dictionary\n",
strlen("Error: could not send dictionary\n"));
_exit(1);
}
len -= ret;
offset += ret;
}
} else {
// uh this forkserver master does not understand extended option passing
// or does not want the dictionary
already_read_first = 1;
}
}
while (1) {
u32 was_killed;
int status;
/* Wait for parent by reading from the pipe. Abort if read fails. */
if (already_read_first) {
if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
already_read_first = 0;
} else {
/* Wait for parent by reading from the pipe. Abort if read fails. */
if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
}
/* If we stopped the child in persistent mode, but there was a race
condition and afl-fuzz already issued SIGKILL, write off the old
@ -291,26 +359,92 @@ static void __afl_start_forkserver(void) {
#endif
static u8 tmp[4];
s32 child_pid;
u8 tmp[4] = {0, 0, 0, 0};
s32 child_pid;
u32 status = 0;
u32 map_size = MAP_SIZE;
u32 already_read_first = 0;
u32 was_killed;
if (__afl_final_loc > 1 && __afl_final_loc < MAP_SIZE)
map_size = __afl_final_loc;
u8 child_stopped = 0;
void (*old_sigchld_handler)(int) = 0; // = signal(SIGCHLD, SIG_DFL);
if (map_size <= 0x1000000)
status |= (FS_OPT_SET_MAPSIZE(map_size) | FS_OPT_MAPSIZE);
if (__afl_dictionary_len > 0 && __afl_dictionary) status |= FS_OPT_AUTODICT;
if (status) status |= (FS_OPT_ENABLED);
memcpy(tmp, &status, 4);
/* Phone home and tell the parent that we're OK. If parent isn't there,
assume we're not running in forkserver mode and just execute program. */
if (write(FORKSRV_FD + 1, tmp, 4) != 4) return;
if (__afl_dictionary_len > 0 && __afl_dictionary) {
if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
if ((was_killed & (FS_OPT_ENABLED | FS_OPT_AUTODICT)) ==
(FS_OPT_ENABLED | FS_OPT_AUTODICT)) {
// great lets pass the dictionary through the forkserver FD
u32 len = __afl_dictionary_len, offset = 0;
s32 ret;
if (write(FORKSRV_FD + 1, &len, 4) != 4) {
write(2, "Error: could not send dictionary len\n",
strlen("Error: could not send dictionary len\n"));
_exit(1);
}
while (len != 0) {
ret = write(FORKSRV_FD + 1, __afl_dictionary + offset, len);
if (ret < 1) {
write(2, "Error: could not send dictionary\n",
strlen("Error: could not send dictionary\n"));
_exit(1);
}
len -= ret;
offset += ret;
}
} else {
// uh this forkserver master does not understand extended option passing
// or does not want the dictionary
already_read_first = 1;
}
}
while (1) {
u32 was_killed;
int status;
/* Wait for parent by reading from the pipe. Abort if read fails. */
if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
if (already_read_first) {
already_read_first = 0;
} else {
if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
}
/* If we stopped the child in persistent mode, but there was a race
condition and afl-fuzz already issued SIGKILL, write off the old
@ -378,8 +512,12 @@ static void __afl_start_forkserver(void) {
int __afl_persistent_loop(unsigned int max_cnt) {
static u8 first_pass = 1;
static u32 cycle_cnt;
static u8 first_pass = 1;
static u32 cycle_cnt;
unsigned int map_size = MAP_SIZE;
if (__afl_final_loc > 1 && __afl_final_loc < MAP_SIZE)
map_size = __afl_final_loc;
if (first_pass) {
@ -390,7 +528,7 @@ int __afl_persistent_loop(unsigned int max_cnt) {
if (is_persistent) {
memset(__afl_area_ptr, 0, MAP_SIZE);
memset(__afl_area_ptr, 0, map_size);
__afl_area_ptr[0] = 1;
memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));

View File

@ -293,7 +293,7 @@ static void print_mappings(void) {
void afl_forkserver(CPUState *cpu) {
static unsigned char tmp[4];
static unsigned char tmp[4] = {0, 0, 0, 0};
if (forkserver_installed == 1) return;
forkserver_installed = 1;

View File

@ -59,6 +59,7 @@ char *afl_environment_variables[] = {
"AFL_LD_HARD_FAIL", "AFL_LD_LIMIT_MB", "AFL_LD_NO_CALLOC_OVER",
"AFL_LD_PRELOAD", "AFL_LD_VERBOSE", "AFL_LLVM_CMPLOG", "AFL_LLVM_INSTRIM",
"AFL_LLVM_CTX", "AFL_LLVM_INSTRUMENT", "AFL_LLVM_INSTRIM_LOOPHEAD",
"AFL_LLVM_LTO_AUTODICTIONARY", "AFL_LLVM_AUTODICTIONARY",
"AFL_LLVM_INSTRIM_SKIPSINGLEBLOCK", "AFL_LLVM_LAF_SPLIT_COMPARES",
"AFL_LLVM_LAF_SPLIT_COMPARES_BITW", "AFL_LLVM_LAF_SPLIT_FLOATS",
"AFL_LLVM_LAF_SPLIT_SWITCHES", "AFL_LLVM_LAF_TRANSFORM_COMPARES",

View File

@ -69,7 +69,7 @@ void afl_fsrv_init(afl_forkserver_t *fsrv) {
fsrv->mem_limit = MEM_LIMIT;
fsrv->child_pid = -1;
fsrv->out_dir_fd = -1;
fsrv->map_size = MAP_SIZE;
fsrv->use_fauxsrv = 0;
fsrv->prev_timed_out = 0;
@ -82,7 +82,7 @@ void afl_fsrv_init(afl_forkserver_t *fsrv) {
static void afl_fauxsrv_execv(afl_forkserver_t *fsrv, char **argv) {
unsigned char tmp[4] = {0};
unsigned char tmp[4] = {0, 0, 0, 0};
pid_t child_pid = -1;
/* Phone home and tell the parent that we're OK. If parent isn't there,
@ -167,9 +167,9 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
int status;
s32 rlen;
if (fsrv->use_fauxsrv) ACTF("Using Fauxserver:");
if (!be_quiet) ACTF("Using Fauxserver:");
if (!getenv("AFL_QUIET")) ACTF("Spinning up the fork server...");
if (!be_quiet) ACTF("Spinning up the fork server...");
if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed");
@ -340,7 +340,93 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
if (rlen == 4) {
if (!getenv("AFL_QUIET")) OKF("All right - fork server is up.");
if (!be_quiet) OKF("All right - fork server is up.");
if ((status & FS_OPT_ENABLED) == FS_OPT_ENABLED) {
if (!be_quiet)
ACTF("Extended forkserver functions received (%08x).", status);
if ((status & FS_OPT_SNAPSHOT) == FS_OPT_SNAPSHOT) {
fsrv->snapshot = 1;
if (!be_quiet) ACTF("Using SNAPSHOT feature.");
}
if ((status & FS_OPT_MAPSIZE) == FS_OPT_MAPSIZE) {
fsrv->map_size = FS_OPT_GET_MAPSIZE(status);
if (fsrv->map_size % 8)
fsrv->map_size = (((fsrv->map_size + 8) >> 3) << 3);
if (!be_quiet) ACTF("Target map size: %u", fsrv->map_size);
}
if (fsrv->function_ptr == NULL || fsrv->function_opt == NULL) {
// this is not afl-fuzz - we deny and return
status = (0xffffffff ^ (FS_OPT_ENABLED | FS_OPT_AUTODICT));
if (write(fsrv->fsrv_ctl_fd, &status, 4) != 4)
FATAL("Writing to forkserver failed.");
return;
}
if ((status & FS_OPT_AUTODICT) == FS_OPT_AUTODICT) {
if (!be_quiet) ACTF("Using AUTODICT feature.");
status = (FS_OPT_ENABLED | FS_OPT_AUTODICT);
if (write(fsrv->fsrv_ctl_fd, &status, 4) != 4)
FATAL("Writing to forkserver failed.");
if (read(fsrv->fsrv_st_fd, &status, 4) != 4)
FATAL("Reading from forkserver failed.");
if (status < 2 || (u32)status > 0xffffff)
FATAL("Dictionary has an illegal size: %d", status);
u32 len = status, offset = 0, count = 0;
u8 *dict = ck_alloc(len);
if (dict == NULL)
FATAL("Could not allocate %u bytes of autodictionary memory", len);
while (len != 0) {
rlen = read(fsrv->fsrv_st_fd, dict + offset, len);
if (rlen > 0) {
len -= rlen;
offset += rlen;
} else {
FATAL(
"Reading autodictionary failed at position %u with %u bytes "
"left.",
offset, len);
}
}
len = status;
offset = 0;
while (offset < status && (u8)dict[offset] + offset < status) {
fsrv->function_ptr(fsrv->function_opt, dict + offset + 1,
(u8)dict[offset]);
offset += (1 + dict[offset]);
count++;
}
if (!be_quiet) ACTF("Loaded %u autodictionary entries", count);
ck_free(dict);
}
}
return;
}
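
A stand-alone sketch of the 64-bit alignment applied to the received map size above: a size that is not a multiple of 8 is rounded up, so the target and afl-fuzz both operate on the same word-aligned map length:

```
#include <assert.h>
#include <stdint.h>

static uint32_t align_map_size(uint32_t map_size) {

  if (map_size % 8) map_size = ((map_size + 8) >> 3) << 3; /* round up */
  return map_size;

}

int main(void) {

  assert(align_map_size(8192) == 8192); /* already aligned: unchanged */
  assert(align_map_size(8193) == 8200); /* rounded up to a multiple of 8 */
  return 0;

}
```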

View File

@ -78,16 +78,17 @@ u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {
u64 *current = (u64 *)afl->fsrv.trace_bits;
u64 *virgin = (u64 *)virgin_map;
u32 i = (MAP_SIZE >> 3);
u32 i = (afl->fsrv.map_size >> 3);
#else
u32 *current = (u32 *)afl->fsrv.trace_bits;
u32 *virgin = (u32 *)virgin_map;
u32 i = (MAP_SIZE >> 2);
u32 i = (afl->fsrv.map_size >> 2);
#endif /* ^WORD_SIZE_64 */
if (i == 0) i = 1;
u8 ret = 0;
@ -148,12 +149,14 @@ u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {
/* Count the number of bits set in the provided bitmap. Used for the status
screen several times every second, does not have to be fast. */
u32 count_bits(u8 *mem) {
u32 count_bits(afl_state_t *afl, u8 *mem) {
u32 *ptr = (u32 *)mem;
u32 i = (MAP_SIZE >> 2);
u32 i = (afl->fsrv.map_size >> 2);
u32 ret = 0;
if (i == 0) i = 1;
while (i--) {
u32 v = *(ptr++);
@ -182,12 +185,14 @@ u32 count_bits(u8 *mem) {
mostly to update the status screen or calibrate and examine confirmed
new paths. */
u32 count_bytes(u8 *mem) {
u32 count_bytes(afl_state_t *afl, u8 *mem) {
u32 *ptr = (u32 *)mem;
u32 i = (MAP_SIZE >> 2);
u32 i = (afl->fsrv.map_size >> 2);
u32 ret = 0;
if (i == 0) i = 1;
while (i--) {
u32 v = *(ptr++);
@ -207,12 +212,14 @@ u32 count_bytes(u8 *mem) {
/* Count the number of non-255 bytes set in the bitmap. Used strictly for the
status screen, several calls per second or so. */
u32 count_non_255_bytes(u8 *mem) {
u32 count_non_255_bytes(afl_state_t *afl, u8 *mem) {
u32 *ptr = (u32 *)mem;
u32 i = (MAP_SIZE >> 2);
u32 i = (afl->fsrv.map_size >> 2);
u32 ret = 0;
if (i == 0) i = 1;
while (i--) {
u32 v = *(ptr++);
@ -245,9 +252,11 @@ const u8 simplify_lookup[256] = {
#ifdef WORD_SIZE_64
void simplify_trace(u64 *mem) {
void simplify_trace(afl_state_t *afl, u64 *mem) {
u32 i = MAP_SIZE >> 3;
u32 i = (afl->fsrv.map_size >> 3);
if (i == 0) i = 1;
while (i--) {
@ -278,9 +287,11 @@ void simplify_trace(u64 *mem) {
#else
void simplify_trace(u32 *mem) {
void simplify_trace(afl_state_t *afl, u32 *mem) {
u32 i = MAP_SIZE >> 2;
u32 i = (afl->fsrv.map_size >> 2);
if (i == 0) i = 1;
while (i--) {
@ -340,9 +351,11 @@ void init_count_class16(void) {
#ifdef WORD_SIZE_64
void classify_counts(u64 *mem) {
void classify_counts(afl_state_t *afl, u64 *mem) {
u32 i = MAP_SIZE >> 3;
u32 i = (afl->fsrv.map_size >> 3);
if (i == 0) i = 1;
while (i--) {
@ -367,9 +380,11 @@ void classify_counts(u64 *mem) {
#else
void classify_counts(u32 *mem) {
void classify_counts(afl_state_t *afl, u32 *mem) {
u32 i = MAP_SIZE >> 2;
u32 i = (afl->fsrv.map_size >> 2);
if (i == 0) i = 1;
while (i--) {
@ -396,11 +411,11 @@ void classify_counts(u32 *mem) {
count information here. This is called only sporadically, for some
new paths. */
void minimize_bits(u8 *dst, u8 *src) {
void minimize_bits(afl_state_t *afl, u8 *dst, u8 *src) {
u32 i = 0;
while (i < MAP_SIZE) {
while (i < afl->fsrv.map_size) {
if (*(src++)) dst[i >> 3] |= 1 << (i & 7);
++i;
@ -527,7 +542,7 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
u8 fn[PATH_MAX];
/* Update path frequency. */
u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
struct queue_entry *q = afl->queue;
while (q) {
@ -611,9 +626,9 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
if (likely(!afl->dumb_mode)) {
#ifdef WORD_SIZE_64
simplify_trace((u64 *)afl->fsrv.trace_bits);
simplify_trace(afl, (u64 *)afl->fsrv.trace_bits);
#else
simplify_trace((u32 *)afl->fsrv.trace_bits);
simplify_trace(afl, (u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
if (!has_new_bits(afl, afl->virgin_tmout)) return keeping;
@ -675,9 +690,9 @@ u8 save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
if (likely(!afl->dumb_mode)) {
#ifdef WORD_SIZE_64
simplify_trace((u64 *)afl->fsrv.trace_bits);
simplify_trace(afl, (u64 *)afl->fsrv.trace_bits);
#else
simplify_trace((u32 *)afl->fsrv.trace_bits);
simplify_trace(afl, (u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
if (!has_new_bits(afl, afl->virgin_crash)) return keeping;
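
A stand-alone sketch (hypothetical names) of the minimization these map_size-aware functions perform: the per-entry trace is squeezed down to one bit per map entry, which is why trace_mini is now allocated as map_size >> 3 bytes:

```
#include <stdint.h>
#include <stdio.h>

static void minimize_bits_sketch(uint8_t *dst, const uint8_t *src,
                                 uint32_t map_size) {

  for (uint32_t i = 0; i < map_size; i++)
    if (src[i]) dst[i >> 3] |= 1u << (i & 7); /* any hit -> one bit set */

}

int main(void) {

  uint8_t trace[16] = {0}, mini[2] = {0};
  trace[3] = 4;
  trace[9] = 128; /* two covered map entries */
  minimize_bits_sketch(mini, trace, sizeof(trace));
  printf("mini bytes: %02x %02x\n", mini[0], mini[1]); /* 08 02 */
  return 0;

}
```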

View File

@ -389,7 +389,7 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
must prevent any earlier operations from venturing into that
territory. */
memset(afl->fsrv.trace_bits, 0, MAP_SIZE);
memset(afl->fsrv.trace_bits, 0, afl->fsrv.map_size);
MEM_BARRIER();
/* Since we always have a forkserver (or a fauxserver) running, we can simply
@ -469,9 +469,9 @@ u8 run_cmplog_target(afl_state_t *afl, u32 timeout) {
tb4 = *(u32 *)afl->fsrv.trace_bits;
#ifdef WORD_SIZE_64
classify_counts((u64 *)afl->fsrv.trace_bits);
classify_counts(afl, (u64 *)afl->fsrv.trace_bits);
#else
classify_counts((u32 *)afl->fsrv.trace_bits);
classify_counts(afl, (u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
afl->cmplog_prev_timed_out = afl->fsrv.child_timed_out;

View File

@ -305,10 +305,14 @@ static inline u8 memcmp_nocase(u8 *m1, u8 *m2, u32 len) {
}
/* Maybe add automatic extra. */
/* Ugly hack: afl state is transferred as u8* because we import data via
afl-forkserver.c - which is shared with other afl tools that do not
have the afl state struct */
void maybe_add_auto(afl_state_t *afl, u8 *mem, u32 len) {
void maybe_add_auto(void *afl_tmp, u8 *mem, u32 len) {
u32 i;
afl_state_t *afl = (afl_state_t *)afl_tmp;
u32 i;
/* Allow users to specify that they don't want auto dictionaries. */
@ -469,7 +473,7 @@ void load_auto(afl_state_t *afl) {
if (len < 0) PFATAL("Unable to read from '%s'", fn);
if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA)
maybe_add_auto(afl, tmp, len);
maybe_add_auto((u8 *)afl, tmp, len);
close(fd);
ck_free(fn);
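
A stand-alone sketch (all names hypothetical) of the decoupling described in that comment: the forkserver keeps only an opaque pointer plus a callback, so afl-forkserver.c can be shared with tools that never see afl_state_t:

```
#include <stdint.h>
#include <stdio.h>

typedef struct my_forkserver {

  uint8_t *function_opt; /* opaque state pointer */
  void (*function_ptr)(void *opt, uint8_t *mem, uint32_t len);

} my_forkserver_t;

typedef struct my_state { uint32_t extras; } my_state_t;

static void add_auto(void *opt, uint8_t *mem, uint32_t len) {

  my_state_t *st = (my_state_t *)opt; /* cast back to the real state */
  (void)mem;
  (void)len;
  st->extras++;

}

int main(void) {

  my_state_t      state = {0};
  my_forkserver_t fsrv = {(uint8_t *)&state, add_auto};
  uint8_t         token[] = "GIF89a";

  fsrv.function_ptr(fsrv.function_opt, token, sizeof(token) - 1);
  printf("extras: %u\n", state.extras);
  return 0;

}
```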

View File

@ -448,11 +448,13 @@ static void check_map_coverage(afl_state_t *afl) {
u32 i;
if (count_bytes(afl->fsrv.trace_bits) < 100) return;
if (count_bytes(afl, afl->fsrv.trace_bits) < 100) return;
for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; ++i)
if (afl->fsrv.trace_bits[i]) return;
if (afl->fsrv.map_size != MAP_SIZE) return;
WARNF("Recompile binary with newer version of afl to improve coverage!");
}

View File

@ -244,7 +244,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
if (afl->stop_soon || fault == FAULT_ERROR) { goto abort_trimming; }
cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (cksum == q->exec_cksum) {
@ -257,7 +257,8 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
if (!needs_write) {
needs_write = 1;
memcpy(afl->clean_trace_custom, afl->fsrv.trace_bits, MAP_SIZE);
memcpy(afl->clean_trace_custom, afl->fsrv.trace_bits,
afl->fsrv.map_size);
}
@ -307,7 +308,7 @@ u8 trim_case_custom(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
ck_write(fd, in_buf, q->len, q->fname);
close(fd);
memcpy(afl->fsrv.trace_bits, afl->clean_trace_custom, MAP_SIZE);
memcpy(afl->fsrv.trace_bits, afl->clean_trace_custom, afl->fsrv.map_size);
update_bitmap_score(afl, q);
}

View File

@ -601,7 +601,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
if (!afl->dumb_mode && (afl->stage_cur & 7) == 7) {
u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
@ -613,7 +613,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
++a_len;
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
maybe_add_auto(afl, a_collect, a_len);
maybe_add_auto((u8 *)afl, a_collect, a_len);
} else if (cksum != prev_cksum) {
@ -621,7 +621,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
worthwhile queued up, and collect that if the answer is yes. */
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
maybe_add_auto(afl, a_collect, a_len);
maybe_add_auto((u8 *)afl, a_collect, a_len);
a_len = 0;
prev_cksum = cksum;
@ -761,7 +761,7 @@ u8 fuzz_one_original(afl_state_t *afl) {
without wasting time on checksums. */
if (!afl->dumb_mode && len >= EFF_MIN_LEN)
cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
else
cksum = ~afl->queue_cur->exec_cksum;
@ -2615,7 +2615,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
if (!afl->dumb_mode && (afl->stage_cur & 7) == 7) {
u32 cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
u32 cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (afl->stage_cur == afl->stage_max - 1 && cksum == prev_cksum) {
@ -2627,7 +2627,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
++a_len;
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
maybe_add_auto(afl, a_collect, a_len);
maybe_add_auto((u8 *)afl, a_collect, a_len);
} else if (cksum != prev_cksum) {
@ -2635,7 +2635,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
worthwhile queued up, and collect that if the answer is yes. */
if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA)
maybe_add_auto(afl, a_collect, a_len);
maybe_add_auto((u8 *)afl, a_collect, a_len);
a_len = 0;
prev_cksum = cksum;
@ -2775,7 +2775,7 @@ u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) {
without wasting time on checksums. */
if (!afl->dumb_mode && len >= EFF_MIN_LEN)
cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
else
cksum = ~afl->queue_cur->exec_cksum;

View File

@ -195,7 +195,7 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
/* For every byte set in afl->fsrv.trace_bits[], see if there is a previous
winner, and how it compares to us. */
for (i = 0; i < MAP_SIZE; ++i)
for (i = 0; i < afl->fsrv.map_size; ++i)
if (afl->fsrv.trace_bits[i]) {
@ -248,8 +248,10 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
if (!q->trace_mini) {
q->trace_mini = ck_alloc(MAP_SIZE >> 3);
minimize_bits(q->trace_mini, afl->fsrv.trace_bits);
u32 len = (afl->fsrv.map_size >> 3);
if (len == 0) len = 1;
q->trace_mini = ck_alloc(len);
minimize_bits(afl, q->trace_mini, afl->fsrv.trace_bits);
}
@ -268,14 +270,17 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) {
void cull_queue(afl_state_t *afl) {
struct queue_entry *q;
u8 temp_v[MAP_SIZE >> 3];
u32 len = (afl->fsrv.map_size >> 3);
u32 i;
u8 temp_v[MAP_SIZE >> 3];
if (len == 0) len = 1;
if (afl->dumb_mode || !afl->score_changed) return;
afl->score_changed = 0;
memset(temp_v, 255, MAP_SIZE >> 3);
memset(temp_v, 255, len);
afl->queued_favored = 0;
afl->pending_favored = 0;
@ -292,10 +297,10 @@ void cull_queue(afl_state_t *afl) {
/* Let's see if anything in the bitmap isn't captured in temp_v.
If yes, and if it has a afl->top_rated[] contender, let's use it. */
for (i = 0; i < MAP_SIZE; ++i)
for (i = 0; i < afl->fsrv.map_size; ++i)
if (afl->top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) {
u32 j = MAP_SIZE >> 3;
u32 j = len;
/* Remove all bits belonging to the current entry from temp_v. */

View File

@ -88,7 +88,7 @@ static u8 get_exec_checksum(afl_state_t *afl, u8 *buf, u32 len, u32 *cksum) {
if (unlikely(common_fuzz_stuff(afl, buf, len))) return 1;
*cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
*cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
return 0;
}
@ -332,7 +332,7 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
}
maybe_add_auto(afl, (u8 *)&v, shape);
maybe_add_auto((u8 *)afl, (u8 *)&v, shape);
u64 rev;
switch (shape) {
@ -340,15 +340,15 @@ static void try_to_add_to_dict(afl_state_t *afl, u64 v, u8 shape) {
case 1: break;
case 2:
rev = SWAP16((u16)v);
maybe_add_auto(afl, (u8 *)&rev, shape);
maybe_add_auto((u8 *)afl, (u8 *)&rev, shape);
break;
case 4:
rev = SWAP32((u32)v);
maybe_add_auto(afl, (u8 *)&rev, shape);
maybe_add_auto((u8 *)afl, (u8 *)&rev, shape);
break;
case 8:
rev = SWAP64(v);
maybe_add_auto(afl, (u8 *)&rev, shape);
maybe_add_auto((u8 *)afl, (u8 *)&rev, shape);
break;
}
@ -486,8 +486,8 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u32 len) {
// If failed, add to dictionary
if (fails == 8) {
maybe_add_auto(afl, o->v0, SHAPE_BYTES(h->shape));
maybe_add_auto(afl, o->v1, SHAPE_BYTES(h->shape));
maybe_add_auto((u8 *)afl, o->v0, SHAPE_BYTES(h->shape));
maybe_add_auto((u8 *)afl, o->v1, SHAPE_BYTES(h->shape));
}

View File

@ -44,7 +44,7 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
must prevent any earlier operations from venturing into that
territory. */
memset(afl->fsrv.trace_bits, 0, MAP_SIZE);
memset(afl->fsrv.trace_bits, 0, afl->fsrv.map_size);
MEM_BARRIER();
@ -122,9 +122,9 @@ u8 run_target(afl_state_t *afl, u32 timeout) {
tb4 = *(u32 *)afl->fsrv.trace_bits;
#ifdef WORD_SIZE_64
classify_counts((u64 *)afl->fsrv.trace_bits);
classify_counts(afl, (u64 *)afl->fsrv.trace_bits);
#else
classify_counts((u32 *)afl->fsrv.trace_bits);
classify_counts(afl, (u32 *)afl->fsrv.trace_bits);
#endif /* ^WORD_SIZE_64 */
afl->fsrv.prev_timed_out = afl->fsrv.child_timed_out;
@ -315,7 +315,8 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
afl->shm.cmplog_mode)
init_cmplog_forkserver(afl);
if (q->exec_cksum) memcpy(afl->first_trace, afl->fsrv.trace_bits, MAP_SIZE);
if (q->exec_cksum)
memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);
start_us = get_cur_time_us();
@ -336,14 +337,14 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
if (afl->stop_soon || fault != afl->crash_mode) goto abort_calibration;
if (!afl->dumb_mode && !afl->stage_cur &&
!count_bytes(afl->fsrv.trace_bits)) {
!count_bytes(afl, afl->fsrv.trace_bits)) {
fault = FAULT_NOINST;
goto abort_calibration;
}
cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
if (q->exec_cksum != cksum) {
@ -354,7 +355,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
u32 i;
for (i = 0; i < MAP_SIZE; ++i) {
for (i = 0; i < afl->fsrv.map_size; ++i) {
if (unlikely(!afl->var_bytes[i]) &&
unlikely(afl->first_trace[i] != afl->fsrv.trace_bits[i]))
@ -368,7 +369,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
} else {
q->exec_cksum = cksum;
memcpy(afl->first_trace, afl->fsrv.trace_bits, MAP_SIZE);
memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);
}
@ -385,7 +386,7 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
This is used for fuzzing air time calculations in calculate_score(). */
q->exec_us = (stop_us - start_us) / afl->stage_max;
q->bitmap_size = count_bytes(afl->fsrv.trace_bits);
q->bitmap_size = count_bytes(afl, afl->fsrv.trace_bits);
q->handicap = handicap;
q->cal_failed = 0;
@ -413,7 +414,7 @@ abort_calibration:
if (var_detected) {
afl->var_byte_count = count_bytes(afl->var_bytes);
afl->var_byte_count = count_bytes(afl, afl->var_bytes);
if (!q->var_behavior) {
@ -640,7 +641,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
/* Note that we don't keep track of crashes or hangs here; maybe TODO?
*/
cksum = hash32(afl->fsrv.trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
/* If the deletion had no impact on the trace, make it permanent. This
isn't perfect for variable-path inputs, but we're just making a
@ -663,7 +664,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
if (!needs_write) {
needs_write = 1;
memcpy(afl->clean_trace, afl->fsrv.trace_bits, MAP_SIZE);
memcpy(afl->clean_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);
}
@ -705,7 +706,7 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {
ck_write(fd, in_buf, q->len, q->fname);
close(fd);
memcpy(afl->fsrv.trace_bits, afl->clean_trace, MAP_SIZE);
memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size);
update_bitmap_score(afl, q);
}

View File

@ -99,6 +99,10 @@ void afl_state_init(afl_state_t *afl) {
afl->fsrv.use_stdin = 1;
afl->fsrv.map_size = MAP_SIZE;
afl->fsrv.function_opt = (u8 *)afl;
afl->fsrv.function_ptr = &maybe_add_auto;
afl->cal_cycles = CAL_CYCLES;
afl->cal_cycles_long = CAL_CYCLES_LONG;

View File

@ -37,7 +37,7 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
u8 fn[PATH_MAX];
s32 fd;
FILE * f;
uint32_t t_bytes = count_non_255_bytes(afl->virgin_bits);
uint32_t t_bytes = count_non_255_bytes(afl, afl->virgin_bits);
snprintf(fn, PATH_MAX, "%s/fuzzer_stats", afl->out_dir);
@ -258,8 +258,8 @@ void show_stats(afl_state_t *afl) {
/* Do some bitmap stats. */
t_bytes = count_non_255_bytes(afl->virgin_bits);
t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
t_bytes = count_non_255_bytes(afl, afl->virgin_bits);
t_byte_ratio = ((double)t_bytes * 100) / afl->fsrv.map_size;
if (likely(t_bytes) && unlikely(afl->var_byte_count))
stab_ratio = 100 - (((double)afl->var_byte_count * 100) / t_bytes);
@ -305,7 +305,7 @@ void show_stats(afl_state_t *afl) {
/* Compute some mildly useful bitmap stats. */
t_bits = (MAP_SIZE << 3) - count_bits(afl->virgin_bits);
t_bits = (afl->fsrv.map_size << 3) - count_bits(afl, afl->virgin_bits);
/* Now, for the visuals... */
@ -465,7 +465,8 @@ void show_stats(afl_state_t *afl) {
SAYF(bV bSTOP " now processing : " cRST "%-16s " bSTG bV bSTOP, tmp);
sprintf(tmp, "%0.02f%% / %0.02f%%",
((double)afl->queue_cur->bitmap_size) * 100 / MAP_SIZE, t_byte_ratio);
((double)afl->queue_cur->bitmap_size) * 100 / afl->fsrv.map_size,
t_byte_ratio);
SAYF(" map density : %s%-21s" bSTG bV "\n",
t_byte_ratio > 70 ? cLRD

View File

@ -258,7 +258,7 @@ static u8 run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
fsrv->child_timed_out = 0;
memset(fsrv->trace_bits, 0, MAP_SIZE);
memset(fsrv->trace_bits, 0, fsrv->map_size);
MEM_BARRIER();
write_to_testcase(fsrv, mem, len);
@ -393,7 +393,7 @@ static u8 run_target(afl_forkserver_t *fsrv, char **argv, u8 *mem, u32 len,
}
cksum = hash32(fsrv->trace_bits, MAP_SIZE, HASH_CONST);
cksum = hash32(fsrv->trace_bits, fsrv->map_size, HASH_CONST);
if (first_run) orig_cksum = cksum;