Compare commits

..

42 Commits

Author SHA1 Message Date
5a2688c213 fi 2024-06-05 10:34:24 +02:00
c31817863b ensure model.bin 2024-06-05 10:12:45 +02:00
54684728a1 fix 2024-06-05 10:00:46 +02:00
4c8e473376 get libxgboost 2024-06-05 09:50:53 +02:00
f1d829c7ca xgbooster 2024-06-04 16:15:39 +02:00
c900a8e30c model.bin 2024-06-04 13:45:25 +02:00
fd82e3330c add model 2024-06-04 13:45:17 +02:00
1db82f3303 update grammar mutator 2024-06-03 09:23:43 +02:00
2d4a4ba73f fix afl-showmap 2024-06-01 16:55:56 +02:00
ca55858aa7 Merge pull request #2107 from AFLplusplus/reg
fix regression
2024-06-01 16:34:50 +02:00
e639521b01 changelog 2024-06-01 16:34:23 +02:00
894339c5d7 try regression fix 2024-06-01 12:26:26 +02:00
e13dc9b7e6 todo 2024-06-01 12:17:53 +02:00
9419e39fdf nits 2024-05-31 18:32:31 +02:00
a3125c38f4 fix afl-showmap shmmemleak 2024-05-29 12:55:28 +02:00
224add0222 update unicorn 2024-05-28 11:12:33 +02:00
19636f748c Unicornafl: Fix incorrect comment (#2103) 2024-05-28 02:24:43 +02:00
7aa5e1c443 Merge pull request #2104 from Evian-Zhang/fix-unicorn-lldb-dumper
Make lldb dumper of unicorn_mode work in modern LLDB
2024-05-27 21:18:28 +02:00
93279db71b Make lldb dumper of unicorn_mode work in modern LLDB 2024-05-27 19:15:35 +08:00
5bf760510e Merge pull request #2102 from ndrewh/testcache-fix
fix: testcache hangs for large test cases
2024-05-26 08:12:29 +02:00
03dc80afc4 fix: testcache hangs for large test cases 2024-05-25 23:44:57 +00:00
fda3106fd9 Merge pull request #2099 from Atlante45/dev
Fix dynamic_lookup linker flag for Apple clang
2024-05-24 01:33:55 +02:00
9721a77204 Fix dynamic_lookup linker flag for Apple clang 2024-05-23 11:14:53 -07:00
92a8c2804f fix the fix for symcc 2024-05-21 16:31:42 +02:00
e1521fa8eb fix symcc custom mutator 2024-05-21 11:04:25 +02:00
4e3cd8ac3f nit 2024-05-20 14:02:22 +02:00
31a8beb449 support new llvm 19 changes 2024-05-19 13:47:53 +02:00
e7d871c8bf Merge pull request #2093 from AFLplusplus/dev
push to stable
2024-05-17 23:55:55 +02:00
56d5aa3101 log 2024-05-17 23:55:43 +02:00
c6a2a4046e Merge pull request #2092 from fbeqv/dev
Fix runtime underflow & -V exiting before syncing
2024-05-17 23:41:39 +02:00
6dd5e931fc Fix runtime underflow & -V exiting before syncing
print_stats sets exit_soon even while syncing, this leaves -V 0 still broken, as we don't finish syncing.

Additionally, the change that introduced the previous -V fix also broke the runtime tracking, as runtime needs to include all time including sync, splice etc. This caused an underflow in the reported runtime.
2024-05-17 14:33:32 -07:00
635140ba43 help qemu build for some linux platforms 2024-05-17 09:45:56 +02:00
497f341eac Revert "no weights"
This reverts commit 068aa13c6b.
2024-05-16 14:27:33 +02:00
068aa13c6b no weights 2024-05-16 14:27:04 +02:00
ba7ae6c59d nits 2024-05-16 14:21:00 +02:00
6ae95271be nits 2024-05-16 09:17:59 +02:00
a2e0163cc1 Merge pull request #2091 from bet4it/collect_coverage
Fix bug of `afl-showmap` in `collect_coverage` mode
2024-05-16 09:14:15 +02:00
a26bb0b0f2 Merge pull request #2090 from AFLplusplus/dev
push to stable
2024-05-14 13:18:22 +02:00
0cf78b7748 Fix bug of afl-showmap in collect_coverage mode 2024-05-14 17:17:58 +08:00
90fbf59bf1 Merge pull request #2084 from AFLplusplus/dev
push to stable
2024-05-11 09:16:21 +02:00
ad0d0c77fb Merge pull request #2071 from AFLplusplus/dev
Push to stable
2024-04-26 16:17:41 +02:00
1d17210d9f Merge pull request #2052 from AFLplusplus/dev
4.20 release pre-PR
2024-04-13 11:50:49 +02:00
30 changed files with 227 additions and 738 deletions

4
.gitmodules vendored
View File

@ -25,3 +25,7 @@
[submodule "nyx_mode/QEMU-Nyx"]
path = nyx_mode/QEMU-Nyx
url = https://github.com/nyx-fuzz/QEMU-Nyx
[submodule "xgboost"]
path = xgboost
url = https://github.com/dmlc/xgboost
branch = 742c19f

View File

@ -471,8 +471,13 @@ src/afl-forkserver.o : $(COMM_HDR) src/afl-forkserver.c include/forkserver.h
src/afl-sharedmem.o : $(COMM_HDR) src/afl-sharedmem.c include/sharedmem.h
$(CC) $(CFLAGS) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) -c src/afl-sharedmem.c -o src/afl-sharedmem.o
afl-fuzz: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o src/hashmap.c | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) -Wno-shift-count-overflow $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o src/hashmap.c -o $@ $(PYFLAGS) $(LDFLAGS) -lm
libxgboost.so:
git submodule init
git submodule update --recursive
mkdir -p xgboost/build && cd xgboost && git submodule init && git submodule update --recursive && cd build && cmake -DUSE_OPENMP=OFF -DHIDE_CXX_SYMBOLS=ON .. && make && cp -v ../lib/libxgboost.so ../..
afl-fuzz: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o src/hashmap.c libxgboost.so | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) -Wno-shift-count-overflow $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o src/hashmap.c -o $@ $(PYFLAGS) $(LDFLAGS) -I./xgboost/include -lm -L. -lxgboost
afl-showmap: src/afl-showmap.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o $(COMM_HDR) | test_x86
$(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) src/$@.c src/afl-fuzz-mutators.c src/afl-fuzz-python.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS)

View File

@ -17,6 +17,7 @@
## Should
- afl-crash-analysis
- cmplog: add loop count resolving (byte -> loop cnt change, calc special values)
- support persistent and deferred fork server in afl-showmap?
- better autodetection of shifting runtime timeout values
- afl-plot to support multiple plot_data

View File

@ -1 +1 @@
5ed4f8d
95a6857

View File

@ -22,10 +22,10 @@ afl_state_t *afl_struct;
typedef struct my_mutator {
afl_state_t *afl;
u8 * mutator_buf;
u8 * out_dir;
u8 * tmp_dir;
u8 * target;
u8 *mutator_buf;
u8 *out_dir;
u8 *tmp_dir;
u8 *target;
uint32_t seed;
} my_mutator_t;
@ -101,7 +101,7 @@ my_mutator_t *afl_custom_init(afl_state_t *afl, unsigned int seed) {
/* When a new queue entry is added we run this input with the symcc
instrumented binary */
uint8_t afl_custom_queue_new_entry(my_mutator_t * data,
uint8_t afl_custom_queue_new_entry(my_mutator_t *data,
const uint8_t *filename_new_queue,
const uint8_t *filename_orig_queue) {
@ -176,7 +176,7 @@ uint8_t afl_custom_queue_new_entry(my_mutator_t * data,
struct dirent **nl;
int32_t items = scandir(data->tmp_dir, &nl, NULL, NULL);
u8 * origin_name = basename(filename_new_queue);
u8 *origin_name = basename(filename_new_queue);
int32_t i;
if (items > 0) {
@ -187,8 +187,8 @@ uint8_t afl_custom_queue_new_entry(my_mutator_t * data,
DBG("test=%s\n", fn);
if (stat(source_name, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) {
u8 *destination_name =
alloc_printf("%s/%s.%s", data->out_dir, origin_name, nl[i]->d_name);
u8 *destination_name = alloc_printf("%s/%s.%s", data->out_dir,
origin_name, nl[i]->d_name);
rename(source_name, destination_name);
ck_free(destination_name);
DBG("found=%s\n", source_name);
@ -248,7 +248,7 @@ uint32_t afl_custom_fuzz_count(my_mutator_t *data, const u8 *buf,
for (i = 0; i < (u32)items; ++i) {
struct stat st;
u8 * fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name);
u8 *fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name);
DBG("test=%s\n", fn);
if (stat(fn, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) {
@ -282,12 +282,12 @@ size_t afl_custom_fuzz(my_mutator_t *data, uint8_t *buf, size_t buf_size,
if (items <= 0) return 0;
for (i = 0; i < (u32)items; ++i) {
for (i = 0; i < (s32)items; ++i) {
struct stat st;
u8 * fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name);
if (!done) {
if (done == 0) {
struct stat st;
u8 *fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name);
if (stat(fn, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) {
@ -306,10 +306,10 @@ size_t afl_custom_fuzz(my_mutator_t *data, uint8_t *buf, size_t buf_size,
}
unlink(fn);
ck_free(fn);
}
ck_free(fn);
free(nl[i]);
}

View File

@ -5,21 +5,33 @@
### Version ++4.21a (dev)
* afl-fuzz
- fixed a regression in afl-fuzz that resulted in a 5-10% performance loss
due to a switch from gettimeofday() to clock_gettime(), which should rather
be three times faster. The reason for this is unknown.
- added AFL_DISABLE_REDUNDANT for huge queues
- fix AFL_PERSISTENT_RECORD
- run custom_post_process after standard trimming
- prevent filenames in the queue that have spaces
- minor fix for FAST schedules
- more frequent stats update when syncing (todo: check performance impact)
- now timing of calibration, trimming and syncing is measured separately,
thanks to @eqv!
- -V timing is now accurately the fuzz time (without syncing); before,
long calibration times and syncing could result in no fuzzing being
done when the time had already run out by then, thanks to @eqv!
* afl-cc:
- re-enable i386 support that was accidentally disabled
- fixes for LTO and outdated afl-gcc mode
- fixes for LTO and outdated afl-gcc mode for i386
- fix COMPCOV split compare for old LLVMs
- disable xml/curl/g_ string transform functions because we do not check
for null pointers ... TODO
- ensure shared memory variables are visible in weird build setups
- compatibility to new LLVM 19 changes
* afl-cmin
- work with input files that have a space
* afl-showmap
- fix memory leak on shmem testcase usage (thanks to @ndrewh)
- minor fix to collect coverage -C (thanks to @bet4it)
* enhanced the ASAN configuration
@ -56,12 +68,13 @@
- afl-whatsup:
- now also displays current average speed
- small bugfixes
- Fixes for aflpp custom mutator and standalone tool
- custom mutators:
- fixes for aflpp custom mutator and standalone tool
- important fix to the symcc custom mutator
- Minor edits to afl-persistent-config
- Prevent temporary files being left behind on aborted afl-whatsup
- More CPU benchmarks added to benchmark/
### Version ++4.10c (release)
- afl-fuzz:
- default power schedule is now EXPLORE, due to a fix in fast schedules

View File

@ -139,6 +139,10 @@
#define AFL_RAND_RETURN u32
#endif
#ifndef INTERESTING_32_LEN
#error INTERESTING_32_LEN not defined - BUG!
#endif
extern s8 interesting_8[INTERESTING_8_LEN];
extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
extern s32
@ -200,8 +204,6 @@ struct queue_entry {
u8 *fname; /* File name for the test case */
u32 len; /* Input length */
u32 id; /* entry number in queue_buf */
u32 found;
s32 cmp, fcmp, rtn;
u8 colorized, /* Do not run redqueen stage again */
cal_failed; /* Calibration failed? */
@ -253,9 +255,6 @@ struct queue_entry {
struct skipdet_entry *skipdet_e;
u32 score; /* complexity/vulnerability score */
u64 total_execs; /* total executes of this item */
};
struct extra_data {
@ -837,9 +836,6 @@ typedef struct afl_state {
/* How often did we evict from the cache (for statistics only) */
u32 q_testcase_evictions;
/* current complexity/vulnerability score received */
u32 current_score;
/* Refs to each queue entry with cached testcase (for eviction, if cache_count
* is too large) */
struct queue_entry **q_testcase_cache;

View File

@ -33,6 +33,10 @@
#define MUT_STRATEGY_ARRAY_SIZE 256
#ifndef INTERESTING_32
#error INTERESTING_32 is not defined - BUG!
#endif
s8 interesting_8[] = {INTERESTING_8};
s16 interesting_16[] = {INTERESTING_8, INTERESTING_16};
s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};

View File

@ -21,20 +21,18 @@ static char *afl_environment_variables[] = {
"AFL_BENCH_UNTIL_CRASH", "AFL_CAL_FAST", "AFL_CC", "AFL_CC_COMPILER",
"AFL_CMIN_ALLOW_ANY", "AFL_CMIN_CRASHES_ONLY", "AFL_CMPLOG_ONLY_NEW",
"AFL_CODE_END", "AFL_CODE_START", "AFL_COMPCOV_BINNAME",
"AFL_DUMP_QUEUE_ON_EXIT", "AFL_DUMP_CYCLOMATIC_COMPLEXITY",
"AFL_DUMP_VULNERABILITY_COMPLEXITY", "AFL_CMPLOG_MAX_LEN",
"AFL_COMPCOV_LEVEL", "AFL_CRASH_EXITCODE",
"AFL_CRASHING_SEEDS_AS_NEW_CRASH", "AFL_CUSTOM_MUTATOR_LIBRARY",
"AFL_CUSTOM_MUTATOR_ONLY", "AFL_CUSTOM_INFO_PROGRAM",
"AFL_CUSTOM_INFO_PROGRAM_ARGV", "AFL_CUSTOM_INFO_PROGRAM_INPUT",
"AFL_CUSTOM_INFO_OUT", "AFL_CXX", "AFL_CYCLE_SCHEDULES", "AFL_DEBUG",
"AFL_DEBUG_CHILD", "AFL_DEBUG_GDB", "AFL_DEBUG_UNICORN",
"AFL_DISABLE_REDUNDANT", "AFL_NO_REDUNDANT", "AFL_DISABLE_TRIM",
"AFL_NO_TRIM", "AFL_DISABLE_LLVM_INSTRUMENTATION", "AFL_DONT_OPTIMIZE",
"AFL_DRIVER_STDERR_DUPLICATE_FILENAME", "AFL_DUMB_FORKSRV",
"AFL_EARLY_FORKSERVER", "AFL_ENTRYPOINT", "AFL_EXIT_WHEN_DONE",
"AFL_EXIT_ON_TIME", "AFL_EXIT_ON_SEED_ISSUES", "AFL_FAST_CAL",
"AFL_FINAL_SYNC", "AFL_FORCE_UI", "AFL_FRIDA_DEBUG_MAPS",
"AFL_DUMP_CYCLOMATIC_COMPLEXITY", "AFL_CMPLOG_MAX_LEN", "AFL_COMPCOV_LEVEL",
"AFL_CRASH_EXITCODE", "AFL_CRASHING_SEEDS_AS_NEW_CRASH",
"AFL_CUSTOM_MUTATOR_LIBRARY", "AFL_CUSTOM_MUTATOR_ONLY",
"AFL_CUSTOM_INFO_PROGRAM", "AFL_CUSTOM_INFO_PROGRAM_ARGV",
"AFL_CUSTOM_INFO_PROGRAM_INPUT", "AFL_CUSTOM_INFO_OUT", "AFL_CXX",
"AFL_CYCLE_SCHEDULES", "AFL_DEBUG", "AFL_DEBUG_CHILD", "AFL_DEBUG_GDB",
"AFL_DEBUG_UNICORN", "AFL_DISABLE_REDUNDANT", "AFL_NO_REDUNDANT",
"AFL_DISABLE_TRIM", "AFL_NO_TRIM", "AFL_DISABLE_LLVM_INSTRUMENTATION",
"AFL_DONT_OPTIMIZE", "AFL_DRIVER_STDERR_DUPLICATE_FILENAME",
"AFL_DUMB_FORKSRV", "AFL_EARLY_FORKSERVER", "AFL_ENTRYPOINT",
"AFL_EXIT_WHEN_DONE", "AFL_EXIT_ON_TIME", "AFL_EXIT_ON_SEED_ISSUES",
"AFL_FAST_CAL", "AFL_FINAL_SYNC", "AFL_FORCE_UI", "AFL_FRIDA_DEBUG_MAPS",
"AFL_FRIDA_DRIVER_NO_HOOK", "AFL_FRIDA_EXCLUDE_RANGES",
"AFL_FRIDA_INST_CACHE_SIZE", "AFL_FRIDA_INST_COVERAGE_ABSOLUTE",
"AFL_FRIDA_INST_COVERAGE_FILE", "AFL_FRIDA_INST_DEBUG_FILE",

View File

@ -60,8 +60,6 @@
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "config.h"
#include "debug.h"
@ -174,7 +172,6 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) {
}
using LoopInfoCallback = function_ref<const LoopInfo *(Function &F)>;
using DomTreeCallback = function_ref<const DominatorTree *(Function &F)>;
using PostDomTreeCallback =
function_ref<const PostDominatorTree *(Function &F)>;
@ -190,15 +187,13 @@ class ModuleSanitizerCoverageLTO
}
bool instrumentModule(Module &M, DomTreeCallback DTCallback,
PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback);
PostDomTreeCallback PDTCallback);
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
private:
void instrumentFunction(Function &F, DomTreeCallback DTCallback,
PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback);
PostDomTreeCallback PDTCallback);
/* void InjectCoverageForIndirectCalls(Function &F,
ArrayRef<Instruction *>
IndirCalls);*/
@ -255,7 +250,6 @@ class ModuleSanitizerCoverageLTO
uint32_t afl_global_id = 0;
uint32_t unhandled = 0;
uint32_t select_cnt = 0;
uint32_t dump_cc = 0, dump_vc = 0;
uint32_t instrument_ctx = 0;
uint32_t instrument_ctx_max_depth = 0;
uint32_t extra_ctx_inst = 0;
@ -297,7 +291,6 @@ class ModuleSanitizerCoverageLTOLegacyPass : public ModulePass {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<PostDominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
}
@ -326,15 +319,7 @@ class ModuleSanitizerCoverageLTOLegacyPass : public ModulePass {
};
auto LoopCallback = [this](Function &F) -> const LoopInfo * {
return &this->getAnalysis<LoopInfoWrapperPass>(F).getLoopInfo();
};
ModuleSancov.instrumentModule(M, DTCallback, PDTCallback, LoopCallback);
return 1;
return ModuleSancov.instrumentModule(M, DTCallback, PDTCallback);
}
@ -387,21 +372,15 @@ PreservedAnalyses ModuleSanitizerCoverageLTO::run(Module &M,
};
auto LoopCallback = [&FAM](Function &F) -> const LoopInfo * {
if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback))
return PreservedAnalyses::none();
return &FAM.getResult<LoopAnalysis>(F);
};
ModuleSancov.instrumentModule(M, DTCallback, PDTCallback, LoopCallback);
return PreservedAnalyses::none();
return PreservedAnalyses::all();
}
bool ModuleSanitizerCoverageLTO::instrumentModule(
Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback) {
Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) {
if (Options.CoverageType == SanitizerCoverageOptions::SCK_None) return false;
/*
@ -495,10 +474,6 @@ bool ModuleSanitizerCoverageLTO::instrumentModule(
}
if (getenv("AFL_DUMP_CYCLOMATIC_COMPLEXITY")) { dump_cc = 1; }
if (getenv("AFL_DUMP_VULNERABILITY_COMPLEXITY")) { dump_vc = 1; }
skip_nozero = getenv("AFL_LLVM_SKIP_NEVERZERO");
use_threadsafe_counters = getenv("AFL_LLVM_THREADSAFE_INST");
@ -1082,7 +1057,7 @@ bool ModuleSanitizerCoverageLTO::instrumentModule(
// M.getOrInsertFunction(SanCovTracePCGuardName, VoidTy, Int32PtrTy);
for (auto &F : M)
instrumentFunction(F, DTCallback, PDTCallback, LCallback);
instrumentFunction(F, DTCallback, PDTCallback);
// AFL++ START
if (dFile.is_open()) dFile.close();
@ -1372,8 +1347,7 @@ Function *returnOnlyCaller(Function *F) {
}
void ModuleSanitizerCoverageLTO::instrumentFunction(
Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback) {
Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) {
if (F.empty()) return;
if (F.getName().find(".module_ctor") != std::string::npos)
@ -1447,7 +1421,6 @@ void ModuleSanitizerCoverageLTO::instrumentFunction(
const DominatorTree *DT = DTCallback(F);
const PostDominatorTree *PDT = PDTCallback(F);
const LoopInfo *LI = LCallback(F);
bool IsLeafFunc = true;
uint32_t skip_next = 0;
uint32_t call_counter = 0, call_depth = 0;
@ -1982,51 +1955,6 @@ void ModuleSanitizerCoverageLTO::instrumentFunction(
}
unsigned int score = 0;
if (dump_cc) { score += calcCyclomaticComplexity(&F, LI); }
if (dump_vc) { score += calcVulnerabilityScore(&F, LI, DT, PDT); }
if (score) {
BasicBlock::iterator IP = F.getEntryBlock().getFirstInsertionPt();
IRBuilder<> builder(&*IP);
// Access the int32 value at u8 offset 1 (unaligned access)
LoadInst *MapPtr =
builder.CreateLoad(PointerType::get(Int8Ty, 0), AFLMapPtr);
llvm::Value *CastToInt8Ptr =
builder.CreateBitCast(MapPtr, llvm::PointerType::get(Int8Ty, 0));
llvm::Value *Int32Ptr = builder.CreateGEP(
Int8Ty, CastToInt8Ptr, llvm::ConstantInt::get(Int32Ty, 1));
llvm::Value *CastToInt32Ptr =
builder.CreateBitCast(Int32Ptr, llvm::PointerType::get(Int32Ty, 0));
// Load the unaligned int32 value
llvm::LoadInst *Load = builder.CreateLoad(Int32Ty, CastToInt32Ptr);
Load->setAlignment(llvm::Align(1));
// Value to add
llvm::Value *ValueToAdd = llvm::ConstantInt::get(Int32Ty, score);
// Perform addition and check for wrap around
llvm::Value *Add =
builder.CreateAdd(Load, ValueToAdd, "addValue", true, true);
// Check if addition wrapped (unsigned)
llvm::Value *DidWrap = builder.CreateICmpULT(Add, Load, "didWrap");
// Select the maximum value if there was a wrap, otherwise use the result
llvm::Value *MaxInt32 = llvm::ConstantInt::get(Int32Ty, UINT32_MAX);
llvm::Value *Result =
builder.CreateSelect(DidWrap, MaxInt32, Add, "selectMaxOrResult");
// Store the result back at the same unaligned offset
llvm::StoreInst *Store = builder.CreateStore(Result, CastToInt32Ptr);
Store->setAlignment(llvm::Align(1));
}
InjectCoverage(F, BlocksToInstrument, IsLeafFunc);
// InjectCoverageForIndirectCalls(F, IndirCalls);

View File

@ -70,8 +70,6 @@
#endif
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "config.h"
#include "debug.h"
@ -121,7 +119,6 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) {
}
using LoopInfoCallback = function_ref<const LoopInfo *(Function &F)>;
using DomTreeCallback = function_ref<const DominatorTree *(Function &F)>;
using PostDomTreeCallback =
function_ref<const PostDominatorTree *(Function &F)>;
@ -138,13 +135,11 @@ class ModuleSanitizerCoverageAFL
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
bool instrumentModule(Module &M, DomTreeCallback DTCallback,
PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback);
PostDomTreeCallback PDTCallback);
private:
void instrumentFunction(Function &F, DomTreeCallback DTCallback,
PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback);
PostDomTreeCallback PDTCallback);
void InjectTraceForCmp(Function &F, ArrayRef<Instruction *> CmpTraceTargets);
void InjectTraceForSwitch(Function &F,
ArrayRef<Instruction *> SwitchTraceTargets);
@ -200,7 +195,7 @@ class ModuleSanitizerCoverageAFL
SanitizerCoverageOptions Options;
uint32_t instr = 0, selects = 0, unhandled = 0, dump_cc = 0, dump_vc = 0;
uint32_t instr = 0, selects = 0, unhandled = 0, dump_cc = 0;
GlobalVariable *AFLMapPtr = NULL;
ConstantInt *One = NULL;
ConstantInt *Zero = NULL;
@ -238,10 +233,8 @@ PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M,
ModuleAnalysisManager &MAM) {
ModuleSanitizerCoverageAFL ModuleSancov(Options);
auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
auto DTCallback = [&FAM](Function &F) -> const DominatorTree * {
auto DTCallback = [&FAM](Function &F) -> const DominatorTree *{
return &FAM.getResult<DominatorTreeAnalysis>(F);
@ -253,21 +246,9 @@ PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M,
};
auto LoopCallback = [&FAM](Function &F) -> const LoopInfo * {
return &FAM.getResult<LoopAnalysis>(F);
};
if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback, LoopCallback)) {
if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback))
return PreservedAnalyses::none();
} else {
return PreservedAnalyses::all();
}
return PreservedAnalyses::all();
}
@ -343,8 +324,7 @@ Function *ModuleSanitizerCoverageAFL::CreateInitCallsForSections(
}
bool ModuleSanitizerCoverageAFL::instrumentModule(
Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback) {
Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) {
setvbuf(stdout, NULL, _IONBF, 0);
@ -352,8 +332,6 @@ bool ModuleSanitizerCoverageAFL::instrumentModule(
if (getenv("AFL_DUMP_CYCLOMATIC_COMPLEXITY")) { dump_cc = 1; }
if (getenv("AFL_DUMP_VULNERABILITY_COMPLEXITY")) { dump_vc = 1; }
if ((isatty(2) && !getenv("AFL_QUIET")) || debug) {
SAYF(cCYA "SanitizerCoveragePCGUARD" VERSION cRST "\n");
@ -451,7 +429,7 @@ bool ModuleSanitizerCoverageAFL::instrumentModule(
M.getOrInsertFunction(SanCovTracePCGuardName, VoidTy, Int32PtrTy);
for (auto &F : M)
instrumentFunction(F, DTCallback, PDTCallback, LCallback);
instrumentFunction(F, DTCallback, PDTCallback);
Function *Ctor = nullptr;
@ -590,8 +568,7 @@ static bool IsInterestingCmp(ICmpInst *CMP, const DominatorTree *DT,
#endif
void ModuleSanitizerCoverageAFL::instrumentFunction(
Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback) {
Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) {
if (F.empty()) return;
if (!isInInstrumentList(&F, FMNAME)) return;
@ -627,7 +604,6 @@ void ModuleSanitizerCoverageAFL::instrumentFunction(
const DominatorTree *DT = DTCallback(F);
const PostDominatorTree *PDT = PDTCallback(F);
const LoopInfo *LI = LCallback(F);
bool IsLeafFunc = true;
for (auto &BB : F) {
@ -660,55 +636,12 @@ void ModuleSanitizerCoverageAFL::instrumentFunction(
}
unsigned int score = 0;
if (dump_cc) { score += calcCyclomaticComplexity(&F, LI); }
if (dump_vc) { score += calcVulnerabilityScore(&F, LI, DT, PDT); }
if (score) {
BasicBlock::iterator IP = F.getEntryBlock().getFirstInsertionPt();
IRBuilder<> builder(&*IP);
// Access the int32 value at u8 offset 1 (unaligned access)
LoadInst *MapPtr =
builder.CreateLoad(PointerType::get(Int8Ty, 0), AFLMapPtr);
llvm::Value *CastToInt8Ptr =
builder.CreateBitCast(MapPtr, llvm::PointerType::get(Int8Ty, 0));
llvm::Value *Int32Ptr = builder.CreateGEP(
Int8Ty, CastToInt8Ptr, llvm::ConstantInt::get(Int32Ty, 1));
llvm::Value *CastToInt32Ptr =
builder.CreateBitCast(Int32Ptr, llvm::PointerType::get(Int32Ty, 0));
// Load the unaligned int32 value
llvm::LoadInst *Load = builder.CreateLoad(Int32Ty, CastToInt32Ptr);
Load->setAlignment(llvm::Align(1));
// Value to add
llvm::Value *ValueToAdd = llvm::ConstantInt::get(Int32Ty, score);
// Perform addition and check for wrap around
llvm::Value *Add =
builder.CreateAdd(Load, ValueToAdd, "addValue", true, true);
// Check if addition wrapped (unsigned)
llvm::Value *DidWrap = builder.CreateICmpULT(Add, Load, "didWrap");
// Select the maximum value if there was a wrap, otherwise use the result
llvm::Value *MaxInt32 = llvm::ConstantInt::get(Int32Ty, UINT32_MAX);
llvm::Value *Result =
builder.CreateSelect(DidWrap, MaxInt32, Add, "selectMaxOrResult");
// Store the result back at the same unaligned offset
llvm::StoreInst *Store = builder.CreateStore(Result, CastToInt32Ptr);
Store->setAlignment(llvm::Align(1));
}
InjectCoverage(F, BlocksToInstrument, IsLeafFunc);
// InjectTraceForCmp(F, CmpTraceTargets);
// InjectTraceForSwitch(F, SwitchTraceTargets);
if (dump_cc) { calcCyclomaticComplexity(&F); }
}
GlobalVariable *ModuleSanitizerCoverageAFL::CreateFunctionLocalArrayInSection(

View File

@ -1849,7 +1849,7 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {
to avoid duplicate calls (which can happen as an artifact of the underlying
implementation in LLVM). */
if (__afl_final_loc < 4) __afl_final_loc = 4; // we skip the first 5 entries
if (__afl_final_loc < 5) __afl_final_loc = 5; // we skip the first 5 entries
*(start++) = ++__afl_final_loc;

View File

@ -14,21 +14,7 @@
#include <fstream>
#include <cmath>
#if LLVM_VERSION_MAJOR >= 13
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Analysis/PostDominators.h"
#endif
// #define LEOPARD_USE_WEIGHTS 1
#include <llvm/Support/raw_ostream.h>
#define IS_EXTERN extern
#include "afl-llvm-common.h"
@ -40,79 +26,11 @@ static std::list<std::string> allowListFunctions;
static std::list<std::string> denyListFiles;
static std::list<std::string> denyListFunctions;
#if LLVM_VERSION_MAJOR >= 13
// Leopard complexity calculations
#ifndef LEOPARD_USE_WEIGHTS
#define C1_WEIGHT 1.0
#define C2_WEIGHT 1.0
#define C3_WEIGHT 1.0
#define C4_WEIGHT 1.0
#define V1_WEIGHT 1.0
#define V2_WEIGHT 1.0
#define V3_WEIGHT 1.0
#define V4_WEIGHT 1.0
#define V5_WEIGHT 1.0
#define V6_WEIGHT 1.0
#define V7_WEIGHT 1.0
#define V8_WEIGHT 1.0
#define V9_WEIGHT 1.0
#define V10_WEIGHT 1.0
#define V11_WEIGHT 1.0
#else
// Cyclomatic weights
#define C1_WEIGHT 1.0
#define C2_WEIGHT 1.0
#define C3_WEIGHT 1.0
#define C4_WEIGHT 1.0
// Vulnerability weights
#define V1_WEIGHT 1.5
#define V2_WEIGHT 3.25
#define V3_WEIGHT 4.25
#define V4_WEIGHT 3.0
#define V5_WEIGHT 4.25
#define V6_WEIGHT 7.75
#define V7_WEIGHT 2.5
#define V8_WEIGHT 2.5
#define V9_WEIGHT 4.0
#define V10_WEIGHT 5.25
#define V11_WEIGHT 3.5
#endif
static void countNestedLoops(Loop *L, int depth, unsigned int &loopCount,
unsigned int &nestedLoopCount,
unsigned int &maxNestingLevel) {
loopCount++;
if (!L->getSubLoops().empty()) {
// Increment nested loop count by the number of sub-loops
nestedLoopCount += L->getSubLoops().size();
// Update maximum nesting level
if (depth > maxNestingLevel) { maxNestingLevel = depth; }
// Recursively count sub-loops
for (Loop *SubLoop : L->getSubLoops()) {
countNestedLoops(SubLoop, depth + 1, loopCount, nestedLoopCount,
maxNestingLevel);
}
}
}
unsigned int calcCyclomaticComplexity(llvm::Function *F,
const llvm::LoopInfo *LI) {
unsigned int calcCyclomaticComplexity(llvm::Function *F) {
unsigned int numBlocks = 0;
unsigned int numEdges = 0;
unsigned int numCalls = 0;
unsigned int numLoops = 0;
unsigned int numNestedLoops = 0;
unsigned int maxLoopNesting = 0;
// Iterate through each basic block in the function
for (BasicBlock &BB : *F) {
@ -137,197 +55,22 @@ unsigned int calcCyclomaticComplexity(llvm::Function *F,
}
for (Loop *L : *LI) {
countNestedLoops(L, 1, numLoops, numNestedLoops, maxLoopNesting);
}
// Cyclomatic Complexity V(G) = E - N + 2P
// For a single function, P (number of connected components) is 1
// Calls are considered to be an edge
unsigned int cc =
(unsigned int)(C1_WEIGHT * (double)(2 + numCalls + numEdges - numBlocks) +
C2_WEIGHT * (double)numLoops +
C3_WEIGHT * (double)numNestedLoops +
C4_WEIGHT * (double)maxLoopNesting);
unsigned int CC = 2 + numCalls + numEdges - numBlocks;
// if (debug) {
fprintf(stderr,
"CyclomaticComplexity for %s: %u (calls=%u edges=%u blocks=%u "
"loops=%u nested_loops=%u max_loop_nesting_level=%u)\n",
F->getName().str().c_str(), cc, numCalls, numEdges, numBlocks,
numLoops, numNestedLoops, maxLoopNesting);
fprintf(stderr, "CyclomaticComplexity for %s: %u\n",
F->getName().str().c_str(), CC);
//}
return cc;
return CC;
}
unsigned int calcVulnerabilityScore(llvm::Function *F, const llvm::LoopInfo *LI,
const llvm::DominatorTree *DT,
const llvm::PostDominatorTree *PDT) {
unsigned int score = 0;
// V1 and V2
unsigned paramCount = F->arg_size();
unsigned calledParamCount = 0;
// V3, V4 and V5
unsigned pointerArithCount = 0;
unsigned totalPointerArithParams = 0;
unsigned maxPointerArithVars = 0;
// V6 to V11
unsigned nestedControlStructCount = 0;
unsigned maxNestingLevel = 0;
unsigned maxControlDependentControls = 0;
unsigned maxDataDependentControls = 0;
unsigned ifWithoutElseCount = 0;
unsigned controlPredicateVarCount = 0;
std::function<void(Loop *, unsigned)> countNestedLoops = [&](Loop *L,
unsigned depth) {
nestedControlStructCount++;
if (depth > maxNestingLevel) { maxNestingLevel = depth; }
for (Loop *SubLoop : L->getSubLoops()) {
countNestedLoops(SubLoop, depth + 1);
}
};
for (Loop *TopLoop : *LI) {
countNestedLoops(TopLoop, 1);
}
for (inst_iterator I = inst_begin(*F), E = inst_end(*F); I != E; ++I) {
if (CallInst *CI = dyn_cast<CallInst>(&*I)) {
if (Function *CalledF = CI->getCalledFunction()) {
calledParamCount += CalledF->arg_size();
}
}
if (auto *GEP = dyn_cast<GetElementPtrInst>(&*I)) {
pointerArithCount++;
unsigned numPointerArithVars = GEP->getNumOperands();
totalPointerArithParams += numPointerArithVars;
if (numPointerArithVars > maxPointerArithVars) {
maxPointerArithVars = numPointerArithVars;
}
}
if (BranchInst *BI = dyn_cast<BranchInst>(&*I)) {
if (BI->isConditional()) {
unsigned controlDependentCount = 0;
unsigned dataDependentCount = 0;
for (Use &U : BI->operands()) {
if (Instruction *Op = dyn_cast<Instruction>(U.get())) {
if (DT->dominates(Op, &*I)) { controlDependentCount++; }
if (PDT->dominates(Op, &*I)) { dataDependentCount++; }
}
}
if (controlDependentCount > maxControlDependentControls) {
maxControlDependentControls = controlDependentCount;
}
if (dataDependentCount > maxDataDependentControls) {
maxDataDependentControls = dataDependentCount;
}
// Check for if() without else
BasicBlock *TrueBB = BI->getSuccessor(0);
BasicBlock *FalseBB = BI->getSuccessor(1);
if (TrueBB && FalseBB) {
if (TrueBB->getSinglePredecessor() == &*I->getParent() &&
FalseBB->empty()) {
ifWithoutElseCount++;
}
}
// Count variables involved in control predicates
if (ICmpInst *ICmp = dyn_cast<ICmpInst>(BI->getCondition())) {
controlPredicateVarCount += ICmp->getNumOperands();
} else if (BinaryOperator *BinOp =
dyn_cast<BinaryOperator>(BI->getCondition())) {
controlPredicateVarCount += BinOp->getNumOperands();
} else if (SelectInst *Select =
dyn_cast<SelectInst>(BI->getCondition())) {
controlPredicateVarCount += Select->getNumOperands();
}
}
}
}
score = (unsigned int)(V1_WEIGHT * (double)paramCount +
V2_WEIGHT * (double)calledParamCount +
V3_WEIGHT * (double)pointerArithCount +
V4_WEIGHT * (double)totalPointerArithParams +
V5_WEIGHT * (double)maxPointerArithVars +
V6_WEIGHT * (double)nestedControlStructCount +
V7_WEIGHT * (double)maxNestingLevel +
V8_WEIGHT * (double)maxControlDependentControls +
V9_WEIGHT * (double)maxDataDependentControls +
V10_WEIGHT * (double)ifWithoutElseCount +
V11_WEIGHT * (double)controlPredicateVarCount);
fprintf(stderr,
"VulnerabilityScore for %s: %u (paramCount=%u "
"calledParamCount=%u|pointerArithCount=%u totalPointerArithParams=%u "
"maxPointerArithVars=%u|maxNestingLevel=%u "
"maxControlDependentControls=%u maxDataDependentControls=%u "
"ifWithoutElseCount=%u controlPredicateVarCount=%u)\n",
F->getName().str().c_str(), score, paramCount, calledParamCount,
pointerArithCount, totalPointerArithParams, maxPointerArithVars,
maxNestingLevel, maxControlDependentControls,
maxDataDependentControls, ifWithoutElseCount,
controlPredicateVarCount);
return score;
}
#endif
char *getBBName(const llvm::BasicBlock *BB) {
static char *name;

View File

@ -12,7 +12,6 @@
#include <sys/time.h>
#include "llvm/Config/llvm-config.h"
#if LLVM_VERSION_MAJOR == 3 && LLVM_VERSION_MINOR < 5
typedef long double max_align_t;
#endif
@ -27,19 +26,6 @@ typedef long double max_align_t;
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#endif
#if LLVM_VERSION_MAJOR > 12
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Analysis/PostDominators.h"
#endif
#if LLVM_VERSION_MAJOR > 3 || \
(LLVM_VERSION_MAJOR == 3 && LLVM_VERSION_MINOR > 4)
#include "llvm/IR/DebugInfo.h"
@ -69,11 +55,7 @@ void initInstrumentList();
bool isInInstrumentList(llvm::Function *F, std::string Filename);
unsigned long long int calculateCollisions(uint32_t edges);
void scanForDangerousFunctions(llvm::Module *M);
unsigned int calcCyclomaticComplexity(llvm::Function *F,
const llvm::LoopInfo *LI);
unsigned int calcVulnerabilityScore(llvm::Function *F, const llvm::LoopInfo *LI,
const llvm::DominatorTree *DT,
const llvm::PostDominatorTree *PDT);
unsigned int calcCyclomaticComplexity(llvm::Function *F);
#ifndef IS_EXTERN
#define IS_EXTERN

View File

@ -54,15 +54,15 @@
#define nullptr 0
#endif
#include <set>
#include "afl-llvm-common.h"
#if LLVM_MAJOR >= 19
#define STARTSWITH starts_with
#else
#define STARTSWITH startswith
#endif
#include <set>
#include "afl-llvm-common.h"
using namespace llvm;
namespace {

BIN
model.bin Normal file

Binary file not shown.

View File

@ -202,6 +202,8 @@ QEMU_CONF_FLAGS=" \
--disable-xfsctl \
--target-list="${CPU_TARGET}-linux-user" \
--without-default-devices \
--extra-cflags=-Wno-int-conversion \
--disable-werror \
"
if [ -n "${CROSS_PREFIX}" ]; then
@ -243,7 +245,6 @@ if [ "$DEBUG" = "1" ]; then
--enable-debug-stack-usage \
--enable-debug-tcg \
--enable-qom-cast-debug \
--enable-werror \
"
else
@ -254,7 +255,6 @@ else
--disable-debug-tcg \
--disable-qom-cast-debug \
--disable-stack-protector \
--disable-werror \
--disable-docs \
"

View File

@ -2366,8 +2366,7 @@ static void add_aflpplib(aflcc_state_t *aflcc) {
insert_param(aflcc, afllib);
#ifdef __APPLE__
insert_param(aflcc, "-Wl,-undefined");
insert_param(aflcc, "dynamic_lookup");
insert_param(aflcc, "-Wl,-undefined,dynamic_lookup");
#endif
}
@ -2794,11 +2793,11 @@ static void maybe_usage(aflcc_state_t *aflcc, int argc, char **argv) {
"MODES: NCC PERSIST DICT LAF "
"CMPLOG SELECT\n"
" [LLVM] LLVM: %s%s\n"
" PCGUARD %s yes yes module yes yes "
" PCGUARD %s yes yes module yes yes "
"yes\n"
" NATIVE AVAILABLE no yes no no "
"part. yes\n"
" CLASSIC %s no yes module yes yes "
" CLASSIC %s no yes module yes yes "
"yes\n"
" - NORMAL\n"
" - CALLER\n"
@ -2815,10 +2814,10 @@ static void maybe_usage(aflcc_state_t *aflcc, int argc, char **argv) {
" [GCC/CLANG] simple gcc/clang: %s%s\n"
" CLASSIC DEFAULT no no no no no "
"no\n\n",
aflcc->have_llvm ? "AVAILABLE" : "unavailable!",
aflcc->have_llvm ? "AVAILABLE " : "unavailable!",
aflcc->compiler_mode == LLVM ? " [SELECTED]" : "",
aflcc->have_llvm ? "AVAILABLE" : "unavailable!",
aflcc->have_llvm ? "AVAILABLE" : "unavailable!",
aflcc->have_llvm ? "AVAILABLE " : "unavailable!",
aflcc->have_llvm ? "AVAILABLE " : "unavailable!",
aflcc->have_lto ? "AVAILABLE" : "unavailable!",
aflcc->compiler_mode == LTO ? " [SELECTED]" : "",
aflcc->have_gcc_plugin ? "AVAILABLE" : "unavailable!",
@ -2844,7 +2843,7 @@ static void maybe_usage(aflcc_state_t *aflcc, int argc, char **argv) {
" The best is LTO but it often needs RANLIB and AR settings outside "
"of afl-cc.\n\n");
#if LLVM_MAJOR > 10 || (LLVM_MAJOR == 10 && LLVM_MINOR > 0)
#if LLVM_MAJOR >= 11 || (LLVM_MAJOR == 10 && LLVM_MINOR > 0)
#define NATIVE_MSG \
" LLVM-NATIVE: use llvm's native PCGUARD instrumentation (less " \
"performant)\n"

View File

@ -59,27 +59,6 @@ u8 last_intr = 0;
#define AFL_PATH "/usr/local/lib/afl/"
#endif
/* - Some BSD (i.e.: FreeBSD) offer the FAST clock source as
* equivalent to Linux COARSE clock source. Aliasing COARSE to
* FAST on such systems when COARSE is not already defined.
* - macOS has no support of CLOCK_MONOTONIC_COARSE clock type.
*/
#if defined(OS_DARWIN) || defined(OS_SUNOS) || defined(__APPLE__) || \
defined(__sun) || defined(__NetBSD__)
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#elif defined(OS_FREEBSD)
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST
#endif
/* Convert seconds to milliseconds. */
#define SEC_TO_MS(sec) ((sec) * 1000)
/* Convert seconds to microseconds. */
#define SEC_TO_US(sec) ((sec) * 1000000)
/* Convert nanoseconds to milliseconds. */
#define NS_TO_MS(ns) ((ns) / 1000000)
/* Convert nanoseconds to microseconds. */
#define NS_TO_US(ns) ((ns) / 1000)
void *afl_memmem(const void *haystack, size_t haystacklen, const void *needle,
size_t needlelen) {
@ -507,7 +486,7 @@ u8 *find_afl_binary(u8 *own_loc, u8 *fname) {
if ((tmp = strrchr(fname, '.'))) {
if (!strcasecmp(tmp, ".so") || !strcasecmp(tmp, ".dylib")) { perm = R_OK; }
if (!strcasecmp(tmp, ".bin") || !strcasecmp(tmp, ".so") || !strcasecmp(tmp, ".dylib")) { perm = R_OK; }
}
@ -997,33 +976,25 @@ void read_bitmap(u8 *fname, u8 *map, size_t len) {
inline u64 get_cur_time(void) {
struct timespec ts;
int rc = clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
if (rc == -1) {
struct timeval tv;
struct timezone tz;
PFATAL("Failed to obtain timestamp (errno = %i: %s)\n", errno,
strerror(errno));
gettimeofday(&tv, &tz);
}
return SEC_TO_MS((uint64_t)ts.tv_sec) + NS_TO_MS((uint64_t)ts.tv_nsec);
return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
}
/* Get unix time in microseconds */
u64 get_cur_time_us(void) {
inline u64 get_cur_time_us(void) {
struct timespec ts;
int rc = clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
if (rc == -1) {
struct timeval tv;
struct timezone tz;
PFATAL("Failed to obtain timestamp (errno = %i: %s)\n", errno,
strerror(errno));
gettimeofday(&tv, &tz);
}
return SEC_TO_US((uint64_t)ts.tv_sec) + NS_TO_US((uint64_t)ts.tv_nsec);
return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
}

View File

@ -481,14 +481,6 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
s32 fd;
u64 cksum = 0;
// will be classified away otherwise
if (unlikely((afl->current_score = *(u32 *)((u8 *)afl->fsrv.trace_bits + 1)) >
0)) {
memset(afl->fsrv.trace_bits + 1, 0, 4);
}
/* Update path frequency. */
/* Generating a hash on every input is super expensive. Bad idea and should

View File

@ -26,6 +26,7 @@
#include <limits.h>
#include <ctype.h>
#include <math.h>
#include <xgboost/c_api.h>
#ifdef _STANDALONE_MODULE
void minimize_bits(afl_state_t *afl, u8 *dst, u8 *src) {
@ -60,61 +61,27 @@ inline u32 select_next_queue_entry(afl_state_t *afl) {
}
// #define DEBUG_QUEUE 1
double compute_weight(afl_state_t *afl, struct queue_entry *q,
double avg_exec_us, double avg_bitmap_size,
double avg_top_size, double avg_score) {
double avg_top_size) {
double weight = 1.0;
/*
if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) {
u32 hits = afl->n_fuzz[q->n_fuzz_entry];
if (likely(hits)) { weight /= (log10(hits) + 1); }
if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) {
}
u32 hits = afl->n_fuzz[q->n_fuzz_entry];
if (likely(hits)) { weight /= (log10(hits) + 1); }
#ifdef DEBUG_QUEUE
fprintf(stderr, "WEIGHT id=%u fname=%s start_weight=1.0\n", q->id,
q->fname); fprintf(stderr, " after step 1: %.2f (log10(hits))\n", weight);
#endif
if (likely(afl->schedule < RARE)) { weight *= (avg_exec_us / q->exec_us); }
#ifdef DEBUG_QUEUE
fprintf(stderr, " after step 2: %.2f (exec_us)\n", weight);
#endif
weight *= (log(q->bitmap_size) / avg_bitmap_size);
#ifdef DEBUG_QUEUE
fprintf(stderr, " after step 3: %.2f (log(bitmap_size))\n", weight);
#endif
weight *= (1 + (q->tc_ref / avg_top_size));
#ifdef DEBUG_QUEUE
fprintf(stderr, " after step 4: %.2f (top_size)\n", weight);
#endif
if (unlikely(avg_score != 0.0)) { weight *= (q->score / avg_score); }
#ifdef DEBUG_QUEUE
fprintf(stderr, " after step 5: %.2f (score)\n", weight);
#endif
}
if (unlikely(weight < 0.1)) { weight = 0.1; }
if (unlikely(q->favored)) {
if (likely(afl->schedule < RARE)) { weight *= (avg_exec_us / q->exec_us); }
weight *= (log(q->bitmap_size) / avg_bitmap_size);
weight *= (1 + (q->tc_ref / avg_top_size));
weight += 1;
weight *= 5;
}
#ifdef DEBUG_QUEUE
fprintf(stderr, " after step 6: %.2f (favored)\n", weight);
#endif
*/
if (unlikely(!q->was_fuzzed)) { weight *= 5; }
#ifdef DEBUG_QUEUE
fprintf(stderr, " after step 7: %.2f (was_fuzzed)\n", weight);
#endif
if (unlikely(q->fs_redundant)) { weight = 0.0; }
#ifdef DEBUG_QUEUE
fprintf(stderr, " after final step: %.2f (fs_redundant)\n", weight);
#endif
if (unlikely(weight < 0.1)) { weight = 0.1; }
if (unlikely(q->favored)) { weight *= 5; }
if (unlikely(!q->was_fuzzed)) { weight *= 2; }
if (unlikely(q->fs_redundant)) { weight *= 0.8; }
return weight;
@ -124,8 +91,7 @@ double compute_weight(afl_state_t *afl, struct queue_entry *q,
void create_alias_table(afl_state_t *afl) {
u32 n = afl->queued_items, i = 0, nSmall = 0, nLarge = n - 1,
exploit = afl->fuzz_mode;
u32 n = afl->queued_items, i = 0, nSmall = 0, nLarge = n - 1;
double sum = 0;
double *P = (double *)afl_realloc(AFL_BUF_PARAM(out), n * sizeof(double));
@ -152,8 +118,7 @@ void create_alias_table(afl_state_t *afl) {
double avg_exec_us = 0.0;
double avg_bitmap_size = 0.0;
double avg_top_size = 0.0;
double avg_score = 0.0;
double avg_len = 0.0;
u32 active = 0;
for (i = 0; i < n; i++) {
@ -164,9 +129,8 @@ void create_alias_table(afl_state_t *afl) {
if (likely(!q->disabled)) {
avg_exec_us += q->exec_us;
avg_bitmap_size += log(q->bitmap_size);
avg_top_size += q->tc_ref;
if (exploit) { avg_score += /*log(*/ q->score /*)*/; }
avg_bitmap_size += q->bitmap_size;
avg_len += q->len;
++active;
}
@ -175,9 +139,10 @@ void create_alias_table(afl_state_t *afl) {
avg_exec_us /= active;
avg_bitmap_size /= active;
avg_top_size /= active;
avg_len /= active;
if (exploit) { avg_score /= active; }
float *table = malloc((active + 1) * 3 * sizeof(float));
float *pentry = table;
for (i = 0; i < n; i++) {
@ -185,29 +150,52 @@ void create_alias_table(afl_state_t *afl) {
if (likely(!q->disabled)) {
q->weight = compute_weight(afl, q, avg_exec_us, avg_bitmap_size,
avg_top_size, avg_score);
*pentry++ = q->len / avg_len;
*pentry++ = q->exec_us / avg_exec_us;
*pentry++ = q->bitmap_size / avg_bitmap_size;
q->perf_score = calculate_score(afl, q);
sum += q->weight;
}
}
if (unlikely(afl->schedule == MMOPT) && afl->queued_discovered) {
DMatrixHandle dtest;
BoosterHandle booster;
u32 cnt = afl->queued_discovered >= 5 ? 5 : afl->queued_discovered;
// Erstellen einer DMatrix aus dem Array
XGDMatrixCreateFromMat((float *)table, 3, active, -1, &dtest);
XGBoosterCreate(&dtest, 1, &booster);
u8* model = NULL;//find_afl_binary("/out", "model.bin");
if (!model) model = find_afl_binary("./", "model.bin");
if (!model) FATAL("mode.bin not found!");
if (XGBoosterLoadModel(booster, "./model.bin"))
FATAL("model load failed!");
bst_ulong out_len;
const float *predictions;
XGBoosterPredict(booster, dtest, 0, 0, 0, &out_len, &predictions);
for (i = n - cnt; i < n; i++) {
// Ausgabe der Vorhersagen
int count = 0;
for (i = 0; i < n; i++) {
struct queue_entry *q = afl->queue_buf[i];
struct queue_entry *q = afl->queue_buf[i];
if (likely(!q->disabled)) { q->weight *= 2.0; }
if (likely(!q->disabled)) {
if (unlikely(afl->debug))
fprintf(stderr, "Prediction[%u] = %f\n", i, predictions[count]);
afl->queue_buf[i]->weight = predictions[count++];
sum += predictions[count++];
}
}
// Freigeben der Ressourcen
XGBoosterFree(booster);
XGDMatrixFree(dtest);
free(table);
for (i = 0; i < n; i++) {
// weight is always 0 for disabled entries
@ -635,13 +623,6 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
q->trace_mini = NULL;
q->testcase_buf = NULL;
q->mother = afl->queue_cur;
q->cmp = q->fcmp = q->rtn = -1;
if (afl->queue_cur) {
afl->queue_cur->found++;
}
q->score = afl->current_score;
if (unlikely(!q->score)) { q->score = 1; }
#ifdef INTROSPECTION
q->bitsmap_size = afl->bitsmap_size;
@ -960,8 +941,6 @@ u32 calculate_score(afl_state_t *afl, struct queue_entry *q) {
u32 avg_bitmap_size = afl->total_bitmap_size / bitmap_entries;
u32 perf_score = 100;
return perf_score;
/* Adjust score based on execution speed of this path, compared to the
global average. Multiplier ranges from 0.1x to 3x. Fast inputs are
less expensive to fuzz, so we're giving them more air time. */
@ -1349,7 +1328,8 @@ inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
static u32 do_once = 0; // because even threaded we would want this. WIP
while (unlikely(
afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size ||
(afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size &&
afl->q_testcase_cache_count > 1) ||
afl->q_testcase_cache_count >= afl->q_testcase_max_cache_entries - 1)) {
/* We want a max number of entries to the cache that we learn.

View File

@ -3072,8 +3072,6 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
afl->stage_max = 0;
afl->stage_cur = 0;
afl->queue_cur->cmp = afl->queue_cur->fcmp = afl->queue_cur->rtn = 0;
u32 lvl = (afl->queue_cur->colorized ? 0 : LVL1) +
(afl->cmplog_lvl == CMPLOG_LVL_MAX ? LVL3 : 0);
@ -3091,13 +3089,6 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
if (!afl->shm.cmp_map->headers[k].hits) { continue; }
if (afl->shm.cmp_map->headers[k].type != CMP_TYPE_INS)
afl->queue_cur->rtn++;
else if (unlikely((afl->shm.cmp_map->headers[k].attribute & 8) == 8))
afl->queue_cur->fcmp++;
else
afl->queue_cur->cmp++;
if (afl->pass_stats[k].faileds >= CMPLOG_FAIL_MAX ||
afl->pass_stats[k].total >= CMPLOG_FAIL_MAX) {

View File

@ -321,8 +321,10 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
#ifndef __HAIKU__
if (getrusage(RUSAGE_CHILDREN, &rus)) { rus.ru_maxrss = 0; }
#endif
u64 runtime = afl->prev_run_time + cur_time - afl->start_time;
if (!runtime) { runtime = 1; }
u64 runtime_ms = afl->prev_run_time + cur_time - afl->start_time;
u64 overhead_ms =
(afl->calibration_time_us + afl->sync_time_us + afl->trim_time_us) / 1000;
if (!runtime_ms) { runtime_ms = 1; }
fprintf(
f,
@ -375,20 +377,17 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
"target_mode : %s%s%s%s%s%s%s%s%s%s\n"
"command_line : %s\n",
(afl->start_time /*- afl->prev_run_time*/) / 1000, cur_time / 1000,
runtime / 1000, (u32)getpid(),
runtime_ms / 1000, (u32)getpid(),
afl->queue_cycle ? (afl->queue_cycle - 1) : 0, afl->cycles_wo_finds,
afl->longest_find_time > cur_time - afl->last_find_time
? afl->longest_find_time / 1000
: ((afl->start_time == 0 || afl->last_find_time == 0)
? 0
: (cur_time - afl->last_find_time) / 1000),
(runtime -
((afl->calibration_time_us + afl->sync_time_us + afl->trim_time_us) /
1000)) /
1000,
(runtime_ms - MIN(runtime_ms, overhead_ms)) / 1000,
afl->calibration_time_us / 1000000, afl->sync_time_us / 1000000,
afl->trim_time_us / 1000000, afl->fsrv.total_execs,
afl->fsrv.total_execs / ((double)(runtime) / 1000),
afl->fsrv.total_execs / ((double)(runtime_ms) / 1000),
afl->last_avg_execs_saved, afl->queued_items, afl->queued_favored,
afl->queued_discovered, afl->queued_imported, afl->queued_variable,
afl->max_depth, afl->current_entry, afl->pending_favored,
@ -632,9 +631,10 @@ void show_stats_normal(afl_state_t *afl) {
cur_ms = get_cur_time();
if (afl->most_time_key) {
if (afl->most_time_key && afl->queue_cycle) {
if (afl->most_time * 1000 < cur_ms - afl->start_time) {
if (afl->most_time * 1000 + afl->sync_time_us / 1000 <
cur_ms - afl->start_time) {
afl->most_time_key = 2;
afl->stop_soon = 2;
@ -643,7 +643,7 @@ void show_stats_normal(afl_state_t *afl) {
}
if (afl->most_execs_key == 1) {
if (afl->most_execs_key == 1 && afl->queue_cycle) {
if (afl->most_execs <= afl->fsrv.total_execs) {
@ -1331,7 +1331,9 @@ void show_stats_normal(afl_state_t *afl) {
sprintf(tmp, "disabled, ");
} else if (unlikely(!afl->bytes_trim_out)) {
} else if (unlikely(!afl->bytes_trim_out ||
afl->bytes_trim_in <= afl->bytes_trim_out)) {
sprintf(tmp, "n/a, ");
@ -1348,7 +1350,9 @@ void show_stats_normal(afl_state_t *afl) {
strcat(tmp, "disabled");
} else if (unlikely(!afl->blocks_eff_total)) {
} else if (unlikely(!afl->blocks_eff_total ||
afl->blocks_eff_select >= afl->blocks_eff_total)) {
strcat(tmp, "n/a");
@ -1462,9 +1466,10 @@ void show_stats_pizza(afl_state_t *afl) {
cur_ms = get_cur_time();
if (afl->most_time_key) {
if (afl->most_time_key && afl->queue_cycle) {
if (afl->most_time * 1000 < cur_ms - afl->start_time) {
if (afl->most_time * 1000 + afl->sync_time_us / 1000 <
cur_ms - afl->start_time) {
afl->most_time_key = 2;
afl->stop_soon = 2;
@ -1473,7 +1478,7 @@ void show_stats_pizza(afl_state_t *afl) {
}
if (afl->most_execs_key == 1) {
if (afl->most_execs_key == 1 && afl->queue_cycle) {
if (afl->most_execs <= afl->fsrv.total_execs) {

View File

@ -2594,13 +2594,6 @@ int main(int argc, char **argv_orig, char **envp) {
sync_fuzzers(afl);
if (!afl->queue_cycle && afl->afl_env.afl_import_first) {
// real start time, we reset, so this works correctly with -V
afl->start_time = get_cur_time();
}
}
++afl->queue_cycle;
@ -2868,9 +2861,7 @@ int main(int argc, char **argv_orig, char **envp) {
}
u64 execs_before = afl->fsrv.total_execs;
skipped_fuzz = fuzz_one(afl);
afl->queue_cur->total_execs += afl->fsrv.total_execs - execs_before;
#ifdef INTROSPECTION
++afl->queue_cur->stats_selected;
@ -3069,39 +3060,6 @@ stop_fuzzing:
}
if (getenv("AFL_DUMP_QUEUE_ON_EXIT")) {
for (u32 mode = 0; mode < 1; mode++) {
afl->fuzz_mode = mode;
create_alias_table(afl);
fprintf(stderr, "\nQUEUE DUMP MODE: %u\n", mode);
for (u32 k = 0; k < afl->queued_items; ++k) {
struct queue_entry *q = afl->queue_buf[k];
fprintf(stderr,
"item=%u fname=%s len=%u exec_us=%llu total_execs=%llu "
"has_new_cov=%u "
"var_behavior=%u favored=%u fs_redundant=%u disabled=%u "
"bitmap_size=%u tc_ref=%u fuzz_level=%u was_fuzzed=%u "
"cmp=%d fcmp=%d rtn=%d "
"mother=%d found=%u perf_score=%.2f weight=%.2f score=%u\n",
k, q->fname, q->len, q->exec_us, q->total_execs, q->has_new_cov,
q->var_behavior, q->favored, q->fs_redundant, q->disabled,
q->bitmap_size, q->tc_ref, q->fuzz_level, q->was_fuzzed,
q->cmp, q->fcmp, q->rtn,
q->mother == NULL ? -1 : (int)q->mother->id, q->found,
q->perf_score, q->weight, q->score);
}
fprintf(stderr, "\n");
}
}
if (frida_afl_preload) { ck_free(frida_afl_preload); }
fclose(afl->fsrv.plot_file);

View File

@ -83,8 +83,6 @@ static u32 tcnt, highest; /* tuple content information */
static u32 in_len; /* Input data length */
static u32 score;
static u32 map_size = MAP_SIZE, timed_out = 0;
static bool quiet_mode, /* Hide non-essential messages? */
@ -180,7 +178,8 @@ fsrv_run_result_t fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv,
void classify_counts(afl_forkserver_t *fsrv) {
u8 *mem = fsrv->trace_bits;
const u8 *map = binary_mode ? count_class_binary : count_class_human;
const u8 *map = (binary_mode || collect_coverage) ? count_class_binary
: count_class_human;
u32 i = map_size;
@ -226,8 +225,13 @@ static void at_exit_handler(void) {
if (remove_shm) {
remove_shm = false;
if (shm.map) afl_shm_deinit(&shm);
if (fsrv->use_shmem_fuzz) deinit_shmem(fsrv, shm_fuzz);
if ((shm_fuzz && shm_fuzz->shmemfuzz_mode) || fsrv->use_shmem_fuzz) {
shm_fuzz = deinit_shmem(fsrv, shm_fuzz);
}
}
@ -240,23 +244,9 @@ static void at_exit_handler(void) {
static void analyze_results(afl_forkserver_t *fsrv) {
u32 i;
if (unlikely((score = *(u32 *)((u8 *)fsrv->trace_bits + 1)) > 0)) {
memset(fsrv->trace_bits + 1, 0, 4);
}
for (i = 0; i < map_size; i++) {
if (fsrv->trace_bits[i]) {
total += fsrv->trace_bits[i];
if (fsrv->trace_bits[i] > highest) highest = fsrv->trace_bits[i];
// if (!coverage_map[i]) { coverage_map[i] = 1; }
coverage_map[i] |= fsrv->trace_bits[i];
}
if (fsrv->trace_bits[i]) { coverage_map[i] |= fsrv->trace_bits[i]; }
}
@ -278,12 +268,6 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv, u8 *outfile) {
}
if (unlikely((score = *(u32 *)((u8 *)fsrv->trace_bits + 1)) > 0)) {
memset(fsrv->trace_bits + 1, 0, 4);
}
if (cmin_mode &&
(fsrv->last_run_timed_out || (!caa && child_crashed != cco))) {
@ -1354,6 +1338,8 @@ int main(int argc, char **argv_orig, char **envp) {
}
if (collect_coverage) { binary_mode = false; } // ensure this
if (optind == argc || !out_file) { usage(argv[0]); }
if (in_dir && in_filelist) { FATAL("you can only specify either -i or -I"); }
@ -1546,6 +1532,8 @@ int main(int argc, char **argv_orig, char **envp) {
/* initialize cmplog_mode */
shm_fuzz->cmplog_mode = 0;
atexit(at_exit_handler);
u8 *map = afl_shm_init(shm_fuzz, MAX_FILE + sizeof(u32), 1);
shm_fuzz->shmemfuzz_mode = true;
if (!map) { FATAL("BUG: Zero return from afl_shm_init."); }
@ -1692,12 +1680,9 @@ int main(int argc, char **argv_orig, char **envp) {
if ((coverage_map = (u8 *)malloc(map_size + 64)) == NULL)
FATAL("coult not grab memory");
edges_only = false;
raw_instr_output = true;
}
atexit(at_exit_handler);
if (get_afl_env("AFL_DEBUG")) {
int j = optind;
@ -1714,9 +1699,12 @@ int main(int argc, char **argv_orig, char **envp) {
map_size = fsrv->map_size;
if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz)
if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz) {
shm_fuzz = deinit_shmem(fsrv, shm_fuzz);
}
if (in_dir) {
if (execute_testcases(in_dir) == 0) {
@ -1748,9 +1736,12 @@ int main(int argc, char **argv_orig, char **envp) {
} else {
if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz)
if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz) {
shm_fuzz = deinit_shmem(fsrv, shm_fuzz);
}
#ifdef __linux__
if (!fsrv->nyx_mode) {
@ -1781,20 +1772,12 @@ int main(int argc, char **argv_orig, char **envp) {
OKF("Captured %u tuples (map size %u, highest value %u, total values %llu) "
"in '%s'." cRST,
tcnt, fsrv->real_map_size, highest, total, out_file);
if (collect_coverage) {
if (collect_coverage)
OKF("A coverage of %u edges were achieved out of %u existing (%.02f%%) "
"with %llu input files.",
tcnt, map_size, ((float)tcnt * 100) / (float)map_size,
fsrv->total_execs);
} else if (score > 0) {
OKF("Path score is %u (cyclomatic and/or vulnerability scoring).\n",
score);
}
}
if (stdin_file) {
@ -1805,9 +1788,9 @@ int main(int argc, char **argv_orig, char **envp) {
}
remove_shm = 0;
remove_shm = false;
afl_shm_deinit(&shm);
if (fsrv->use_shmem_fuzz) shm_fuzz = deinit_shmem(fsrv, shm_fuzz);
if (fsrv->use_shmem_fuzz) { shm_fuzz = deinit_shmem(fsrv, shm_fuzz); }
u32 ret;

View File

@ -1 +1 @@
764b66b2
4b4fdab1

View File

@ -136,7 +136,7 @@ def overlap_alignments(segments, memory):
# https://github.com/llvm-mirror/llvm/blob/master/include/llvm/ADT/Triple.h
def get_arch():
arch, arch_vendor, arch_os = lldb.target.GetTriple().split("-")
arch, arch_vendor, arch_os, *arch_remains = lldb.debugger.GetSelectedTarget().GetTriple().split("-")
if arch == "x86_64":
return "x64"
elif arch == "x86" or arch == "i386":
@ -165,7 +165,7 @@ def dump_arch_info():
def dump_regs():
reg_state = {}
for reg_list in lldb.frame.GetRegisters():
for reg_list in lldb.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame().GetRegisters():
if "general purpose registers" in reg_list.GetName().lower():
for reg in reg_list:
reg_state[reg.GetName()] = int(reg.GetValue(), 16)
@ -180,8 +180,9 @@ def get_section_info(sec):
module_name = sec.addr.module.file.GetFilename()
module_name = module_name if module_name is not None else ""
long_name = module_name + "." + name
load_addr = sec.addr.GetLoadAddress(lldb.debugger.GetSelectedTarget())
return sec.addr.load_addr, (sec.addr.load_addr + sec.size), sec.size, long_name
return load_addr, (load_addr + sec.size), sec.size, long_name
def dump_process_memory(output_dir):
@ -191,7 +192,7 @@ def dump_process_memory(output_dir):
# 1st pass:
# Loop over the segments, fill in the segment info dictionary
for module in lldb.target.module_iter():
for module in lldb.debugger.GetSelectedTarget().module_iter():
for seg_ea in module.section_iter():
seg_info = {"module": module.file.GetFilename()}
(
@ -201,8 +202,8 @@ def dump_process_memory(output_dir):
seg_info["name"],
) = get_section_info(seg_ea)
# TODO: Ugly hack for -1 LONG address on 32-bit
if seg_info["start"] >= sys.maxint or seg_size <= 0:
print "Throwing away page: {}".format(seg_info["name"])
if seg_info["start"] >= sys.maxsize or seg_size <= 0:
print ("Throwing away page: {}".format(seg_info["name"]))
continue
# Page-align segment
@ -212,7 +213,7 @@ def dump_process_memory(output_dir):
raw_segment_list.append(seg_info)
# Add the stack memory region (just hardcode 0x1000 around the current SP)
sp = lldb.frame.GetSP()
sp = lldb.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame().GetSP()
start_sp = ALIGN_PAGE_DOWN(sp)
raw_segment_list.append(
{"start": start_sp, "end": start_sp + 0x1000, "name": "STACK"}
@ -228,7 +229,7 @@ def dump_process_memory(output_dir):
start_addr = -1
next_region_addr = 0
while next_region_addr > start_addr:
err = lldb.process.GetMemoryRegionInfo(next_region_addr, mem_info)
err = lldb.debugger.GetSelectedTarget().GetProcess().GetMemoryRegionInfo(next_region_addr, mem_info)
# TODO: Should check err.success. If False, what do we do?
if not err.success:
break
@ -267,7 +268,7 @@ def dump_process_memory(output_dir):
region_name = seg_info["name"]
# Compress and dump the content to a file
err = lldb.SBError()
seg_content = lldb.process.ReadMemory(
seg_content = lldb.debugger.GetSelectedTarget().GetProcess().ReadMemory(
start_addr, end_addr - start_addr, err
)
if seg_content == None:
@ -340,11 +341,12 @@ def main():
index_file.close()
print ("Done.")
except Exception, e:
except Exception as e:
print ("!!! ERROR:\n\t{}".format(repr(e)))
if __name__ == "__main__":
lldb.debugger = lldb.SBDebugger.Create()
main()
elif lldb.debugger:
main()

View File

@ -119,7 +119,7 @@ def main():
binary_code = binary_file.read()
binary_file.close()
# Apply constraints to the mutated input
# Assert that the binary size is within limits
if len(binary_code) > CODE_SIZE_MAX:
print("Binary code is too large (> {} bytes)".format(CODE_SIZE_MAX))
return

1
xgboost Submodule

Submodule xgboost added at 742c19f3ec