Compare commits

..

16 Commits

Author SHA1 Message Date
9287f45e3e change 2024-06-30 14:57:43 +02:00
b0839ffcaf remove debug 2024-06-30 14:56:23 +02:00
3f065ea70a score 2024-06-28 16:47:20 +02:00
d869913efa score output in afl-showmap 2024-06-02 16:51:37 +02:00
6a246516df more data 2024-05-25 11:07:26 +02:00
daab85f3f1 nit 2024-05-21 11:50:43 +02:00
97ed8c2877 fix leopard-v plus initial weights 2024-05-21 11:34:51 +02:00
dca144fbff leopard lto support, llvm19 fixes 2024-05-19 14:28:14 +02:00
ab9bd37b86 multi support 2024-05-16 15:54:55 +02:00
d4071b0fe4 debug 2024-05-16 14:40:53 +02:00
5a0a33e52a debug 2024-05-16 13:30:25 +02:00
c510ba6863 fixes and debug 2024-05-16 12:30:53 +02:00
bd4c9a5eab use score for weighting in exploit mode 2024-05-16 11:43:17 +02:00
f9e85817ad write score to map 2024-05-16 11:24:15 +02:00
8758be3630 add vuln complexity score 2024-05-15 18:03:07 +02:00
31a7ff2ba2 add loop analysis to CC 2024-05-15 15:28:03 +02:00
39 changed files with 950 additions and 603 deletions

View File

@ -2,9 +2,9 @@
<img align="right" src="https://raw.githubusercontent.com/AFLplusplus/Website/main/static/aflpp_bg.svg" alt="AFL++ logo" width="250" height="250">
Release version: [4.21c](https://github.com/AFLplusplus/AFLplusplus/releases)
Release version: [4.20c](https://github.com/AFLplusplus/AFLplusplus/releases)
GitHub version: 4.21c
GitHub version: 4.21a
Repository:
[https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus)

View File

@ -2,8 +2,6 @@
## Must
- fast restart of afl-fuzz if cmdline + target hash is the same
- check for null ptr for xml/curl/g_ string transform functions
- hardened_usercopy=0 page_alloc.shuffle=0
- add value_profile but only enable after 15 minutes without finds
- cmplog max items env?
@ -13,12 +11,12 @@
- afl-showmap -f support
- afl-fuzz multicore wrapper script
- when trimming then perform crash detection
- cyclomatic complexity: 2 + calls + edges - blocks
## Should
- afl-crash-analysis
- cmplog: add loop count resolving (byte -> loop cnt change, calc special values)
- support persistent and deferred fork server in afl-showmap?
- better autodetection of shifting runtime timeout values
- afl-plot to support multiple plot_data

View File

@ -1 +1 @@
95a6857
5ed4f8d

View File

@ -22,10 +22,10 @@ afl_state_t *afl_struct;
typedef struct my_mutator {
afl_state_t *afl;
u8 *mutator_buf;
u8 *out_dir;
u8 *tmp_dir;
u8 *target;
u8 * mutator_buf;
u8 * out_dir;
u8 * tmp_dir;
u8 * target;
uint32_t seed;
} my_mutator_t;
@ -101,7 +101,7 @@ my_mutator_t *afl_custom_init(afl_state_t *afl, unsigned int seed) {
/* When a new queue entry is added we run this input with the symcc
instrumented binary */
uint8_t afl_custom_queue_new_entry(my_mutator_t *data,
uint8_t afl_custom_queue_new_entry(my_mutator_t * data,
const uint8_t *filename_new_queue,
const uint8_t *filename_orig_queue) {
@ -176,7 +176,7 @@ uint8_t afl_custom_queue_new_entry(my_mutator_t *data,
struct dirent **nl;
int32_t items = scandir(data->tmp_dir, &nl, NULL, NULL);
u8 *origin_name = basename(filename_new_queue);
u8 * origin_name = basename(filename_new_queue);
int32_t i;
if (items > 0) {
@ -187,8 +187,8 @@ uint8_t afl_custom_queue_new_entry(my_mutator_t *data,
DBG("test=%s\n", fn);
if (stat(source_name, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) {
u8 *destination_name = alloc_printf("%s/%s.%s", data->out_dir,
origin_name, nl[i]->d_name);
u8 *destination_name =
alloc_printf("%s/%s.%s", data->out_dir, origin_name, nl[i]->d_name);
rename(source_name, destination_name);
ck_free(destination_name);
DBG("found=%s\n", source_name);
@ -248,7 +248,7 @@ uint32_t afl_custom_fuzz_count(my_mutator_t *data, const u8 *buf,
for (i = 0; i < (u32)items; ++i) {
struct stat st;
u8 *fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name);
u8 * fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name);
DBG("test=%s\n", fn);
if (stat(fn, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) {
@ -282,12 +282,12 @@ size_t afl_custom_fuzz(my_mutator_t *data, uint8_t *buf, size_t buf_size,
if (items <= 0) return 0;
for (i = 0; i < (s32)items; ++i) {
for (i = 0; i < (u32)items; ++i) {
if (!done) {
struct stat st;
u8 * fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name);
struct stat st;
u8 *fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name);
if (done == 0) {
if (stat(fn, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) {
@ -306,10 +306,10 @@ size_t afl_custom_fuzz(my_mutator_t *data, uint8_t *buf, size_t buf_size,
}
unlink(fn);
ck_free(fn);
}
ck_free(fn);
free(nl[i]);
}

View File

@ -3,44 +3,24 @@
This is the list of all noteworthy changes made in every public
release of the tool. See README.md for the general instruction manual.
### Version ++4.21c (release)
### Version ++4.21a (dev)
* afl-fuzz
- fixed a regression in afl-fuzz that resulted in a 5-10% performance loss
due to a switch from gettimeofday() to clock_gettime(), which should in fact
be roughly three times faster. The reason for this is unknown.
- new queue selection algorithm based on 2 core years of queue data
analysis. Gives a noticeable improvement in coverage although the results
seem counterintuitive :-)
- added AFL_DISABLE_REDUNDANT for huge queues
- added `AFL_NO_SYNC` environment variable that does what you think it does
- fix AFL_PERSISTENT_RECORD
- run custom_post_process after standard trimming
- prevent filenames in the queue that have spaces
- minor fix for FAST schedules
- more frequent stats update when syncing (todo: check performance impact)
- now timing of calibration, trimming and syncing is measured separately,
thanks to @eqv!
- -V timing is now accurately the fuzz time (without syncing), before
long calibration times and syncing could result in no fuzzing being
done when the time had already run out by then, thanks to @eqv!
- fix -n uninstrumented mode when ending fuzzing
- enhanced the ASAN configuration
- make afl-fuzz use less memory with cmplog and fix a memleak
* afl-cc:
- re-enable i386 support that was accidentally disabled
- fixes for LTO and outdated afl-gcc mode for i386
- fixes for LTO and outdated afl-gcc mode
- fix COMPCOV split compare for old LLVMs
- disable xml/curl/g_ string transform functions because we do not check
for null pointers ... TODO
- ensure shared memory variables are visible in weird build setups
- compatibility with new LLVM 19 changes
* afl-cmin
- work with input files that have a space
* afl-showmap
- fix memory leak on shmem testcase usage (thanks to @ndrewh)
- minor fix to collect coverage -C (thanks to @bet4it)
* Fixed a shmem mmap bug (that rarely came up on MacOS)
* libtokencap: script generate_libtoken_dict.sh added by @a-shvedov
* enhanced the ASAN configuration
### Version ++4.20c (release)
@ -76,13 +56,12 @@
- afl-whatsup:
- now also displays current average speed
- small bugfixes
- custom mutators:
- fixes for aflpp custom mutator and standalone tool
- important fix to the symcc custom mutator
- Fixes for aflpp custom mutator and standalone tool
- Minor edits to afl-persistent-config
- Prevent temporary files being left behind on aborted afl-whatsup
- More CPU benchmarks added to benchmark/
### Version ++4.10c (release)
- afl-fuzz:
- default power schedule is now EXPLORE, due to a fix in fast schedules

View File

@ -588,9 +588,6 @@ checks or alter some of the more exotic semantics of the tool:
between fuzzing instances synchronization. Default sync time is 30 minutes,
note that time is halved for -M main nodes.
- `AFL_NO_SYNC` disables any syncing whatsoever and takes priority on all
other syncing parameters.
- Setting `AFL_TARGET_ENV` causes AFL++ to set extra environment variables for
the target binary. Example: `AFL_TARGET_ENV="VAR1=1 VAR2='a b c'" afl-fuzz
... `. This exists mostly for things like `LD_LIBRARY_PATH` but it would

View File

@ -49,23 +49,14 @@ void instrument_cache_init(void) {
if (setrlimit(RLIMIT_AS, &data_limit) != 0) {
FWARNF("Failed to setrlimit: %d, you may need root or CAP_SYS_RESOURCE",
errno);
FFATAL("Failed to setrlimit: %d", errno);
}
map_base =
gum_memory_allocate(NULL, instrument_cache_size, instrument_cache_size,
GUM_PAGE_READ | GUM_PAGE_WRITE);
if (map_base == MAP_FAILED) {
FFATAL(
"Failed to map segment: %d. This can be caused by failure to setrlimit."
"Disabling or reducing the size of the allocation using "
"AFL_FRIDA_INST_NO_CACHE or AFL_FRIDA_INST_CACHE_SIZE may help",
errno);
}
if (map_base == MAP_FAILED) { FFATAL("Failed to map segment: %d", errno); }
FOKF(cBLU "Instrumentation" cRST " - " cGRN "cache addr:" cYEL " [0x%016lX]",
GUM_ADDRESS(map_base));

View File

@ -194,23 +194,24 @@ static gboolean print_ranges_callback(const GumRangeDetails *details,
if (details->file == NULL) {
OKF("\t0x%016" G_GINT64_MODIFIER "x-0x%016" G_GINT64_MODIFIER "X %c%c%c",
details->range->base_address,
details->range->base_address + details->range->size,
details->protection & GUM_PAGE_READ ? 'R' : '-',
details->protection & GUM_PAGE_WRITE ? 'W' : '-',
details->protection & GUM_PAGE_EXECUTE ? 'X' : '-');
FVERBOSE("\t0x%016" G_GINT64_MODIFIER "x-0x%016" G_GINT64_MODIFIER
"X %c%c%c",
details->range->base_address,
details->range->base_address + details->range->size,
details->protection & GUM_PAGE_READ ? 'R' : '-',
details->protection & GUM_PAGE_WRITE ? 'W' : '-',
details->protection & GUM_PAGE_EXECUTE ? 'X' : '-');
} else {
OKF("\t0x%016" G_GINT64_MODIFIER "x-0x%016" G_GINT64_MODIFIER
"X %c%c%c %s(0x%016" G_GINT64_MODIFIER "x)",
details->range->base_address,
details->range->base_address + details->range->size,
details->protection & GUM_PAGE_READ ? 'R' : '-',
details->protection & GUM_PAGE_WRITE ? 'W' : '-',
details->protection & GUM_PAGE_EXECUTE ? 'X' : '-', details->file->path,
details->file->offset);
FVERBOSE("\t0x%016" G_GINT64_MODIFIER "x-0x%016" G_GINT64_MODIFIER
"X %c%c%c %s(0x%016" G_GINT64_MODIFIER "x)",
details->range->base_address,
details->range->base_address + details->range->size,
details->protection & GUM_PAGE_READ ? 'R' : '-',
details->protection & GUM_PAGE_WRITE ? 'W' : '-',
details->protection & GUM_PAGE_EXECUTE ? 'X' : '-',
details->file->path, details->file->offset);
}
@ -580,7 +581,7 @@ static GArray *merge_ranges(GArray *a) {
void ranges_print_debug_maps(void) {
OKF("Maps");
FVERBOSE("Maps");
gum_process_enumerate_ranges(GUM_PAGE_NO_ACCESS, print_ranges_callback, NULL);
}

View File

@ -139,10 +139,6 @@
#define AFL_RAND_RETURN u32
#endif
#ifndef INTERESTING_32_LEN
#error INTERESTING_32_LEN not defined - BUG!
#endif
extern s8 interesting_8[INTERESTING_8_LEN];
extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
extern s32
@ -204,6 +200,7 @@ struct queue_entry {
u8 *fname; /* File name for the test case */
u32 len; /* Input length */
u32 id; /* entry number in queue_buf */
u32 found;
u8 colorized, /* Do not run redqueen stage again */
cal_failed; /* Calibration failed? */
@ -255,6 +252,9 @@ struct queue_entry {
struct skipdet_entry *skipdet_e;
u32 score; /* complexity/vulnerability score */
u64 total_execs; /* total executes of this item */
};
struct extra_data {
@ -457,7 +457,7 @@ typedef struct afl_env_vars {
afl_no_startup_calibration, afl_no_warn_instability,
afl_post_process_keep_original, afl_crashing_seeds_as_new_crash,
afl_final_sync, afl_ignore_seed_problems, afl_disable_redundant,
afl_sha1_filenames, afl_no_sync;
afl_sha1_filenames;
u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path,
*afl_hang_tmout, *afl_forksrv_init_tmout, *afl_preload,
@ -656,7 +656,6 @@ typedef struct afl_state {
switch_fuzz_mode, /* auto or fixed fuzz mode */
calibration_time_us, /* Time spend on calibration */
sync_time_us, /* Time spend on sync */
cmplog_time_us, /* Time spend on cmplog */
trim_time_us; /* Time spend on trimming */
u32 slowest_exec_ms, /* Slowest testcase non hang in ms */
@ -837,6 +836,9 @@ typedef struct afl_state {
/* How often did we evict from the cache (for statistics only) */
u32 q_testcase_evictions;
/* current complexity/vulnerability score received */
u32 current_score;
/* Refs to each queue entry with cached testcase (for eviction, if cache_count
* is too large) */
struct queue_entry **q_testcase_cache;
@ -1227,7 +1229,6 @@ void show_init_stats(afl_state_t *);
void update_calibration_time(afl_state_t *afl, u64 *time);
void update_trim_time(afl_state_t *afl, u64 *time);
void update_sync_time(afl_state_t *afl, u64 *time);
void update_cmplog_time(afl_state_t *afl, u64 *time);
/* StatsD */
@ -1278,7 +1279,6 @@ void get_core_count(afl_state_t *);
void fix_up_sync(afl_state_t *);
void check_asan_opts(afl_state_t *);
void check_binary(afl_state_t *, u8 *);
u64 get_binary_hash(u8 *fn);
void check_if_tty(afl_state_t *);
void save_cmdline(afl_state_t *, u32, char **);
void read_foreign_testcases(afl_state_t *, int);

View File

@ -33,10 +33,6 @@
#define MUT_STRATEGY_ARRAY_SIZE 256
#ifndef INTERESTING_32
#error INTERESTING_32 is not defined - BUG!
#endif
s8 interesting_8[] = {INTERESTING_8};
s16 interesting_16[] = {INTERESTING_8, INTERESTING_16};
s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32};

View File

@ -26,7 +26,7 @@
/* Version string: */
// c = release, a = volatile github dev, e = experimental branch
#define VERSION "++4.21c"
#define VERSION "++4.21a"
/******************************************************
* *
@ -324,9 +324,9 @@
#define SYNC_INTERVAL 8
/* Sync time (minimum time between syncing in ms, time is halfed for -M main
nodes) - default is 20 minutes: */
nodes) - default is 30 minutes: */
#define SYNC_TIME (20 * 60 * 1000)
#define SYNC_TIME (30 * 60 * 1000)
/* Output directory reuse grace period (minutes): */

View File

@ -21,18 +21,20 @@ static char *afl_environment_variables[] = {
"AFL_BENCH_UNTIL_CRASH", "AFL_CAL_FAST", "AFL_CC", "AFL_CC_COMPILER",
"AFL_CMIN_ALLOW_ANY", "AFL_CMIN_CRASHES_ONLY", "AFL_CMPLOG_ONLY_NEW",
"AFL_CODE_END", "AFL_CODE_START", "AFL_COMPCOV_BINNAME",
"AFL_DUMP_CYCLOMATIC_COMPLEXITY", "AFL_CMPLOG_MAX_LEN", "AFL_COMPCOV_LEVEL",
"AFL_CRASH_EXITCODE", "AFL_CRASHING_SEEDS_AS_NEW_CRASH",
"AFL_CUSTOM_MUTATOR_LIBRARY", "AFL_CUSTOM_MUTATOR_ONLY",
"AFL_CUSTOM_INFO_PROGRAM", "AFL_CUSTOM_INFO_PROGRAM_ARGV",
"AFL_CUSTOM_INFO_PROGRAM_INPUT", "AFL_CUSTOM_INFO_OUT", "AFL_CXX",
"AFL_CYCLE_SCHEDULES", "AFL_DEBUG", "AFL_DEBUG_CHILD", "AFL_DEBUG_GDB",
"AFL_DEBUG_UNICORN", "AFL_DISABLE_REDUNDANT", "AFL_NO_REDUNDANT",
"AFL_DISABLE_TRIM", "AFL_NO_TRIM", "AFL_DISABLE_LLVM_INSTRUMENTATION",
"AFL_DONT_OPTIMIZE", "AFL_DRIVER_STDERR_DUPLICATE_FILENAME",
"AFL_DUMB_FORKSRV", "AFL_EARLY_FORKSERVER", "AFL_ENTRYPOINT",
"AFL_EXIT_WHEN_DONE", "AFL_EXIT_ON_TIME", "AFL_EXIT_ON_SEED_ISSUES",
"AFL_FAST_CAL", "AFL_FINAL_SYNC", "AFL_FORCE_UI", "AFL_FRIDA_DEBUG_MAPS",
"AFL_DUMP_QUEUE_ON_EXIT", "AFL_DUMP_CYCLOMATIC_COMPLEXITY",
"AFL_DUMP_VULNERABILITY_COMPLEXITY", "AFL_CMPLOG_MAX_LEN",
"AFL_COMPCOV_LEVEL", "AFL_CRASH_EXITCODE",
"AFL_CRASHING_SEEDS_AS_NEW_CRASH", "AFL_CUSTOM_MUTATOR_LIBRARY",
"AFL_CUSTOM_MUTATOR_ONLY", "AFL_CUSTOM_INFO_PROGRAM",
"AFL_CUSTOM_INFO_PROGRAM_ARGV", "AFL_CUSTOM_INFO_PROGRAM_INPUT",
"AFL_CUSTOM_INFO_OUT", "AFL_CXX", "AFL_CYCLE_SCHEDULES", "AFL_DEBUG",
"AFL_DEBUG_CHILD", "AFL_DEBUG_GDB", "AFL_DEBUG_UNICORN",
"AFL_DISABLE_REDUNDANT", "AFL_NO_REDUNDANT", "AFL_DISABLE_TRIM",
"AFL_NO_TRIM", "AFL_DISABLE_LLVM_INSTRUMENTATION", "AFL_DONT_OPTIMIZE",
"AFL_DRIVER_STDERR_DUPLICATE_FILENAME", "AFL_DUMB_FORKSRV",
"AFL_EARLY_FORKSERVER", "AFL_ENTRYPOINT", "AFL_EXIT_WHEN_DONE",
"AFL_EXIT_ON_TIME", "AFL_EXIT_ON_SEED_ISSUES", "AFL_FAST_CAL",
"AFL_FINAL_SYNC", "AFL_FORCE_UI", "AFL_FRIDA_DEBUG_MAPS",
"AFL_FRIDA_DRIVER_NO_HOOK", "AFL_FRIDA_EXCLUDE_RANGES",
"AFL_FRIDA_INST_CACHE_SIZE", "AFL_FRIDA_INST_COVERAGE_ABSOLUTE",
"AFL_FRIDA_INST_COVERAGE_FILE", "AFL_FRIDA_INST_DEBUG_FILE",
@ -81,13 +83,14 @@ static char *afl_environment_variables[] = {
"AFL_LLVM_MAP_DYNAMIC", "AFL_LLVM_NGRAM_SIZE", "AFL_NGRAM_SIZE",
"AFL_LLVM_NO_RPATH", "AFL_LLVM_NOT_ZERO", "AFL_LLVM_INSTRUMENT_FILE",
"AFL_LLVM_THREADSAFE_INST", "AFL_LLVM_SKIP_NEVERZERO", "AFL_NO_AFFINITY",
"AFL_TRY_AFFINITY", "AFL_LLVM_LTO_DONTWRITEID", "AFL_LLVM_LTO_SKIPINIT",
"AFL_LLVM_LTO_STARTID", "AFL_FUZZER_LOOPCOUNT", "AFL_NO_ARITH",
"AFL_NO_AUTODICT", "AFL_NO_BUILTIN",
"AFL_TRY_AFFINITY", "AFL_LLVM_LTO_DONTWRITEID",
"AFL_LLVM_LTO_SKIPINIT"
"AFL_LLVM_LTO_STARTID",
"AFL_FUZZER_LOOPCOUNT", "AFL_NO_ARITH", "AFL_NO_AUTODICT", "AFL_NO_BUILTIN",
#if defined USE_COLOR && !defined ALWAYS_COLORED
"AFL_NO_COLOR", "AFL_NO_COLOUR",
#endif
"AFL_NO_CPU_RED", "AFL_NO_SYNC",
"AFL_NO_CPU_RED",
"AFL_NO_CFG_FUZZING", // afl.rs rust crate option
"AFL_NO_CRASH_README", "AFL_NO_FORKSRV", "AFL_NO_UI", "AFL_NO_PYTHON",
"AFL_NO_STARTUP_CALIBRATION", "AFL_NO_WARN_INSTABILITY",

View File

@ -60,6 +60,8 @@
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "config.h"
#include "debug.h"
@ -172,6 +174,7 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) {
}
using LoopInfoCallback = function_ref<const LoopInfo *(Function &F)>;
using DomTreeCallback = function_ref<const DominatorTree *(Function &F)>;
using PostDomTreeCallback =
function_ref<const PostDominatorTree *(Function &F)>;
@ -187,13 +190,15 @@ class ModuleSanitizerCoverageLTO
}
bool instrumentModule(Module &M, DomTreeCallback DTCallback,
PostDomTreeCallback PDTCallback);
PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback);
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
private:
void instrumentFunction(Function &F, DomTreeCallback DTCallback,
PostDomTreeCallback PDTCallback);
PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback);
/* void InjectCoverageForIndirectCalls(Function &F,
ArrayRef<Instruction *>
IndirCalls);*/
@ -250,6 +255,7 @@ class ModuleSanitizerCoverageLTO
uint32_t afl_global_id = 0;
uint32_t unhandled = 0;
uint32_t select_cnt = 0;
uint32_t dump_cc = 0, dump_vc = 0;
uint32_t instrument_ctx = 0;
uint32_t instrument_ctx_max_depth = 0;
uint32_t extra_ctx_inst = 0;
@ -291,6 +297,7 @@ class ModuleSanitizerCoverageLTOLegacyPass : public ModulePass {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<PostDominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
}
@ -319,7 +326,15 @@ class ModuleSanitizerCoverageLTOLegacyPass : public ModulePass {
};
return ModuleSancov.instrumentModule(M, DTCallback, PDTCallback);
auto LoopCallback = [this](Function &F) -> const LoopInfo * {
return &this->getAnalysis<LoopInfoWrapperPass>(F).getLoopInfo();
};
ModuleSancov.instrumentModule(M, DTCallback, PDTCallback, LoopCallback);
return 1;
}
@ -372,15 +387,21 @@ PreservedAnalyses ModuleSanitizerCoverageLTO::run(Module &M,
};
if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback))
return PreservedAnalyses::none();
auto LoopCallback = [&FAM](Function &F) -> const LoopInfo * {
return PreservedAnalyses::all();
return &FAM.getResult<LoopAnalysis>(F);
};
ModuleSancov.instrumentModule(M, DTCallback, PDTCallback, LoopCallback);
return PreservedAnalyses::none();
}
bool ModuleSanitizerCoverageLTO::instrumentModule(
Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) {
Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback) {
if (Options.CoverageType == SanitizerCoverageOptions::SCK_None) return false;
/*
@ -474,6 +495,10 @@ bool ModuleSanitizerCoverageLTO::instrumentModule(
}
if (getenv("AFL_DUMP_CYCLOMATIC_COMPLEXITY")) { dump_cc = 1; }
if (getenv("AFL_DUMP_VULNERABILITY_COMPLEXITY")) { dump_vc = 1; }
skip_nozero = getenv("AFL_LLVM_SKIP_NEVERZERO");
use_threadsafe_counters = getenv("AFL_LLVM_THREADSAFE_INST");
@ -1057,7 +1082,7 @@ bool ModuleSanitizerCoverageLTO::instrumentModule(
// M.getOrInsertFunction(SanCovTracePCGuardName, VoidTy, Int32PtrTy);
for (auto &F : M)
instrumentFunction(F, DTCallback, PDTCallback);
instrumentFunction(F, DTCallback, PDTCallback, LCallback);
// AFL++ START
if (dFile.is_open()) dFile.close();
@ -1347,7 +1372,8 @@ Function *returnOnlyCaller(Function *F) {
}
void ModuleSanitizerCoverageLTO::instrumentFunction(
Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) {
Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback) {
if (F.empty()) return;
if (F.getName().find(".module_ctor") != std::string::npos)
@ -1421,6 +1447,7 @@ void ModuleSanitizerCoverageLTO::instrumentFunction(
const DominatorTree *DT = DTCallback(F);
const PostDominatorTree *PDT = PDTCallback(F);
const LoopInfo *LI = LCallback(F);
bool IsLeafFunc = true;
uint32_t skip_next = 0;
uint32_t call_counter = 0, call_depth = 0;
@ -1955,6 +1982,51 @@ void ModuleSanitizerCoverageLTO::instrumentFunction(
}
unsigned int score = 0;
if (dump_cc) { score += calcCyclomaticComplexity(&F, LI); }
if (dump_vc) { score += calcVulnerabilityScore(&F, LI, DT, PDT); }
if (score) {
BasicBlock::iterator IP = F.getEntryBlock().getFirstInsertionPt();
IRBuilder<> builder(&*IP);
// Access the int32 value at u8 offset 1 (unaligned access)
LoadInst *MapPtr =
builder.CreateLoad(PointerType::get(Int8Ty, 0), AFLMapPtr);
llvm::Value *CastToInt8Ptr =
builder.CreateBitCast(MapPtr, llvm::PointerType::get(Int8Ty, 0));
llvm::Value *Int32Ptr = builder.CreateGEP(
Int8Ty, CastToInt8Ptr, llvm::ConstantInt::get(Int32Ty, 1));
llvm::Value *CastToInt32Ptr =
builder.CreateBitCast(Int32Ptr, llvm::PointerType::get(Int32Ty, 0));
// Load the unaligned int32 value
llvm::LoadInst *Load = builder.CreateLoad(Int32Ty, CastToInt32Ptr);
Load->setAlignment(llvm::Align(1));
// Value to add
llvm::Value *ValueToAdd = llvm::ConstantInt::get(Int32Ty, score);
// Perform addition and check for wrap around
llvm::Value *Add =
builder.CreateAdd(Load, ValueToAdd, "addValue", true, true);
// Check if addition wrapped (unsigned)
llvm::Value *DidWrap = builder.CreateICmpULT(Add, Load, "didWrap");
// Select the maximum value if there was a wrap, otherwise use the result
llvm::Value *MaxInt32 = llvm::ConstantInt::get(Int32Ty, UINT32_MAX);
llvm::Value *Result =
builder.CreateSelect(DidWrap, MaxInt32, Add, "selectMaxOrResult");
// Store the result back at the same unaligned offset
llvm::StoreInst *Store = builder.CreateStore(Result, CastToInt32Ptr);
Store->setAlignment(llvm::Align(1));
}
InjectCoverage(F, BlocksToInstrument, IsLeafFunc);
// InjectCoverageForIndirectCalls(F, IndirCalls);

View File

@ -70,6 +70,8 @@
#endif
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "config.h"
#include "debug.h"
@ -119,6 +121,7 @@ SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) {
}
using LoopInfoCallback = function_ref<const LoopInfo *(Function &F)>;
using DomTreeCallback = function_ref<const DominatorTree *(Function &F)>;
using PostDomTreeCallback =
function_ref<const PostDominatorTree *(Function &F)>;
@ -135,11 +138,13 @@ class ModuleSanitizerCoverageAFL
PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
bool instrumentModule(Module &M, DomTreeCallback DTCallback,
PostDomTreeCallback PDTCallback);
PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback);
private:
void instrumentFunction(Function &F, DomTreeCallback DTCallback,
PostDomTreeCallback PDTCallback);
PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback);
void InjectTraceForCmp(Function &F, ArrayRef<Instruction *> CmpTraceTargets);
void InjectTraceForSwitch(Function &F,
ArrayRef<Instruction *> SwitchTraceTargets);
@ -195,7 +200,7 @@ class ModuleSanitizerCoverageAFL
SanitizerCoverageOptions Options;
uint32_t instr = 0, selects = 0, unhandled = 0, dump_cc = 0;
uint32_t instr = 0, selects = 0, unhandled = 0, dump_cc = 0, dump_vc = 0;
GlobalVariable *AFLMapPtr = NULL;
ConstantInt *One = NULL;
ConstantInt *Zero = NULL;
@ -233,8 +238,10 @@ PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M,
ModuleAnalysisManager &MAM) {
ModuleSanitizerCoverageAFL ModuleSancov(Options);
auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
auto DTCallback = [&FAM](Function &F) -> const DominatorTree *{
auto DTCallback = [&FAM](Function &F) -> const DominatorTree * {
return &FAM.getResult<DominatorTreeAnalysis>(F);
@ -246,9 +253,21 @@ PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M,
};
if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback))
auto LoopCallback = [&FAM](Function &F) -> const LoopInfo * {
return &FAM.getResult<LoopAnalysis>(F);
};
if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback, LoopCallback)) {
return PreservedAnalyses::none();
return PreservedAnalyses::all();
} else {
return PreservedAnalyses::all();
}
}
@ -324,7 +343,8 @@ Function *ModuleSanitizerCoverageAFL::CreateInitCallsForSections(
}
bool ModuleSanitizerCoverageAFL::instrumentModule(
Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) {
Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback) {
setvbuf(stdout, NULL, _IONBF, 0);
@ -332,6 +352,8 @@ bool ModuleSanitizerCoverageAFL::instrumentModule(
if (getenv("AFL_DUMP_CYCLOMATIC_COMPLEXITY")) { dump_cc = 1; }
if (getenv("AFL_DUMP_VULNERABILITY_COMPLEXITY")) { dump_vc = 1; }
if ((isatty(2) && !getenv("AFL_QUIET")) || debug) {
SAYF(cCYA "SanitizerCoveragePCGUARD" VERSION cRST "\n");
@ -429,7 +451,7 @@ bool ModuleSanitizerCoverageAFL::instrumentModule(
M.getOrInsertFunction(SanCovTracePCGuardName, VoidTy, Int32PtrTy);
for (auto &F : M)
instrumentFunction(F, DTCallback, PDTCallback);
instrumentFunction(F, DTCallback, PDTCallback, LCallback);
Function *Ctor = nullptr;
@ -568,7 +590,8 @@ static bool IsInterestingCmp(ICmpInst *CMP, const DominatorTree *DT,
#endif
void ModuleSanitizerCoverageAFL::instrumentFunction(
Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) {
Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback,
LoopInfoCallback LCallback) {
if (F.empty()) return;
if (!isInInstrumentList(&F, FMNAME)) return;
@ -604,6 +627,7 @@ void ModuleSanitizerCoverageAFL::instrumentFunction(
const DominatorTree *DT = DTCallback(F);
const PostDominatorTree *PDT = PDTCallback(F);
const LoopInfo *LI = LCallback(F);
bool IsLeafFunc = true;
for (auto &BB : F) {
@ -636,12 +660,55 @@ void ModuleSanitizerCoverageAFL::instrumentFunction(
}
unsigned int score = 0;
if (dump_cc) { score += calcCyclomaticComplexity(&F, LI); }
if (dump_vc) { score += calcVulnerabilityScore(&F, LI, DT, PDT); }
if (score) {
BasicBlock::iterator IP = F.getEntryBlock().getFirstInsertionPt();
IRBuilder<> builder(&*IP);
// Access the int32 value at u8 offset 1 (unaligned access)
LoadInst *MapPtr =
builder.CreateLoad(PointerType::get(Int8Ty, 0), AFLMapPtr);
llvm::Value *CastToInt8Ptr =
builder.CreateBitCast(MapPtr, llvm::PointerType::get(Int8Ty, 0));
llvm::Value *Int32Ptr = builder.CreateGEP(
Int8Ty, CastToInt8Ptr, llvm::ConstantInt::get(Int32Ty, 1));
llvm::Value *CastToInt32Ptr =
builder.CreateBitCast(Int32Ptr, llvm::PointerType::get(Int32Ty, 0));
// Load the unaligned int32 value
llvm::LoadInst *Load = builder.CreateLoad(Int32Ty, CastToInt32Ptr);
Load->setAlignment(llvm::Align(1));
// Value to add
llvm::Value *ValueToAdd = llvm::ConstantInt::get(Int32Ty, score);
// Perform addition and check for wrap around
llvm::Value *Add =
builder.CreateAdd(Load, ValueToAdd, "addValue", true, true);
// Check if addition wrapped (unsigned)
llvm::Value *DidWrap = builder.CreateICmpULT(Add, Load, "didWrap");
// Select the maximum value if there was a wrap, otherwise use the result
llvm::Value *MaxInt32 = llvm::ConstantInt::get(Int32Ty, UINT32_MAX);
llvm::Value *Result =
builder.CreateSelect(DidWrap, MaxInt32, Add, "selectMaxOrResult");
// Store the result back at the same unaligned offset
llvm::StoreInst *Store = builder.CreateStore(Result, CastToInt32Ptr);
Store->setAlignment(llvm::Align(1));
}
InjectCoverage(F, BlocksToInstrument, IsLeafFunc);
// InjectTraceForCmp(F, CmpTraceTargets);
// InjectTraceForSwitch(F, SwitchTraceTargets);
if (dump_cc) { calcCyclomaticComplexity(&F); }
}
GlobalVariable *ModuleSanitizerCoverageAFL::CreateFunctionLocalArrayInSection(

View File

@ -14,7 +14,21 @@
#include <fstream>
#include <cmath>
#include <llvm/Support/raw_ostream.h>
#if LLVM_VERSION_MAJOR >= 13
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Analysis/PostDominators.h"
#endif
// #define LEOPARD_USE_WEIGHTS 1
#define IS_EXTERN extern
#include "afl-llvm-common.h"
@ -26,11 +40,79 @@ static std::list<std::string> allowListFunctions;
static std::list<std::string> denyListFiles;
static std::list<std::string> denyListFunctions;
unsigned int calcCyclomaticComplexity(llvm::Function *F) {
#if LLVM_VERSION_MAJOR >= 13
// Leopard complexity calculations
#ifndef LEOPARD_USE_WEIGHTS
#define C1_WEIGHT 1.0
#define C2_WEIGHT 1.0
#define C3_WEIGHT 1.0
#define C4_WEIGHT 1.0
#define V1_WEIGHT 1.0
#define V2_WEIGHT 1.0
#define V3_WEIGHT 1.0
#define V4_WEIGHT 1.0
#define V5_WEIGHT 1.0
#define V6_WEIGHT 1.0
#define V7_WEIGHT 1.0
#define V8_WEIGHT 1.0
#define V9_WEIGHT 1.0
#define V10_WEIGHT 1.0
#define V11_WEIGHT 1.0
#else
// Cyclomatic weights
#define C1_WEIGHT 1.0
#define C2_WEIGHT 1.0
#define C3_WEIGHT 1.0
#define C4_WEIGHT 1.0
// Vulnerability weights
#define V1_WEIGHT 1.5
#define V2_WEIGHT 3.25
#define V3_WEIGHT 4.25
#define V4_WEIGHT 3.0
#define V5_WEIGHT 4.25
#define V6_WEIGHT 7.75
#define V7_WEIGHT 2.5
#define V8_WEIGHT 2.5
#define V9_WEIGHT 4.0
#define V10_WEIGHT 5.25
#define V11_WEIGHT 3.5
#endif
static void countNestedLoops(Loop *L, int depth, unsigned int &loopCount,
unsigned int &nestedLoopCount,
unsigned int &maxNestingLevel) {
loopCount++;
if (!L->getSubLoops().empty()) {
// Increment nested loop count by the number of sub-loops
nestedLoopCount += L->getSubLoops().size();
// Update maximum nesting level
if (depth > maxNestingLevel) { maxNestingLevel = depth; }
// Recursively count sub-loops
for (Loop *SubLoop : L->getSubLoops()) {
countNestedLoops(SubLoop, depth + 1, loopCount, nestedLoopCount,
maxNestingLevel);
}
}
}
unsigned int calcCyclomaticComplexity(llvm::Function *F,
const llvm::LoopInfo *LI) {
unsigned int numBlocks = 0;
unsigned int numEdges = 0;
unsigned int numCalls = 0;
unsigned int numLoops = 0;
unsigned int numNestedLoops = 0;
unsigned int maxLoopNesting = 0;
// Iterate through each basic block in the function
for (BasicBlock &BB : *F) {
@ -55,22 +137,197 @@ unsigned int calcCyclomaticComplexity(llvm::Function *F) {
}
for (Loop *L : *LI) {
countNestedLoops(L, 1, numLoops, numNestedLoops, maxLoopNesting);
}
// Cyclomatic Complexity V(G) = E - N + 2P
// For a single function, P (number of connected components) is 1
// Calls are considered to be an edge
unsigned int CC = 2 + numCalls + numEdges - numBlocks;
unsigned int cc =
(unsigned int)(C1_WEIGHT * (double)(2 + numCalls + numEdges - numBlocks) +
C2_WEIGHT * (double)numLoops +
C3_WEIGHT * (double)numNestedLoops +
C4_WEIGHT * (double)maxLoopNesting);
// if (debug) {
fprintf(stderr, "CyclomaticComplexity for %s: %u\n",
F->getName().str().c_str(), CC);
fprintf(stderr,
"CyclomaticComplexity for %s: %u (calls=%u edges=%u blocks=%u "
"loops=%u nested_loops=%u max_loop_nesting_level=%u)\n",
F->getName().str().c_str(), cc, numCalls, numEdges, numBlocks,
numLoops, numNestedLoops, maxLoopNesting);
//}
return CC;
return cc;
}
/* Compute a heuristic "vulnerability complexity" score for function F.
 *
 * The score is a weighted sum (weights V1_WEIGHT .. V11_WEIGHT, defined
 * elsewhere) over features gathered from the IR:
 *   V1  paramCount                  - number of formal parameters of F
 *   V2  calledParamCount            - sum of parameter counts of all
 *                                     directly-called functions
 *   V3  pointerArithCount           - number of GEP instructions
 *   V4  totalPointerArithParams     - total GEP operand count
 *   V5  maxPointerArithVars         - largest operand count of a single GEP
 *   V6  nestedControlStructCount    - number of loops (incl. nested ones)
 *   V7  maxNestingLevel             - deepest loop nesting depth
 *   V8  maxControlDependentControls - max, per conditional branch, of branch
 *                                     operands dominating the branch
 *   V9  maxDataDependentControls    - max, per conditional branch, of branch
 *                                     operands post-dominating the branch
 *   V10 ifWithoutElseCount          - conditional branches detected as
 *                                     if() without else (heuristic, see note)
 *   V11 controlPredicateVarCount    - operands of branch-condition
 *                                     icmp/binop/select instructions
 *
 * Parameters:
 *   F   - function to analyze
 *   LI  - loop analysis results for F
 *   DT  - dominator tree for F
 *   PDT - post-dominator tree for F
 *
 * Returns the weighted sum truncated to unsigned int.  The per-feature
 * breakdown is always printed to stderr (the output is not gated behind a
 * debug flag). */
unsigned int calcVulnerabilityScore(llvm::Function *F, const llvm::LoopInfo *LI,
                                    const llvm::DominatorTree *DT,
                                    const llvm::PostDominatorTree *PDT) {

  unsigned int score = 0;
  // V1 and V2
  unsigned paramCount = F->arg_size();
  unsigned calledParamCount = 0;
  // V3, V4 and V5
  unsigned pointerArithCount = 0;
  unsigned totalPointerArithParams = 0;
  unsigned maxPointerArithVars = 0;
  // V6 to V11
  unsigned nestedControlStructCount = 0;
  unsigned maxNestingLevel = 0;
  unsigned maxControlDependentControls = 0;
  unsigned maxDataDependentControls = 0;
  unsigned ifWithoutElseCount = 0;
  unsigned controlPredicateVarCount = 0;

  // Recursive lambda: count every loop (top-level and nested) and record the
  // deepest nesting depth encountered.  Top-level loops start at depth 1.
  std::function<void(Loop *, unsigned)> countNestedLoops = [&](Loop *L,
                                                               unsigned depth) {

    nestedControlStructCount++;
    if (depth > maxNestingLevel) { maxNestingLevel = depth; }
    for (Loop *SubLoop : L->getSubLoops()) {

      countNestedLoops(SubLoop, depth + 1);

    }

  };

  for (Loop *TopLoop : *LI) {

    countNestedLoops(TopLoop, 1);

  }

  // Single pass over all instructions of F.
  for (inst_iterator I = inst_begin(*F), E = inst_end(*F); I != E; ++I) {

    // V2: only direct calls contribute; indirect calls return nullptr from
    // getCalledFunction() and are skipped.
    if (CallInst *CI = dyn_cast<CallInst>(&*I)) {

      if (Function *CalledF = CI->getCalledFunction()) {

        calledParamCount += CalledF->arg_size();

      }

    }

    // V3-V5: GetElementPtr instructions serve as the proxy for pointer
    // arithmetic; operand count approximates the variables involved.
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&*I)) {

      pointerArithCount++;
      unsigned numPointerArithVars = GEP->getNumOperands();
      totalPointerArithParams += numPointerArithVars;
      if (numPointerArithVars > maxPointerArithVars) {

        maxPointerArithVars = numPointerArithVars;

      }

    }

    if (BranchInst *BI = dyn_cast<BranchInst>(&*I)) {

      if (BI->isConditional()) {

        // V8/V9: count branch operands that are instructions dominating
        // (DT) resp. post-dominating (PDT) this branch.  NOTE(review):
        // (post)dominance is used here as a stand-in for control/data
        // dependence — confirm this matches the intended Leopard-style
        // metric.
        unsigned controlDependentCount = 0;
        unsigned dataDependentCount = 0;

        for (Use &U : BI->operands()) {

          if (Instruction *Op = dyn_cast<Instruction>(U.get())) {

            if (DT->dominates(Op, &*I)) { controlDependentCount++; }
            if (PDT->dominates(Op, &*I)) { dataDependentCount++; }

          }

        }

        if (controlDependentCount > maxControlDependentControls) {

          maxControlDependentControls = controlDependentCount;

        }

        if (dataDependentCount > maxDataDependentControls) {

          maxDataDependentControls = dataDependentCount;

        }

        // Check for if() without else
        // NOTE(review): a well-formed BasicBlock always contains at least a
        // terminator, so FalseBB->empty() should never be true and this
        // counter may stay at 0 — verify the intended detection logic.
        BasicBlock *TrueBB = BI->getSuccessor(0);
        BasicBlock *FalseBB = BI->getSuccessor(1);

        if (TrueBB && FalseBB) {

          if (TrueBB->getSinglePredecessor() == &*I->getParent() &&
              FalseBB->empty()) {

            ifWithoutElseCount++;

          }

        }

        // Count variables involved in control predicates
        // (icmp, binary operator or select feeding the branch condition).
        if (ICmpInst *ICmp = dyn_cast<ICmpInst>(BI->getCondition())) {

          controlPredicateVarCount += ICmp->getNumOperands();

        } else if (BinaryOperator *BinOp =

                       dyn_cast<BinaryOperator>(BI->getCondition())) {

          controlPredicateVarCount += BinOp->getNumOperands();

        } else if (SelectInst *Select =

                       dyn_cast<SelectInst>(BI->getCondition())) {

          controlPredicateVarCount += Select->getNumOperands();

        }

      }

    }

  }

  // Weighted sum of all collected features; the double result is truncated.
  score = (unsigned int)(V1_WEIGHT * (double)paramCount +
                         V2_WEIGHT * (double)calledParamCount +
                         V3_WEIGHT * (double)pointerArithCount +
                         V4_WEIGHT * (double)totalPointerArithParams +
                         V5_WEIGHT * (double)maxPointerArithVars +
                         V6_WEIGHT * (double)nestedControlStructCount +
                         V7_WEIGHT * (double)maxNestingLevel +
                         V8_WEIGHT * (double)maxControlDependentControls +
                         V9_WEIGHT * (double)maxDataDependentControls +
                         V10_WEIGHT * (double)ifWithoutElseCount +
                         V11_WEIGHT * (double)controlPredicateVarCount);

  // Unconditional debug breakdown (not gated behind a debug flag).
  fprintf(stderr,
          "VulnerabilityScore for %s: %u (paramCount=%u "
          "calledParamCount=%u|pointerArithCount=%u totalPointerArithParams=%u "
          "maxPointerArithVars=%u|maxNestingLevel=%u "
          "maxControlDependentControls=%u maxDataDependentControls=%u "
          "ifWithoutElseCount=%u controlPredicateVarCount=%u)\n",
          F->getName().str().c_str(), score, paramCount, calledParamCount,
          pointerArithCount, totalPointerArithParams, maxPointerArithVars,
          maxNestingLevel, maxControlDependentControls,
          maxDataDependentControls, ifWithoutElseCount,
          controlPredicateVarCount);

  return score;

}
#endif
char *getBBName(const llvm::BasicBlock *BB) {
static char *name;

View File

@ -12,6 +12,7 @@
#include <sys/time.h>
#include "llvm/Config/llvm-config.h"
#if LLVM_VERSION_MAJOR == 3 && LLVM_VERSION_MINOR < 5
typedef long double max_align_t;
#endif
@ -26,6 +27,19 @@ typedef long double max_align_t;
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#endif
#if LLVM_VERSION_MAJOR > 12
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Analysis/PostDominators.h"
#endif
#if LLVM_VERSION_MAJOR > 3 || \
(LLVM_VERSION_MAJOR == 3 && LLVM_VERSION_MINOR > 4)
#include "llvm/IR/DebugInfo.h"
@ -55,7 +69,11 @@ void initInstrumentList();
bool isInInstrumentList(llvm::Function *F, std::string Filename);
unsigned long long int calculateCollisions(uint32_t edges);
void scanForDangerousFunctions(llvm::Module *M);
unsigned int calcCyclomaticComplexity(llvm::Function *F);
unsigned int calcCyclomaticComplexity(llvm::Function *F,
const llvm::LoopInfo *LI);
unsigned int calcVulnerabilityScore(llvm::Function *F, const llvm::LoopInfo *LI,
const llvm::DominatorTree *DT,
const llvm::PostDominatorTree *PDT);
#ifndef IS_EXTERN
#define IS_EXTERN

View File

@ -54,15 +54,15 @@
#define nullptr 0
#endif
#include <set>
#include "afl-llvm-common.h"
#if LLVM_MAJOR >= 19
#define STARTSWITH starts_with
#else
#define STARTSWITH startswith
#endif
#include <set>
#include "afl-llvm-common.h"
using namespace llvm;
namespace {

View File

@ -202,8 +202,6 @@ QEMU_CONF_FLAGS=" \
--disable-xfsctl \
--target-list="${CPU_TARGET}-linux-user" \
--without-default-devices \
--extra-cflags=-Wno-int-conversion \
--disable-werror \
"
if [ -n "${CROSS_PREFIX}" ]; then
@ -245,6 +243,7 @@ if [ "$DEBUG" = "1" ]; then
--enable-debug-stack-usage \
--enable-debug-tcg \
--enable-qom-cast-debug \
--enable-werror \
"
else
@ -255,6 +254,7 @@ else
--disable-debug-tcg \
--disable-qom-cast-debug \
--disable-stack-protector \
--disable-werror \
--disable-docs \
"

View File

@ -2366,7 +2366,8 @@ static void add_aflpplib(aflcc_state_t *aflcc) {
insert_param(aflcc, afllib);
#ifdef __APPLE__
insert_param(aflcc, "-Wl,-undefined,dynamic_lookup");
insert_param(aflcc, "-Wl,-undefined");
insert_param(aflcc, "dynamic_lookup");
#endif
}
@ -2793,11 +2794,11 @@ static void maybe_usage(aflcc_state_t *aflcc, int argc, char **argv) {
"MODES: NCC PERSIST DICT LAF "
"CMPLOG SELECT\n"
" [LLVM] LLVM: %s%s\n"
" PCGUARD %s yes yes module yes yes "
" PCGUARD %s yes yes module yes yes "
"yes\n"
" NATIVE AVAILABLE no yes no no "
"part. yes\n"
" CLASSIC %s no yes module yes yes "
" CLASSIC %s no yes module yes yes "
"yes\n"
" - NORMAL\n"
" - CALLER\n"
@ -2814,10 +2815,10 @@ static void maybe_usage(aflcc_state_t *aflcc, int argc, char **argv) {
" [GCC/CLANG] simple gcc/clang: %s%s\n"
" CLASSIC DEFAULT no no no no no "
"no\n\n",
aflcc->have_llvm ? "AVAILABLE " : "unavailable!",
aflcc->have_llvm ? "AVAILABLE" : "unavailable!",
aflcc->compiler_mode == LLVM ? " [SELECTED]" : "",
aflcc->have_llvm ? "AVAILABLE " : "unavailable!",
aflcc->have_llvm ? "AVAILABLE " : "unavailable!",
aflcc->have_llvm ? "AVAILABLE" : "unavailable!",
aflcc->have_llvm ? "AVAILABLE" : "unavailable!",
aflcc->have_lto ? "AVAILABLE" : "unavailable!",
aflcc->compiler_mode == LTO ? " [SELECTED]" : "",
aflcc->have_gcc_plugin ? "AVAILABLE" : "unavailable!",
@ -2843,7 +2844,7 @@ static void maybe_usage(aflcc_state_t *aflcc, int argc, char **argv) {
" The best is LTO but it often needs RANLIB and AR settings outside "
"of afl-cc.\n\n");
#if LLVM_MAJOR >= 11 || (LLVM_MAJOR == 10 && LLVM_MINOR > 0)
#if LLVM_MAJOR > 10 || (LLVM_MAJOR == 10 && LLVM_MINOR > 0)
#define NATIVE_MSG \
" LLVM-NATIVE: use llvm's native PCGUARD instrumentation (less " \
"performant)\n"

View File

@ -59,6 +59,27 @@ u8 last_intr = 0;
#define AFL_PATH "/usr/local/lib/afl/"
#endif
/* - Some BSD (i.e.: FreeBSD) offer the FAST clock source as
* equivalent to Linux COARSE clock source. Aliasing COARSE to
* FAST on such systems when COARSE is not already defined.
* - macOS has no support of CLOCK_MONOTONIC_COARSE clock type.
*/
#if defined(OS_DARWIN) || defined(OS_SUNOS) || defined(__APPLE__) || \
defined(__sun) || defined(__NetBSD__)
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#elif defined(OS_FREEBSD)
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST
#endif
/* Convert seconds to milliseconds. */
#define SEC_TO_MS(sec) ((sec) * 1000)
/* Convert seconds to microseconds. */
#define SEC_TO_US(sec) ((sec) * 1000000)
/* Convert nanoseconds to milliseconds. */
#define NS_TO_MS(ns) ((ns) / 1000000)
/* Convert nanoseconds to microseconds. */
#define NS_TO_US(ns) ((ns) / 1000)
void *afl_memmem(const void *haystack, size_t haystacklen, const void *needle,
size_t needlelen) {
@ -976,25 +997,33 @@ void read_bitmap(u8 *fname, u8 *map, size_t len) {
inline u64 get_cur_time(void) {
struct timeval tv;
struct timezone tz;
struct timespec ts;
int rc = clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
if (rc == -1) {
gettimeofday(&tv, &tz);
PFATAL("Failed to obtain timestamp (errno = %i: %s)\n", errno,
strerror(errno));
return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000);
}
return SEC_TO_MS((uint64_t)ts.tv_sec) + NS_TO_MS((uint64_t)ts.tv_nsec);
}
/* Get unix time in microseconds */
inline u64 get_cur_time_us(void) {
u64 get_cur_time_us(void) {
struct timeval tv;
struct timezone tz;
struct timespec ts;
int rc = clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
if (rc == -1) {
gettimeofday(&tv, &tz);
PFATAL("Failed to obtain timestamp (errno = %i: %s)\n", errno,
strerror(errno));
return (tv.tv_sec * 1000000ULL) + tv.tv_usec;
}
return SEC_TO_US((uint64_t)ts.tv_sec) + NS_TO_US((uint64_t)ts.tv_nsec);
}

View File

@ -1655,8 +1655,7 @@ void afl_fsrv_kill(afl_forkserver_t *fsrv) {
if (fsrv->fsrv_pid > 0) {
kill(fsrv->fsrv_pid, fsrv->fsrv_kill_signal);
usleep(25);
waitpid(fsrv->fsrv_pid, NULL, WNOHANG);
waitpid(fsrv->fsrv_pid, NULL, 0);
}

View File

@ -481,6 +481,14 @@ save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {
s32 fd;
u64 cksum = 0;
// will be classified away otherwise
if (unlikely((afl->current_score = *(u32 *)((u8 *)afl->fsrv.trace_bits + 1)) >
0)) {
memset(afl->fsrv.trace_bits + 1, 0, 4);
}
/* Update path frequency. */
/* Generating a hash on every input is super expensive. Bad idea and should

View File

@ -60,11 +60,80 @@ inline u32 select_next_queue_entry(afl_state_t *afl) {
}
//#define DEBUG_QUEUE 1
/* Compute the scheduling weight of a queue entry for the alias table.
 *
 * The weight starts at 1.0 and is multiplied step by step by factors derived
 * from the entry's statistics relative to the queue-wide averages passed in:
 *
 *   afl             - global fuzzer state (schedule, n_fuzz table)
 *   q               - queue entry being weighted
 *   avg_exec_us     - average execution time over active entries
 *   avg_bitmap_size - average of log(bitmap_size) over active entries
 *                     (the caller accumulates log() values, see
 *                     create_alias_table)
 *   avg_top_size    - average tc_ref (top-rated references) count
 *   avg_score       - average vulnerability score (0.0 when not in
 *                     exploit mode, which disables step 5)
 *
 * Returns the final weight, clamped below at 0.1 before the favored /
 * unfuzzed / redundant adjustments. */
double compute_weight(afl_state_t *afl, struct queue_entry *q,
                      double avg_exec_us, double avg_bitmap_size,
                      double avg_top_size, double avg_score) {

  /* NOTE(review): dead branch — unlikely(0) is always false, so the
     score-only weighting is never taken.  Presumably left over from
     experimentation; confirm before removing. */
  if (unlikely(0)) {

    return q->score / avg_score;

  } else {

    double weight = 1.0;

    /* Step 1: de-prioritize entries whose coverage bucket was hit often
       (only for the AFLFast-style schedules FAST..RARE). */
    if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) {

      u32 hits = afl->n_fuzz[q->n_fuzz_entry];
      if (likely(hits)) { weight /= (log10(hits) + 1); }

    }

#ifdef DEBUG_QUEUE
    fprintf(stderr, "WEIGHT id=%u fname=%s start_weight=1.0\n", q->id,
            q->fname);
    fprintf(stderr, " after step 1: %.2f (log10(hits))\n", weight);
#endif

    /* Step 2: favor faster entries (RARE schedule skips this). */
    if (likely(afl->schedule < RARE)) { weight *= (avg_exec_us / q->exec_us); }

#ifdef DEBUG_QUEUE
    fprintf(stderr, " after step 2: %.2f (exec_us)\n", weight);
#endif

    /* Step 3: favor entries with larger coverage bitmaps.  Note the log
       domain: avg_bitmap_size is already an average of log() values. */
    weight *= (log(q->bitmap_size) / avg_bitmap_size);

#ifdef DEBUG_QUEUE
    fprintf(stderr, " after step 3: %.2f (log(bitmap_size))\n", weight);
#endif

    /* Step 4: favor entries referenced by many top-rated slots. */
    weight *= (1 + (q->tc_ref / avg_top_size));

#ifdef DEBUG_QUEUE
    fprintf(stderr, " after step 4: %.2f (top_size)\n", weight);
#endif

    /* Step 5: in exploit mode, scale by the vulnerability score relative to
       the average (avg_score is 0.0 otherwise, disabling this step). */
    if (unlikely(avg_score != 0.0)) { weight *= (q->score / avg_score); }

#ifdef DEBUG_QUEUE
    fprintf(stderr, " after step 5: %.2f (score)\n", weight);
#endif

    /* Clamp, then apply the discrete bonuses/penalties below. */
    if (unlikely(weight < 0.1)) { weight = 0.1; }

    /* Step 6: strong boost for favored entries. */
    if (unlikely(q->favored)) {

      weight += 1;
      weight *= 5;

    }

#ifdef DEBUG_QUEUE
    fprintf(stderr, " after step 6: %.2f (favored)\n", weight);
#endif

    /* Step 7: boost entries that were never fuzzed yet. */
    if (unlikely(!q->was_fuzzed)) { weight *= 2.5; }

#ifdef DEBUG_QUEUE
    fprintf(stderr, " after step 7: %.2f (was_fuzzed)\n", weight);
#endif

    /* Final step: penalize entries redundant for the favored frontier. */
    if (unlikely(q->fs_redundant)) { weight *= 0.75; }

#ifdef DEBUG_QUEUE
    fprintf(stderr, " after final step: %.2f (fs_redundant)\n", weight);
#endif

    return weight;

  }

}
/* create the alias table that allows weighted random selection - expensive */
void create_alias_table(afl_state_t *afl) {
u32 n = afl->queued_items, i = 0, nSmall = 0, nLarge = n - 1;
u32 n = afl->queued_items, i = 0, nSmall = 0, nLarge = n - 1,
exploit = afl->fuzz_mode;
double sum = 0;
double *P = (double *)afl_realloc(AFL_BUF_PARAM(out), n * sizeof(double));
@ -91,7 +160,8 @@ void create_alias_table(afl_state_t *afl) {
double avg_exec_us = 0.0;
double avg_bitmap_size = 0.0;
double avg_len = 0.0;
double avg_top_size = 0.0;
double avg_score = 0.0;
u32 active = 0;
for (i = 0; i < n; i++) {
@ -103,7 +173,8 @@ void create_alias_table(afl_state_t *afl) {
avg_exec_us += q->exec_us;
avg_bitmap_size += log(q->bitmap_size);
avg_len += q->len;
avg_top_size += q->tc_ref;
if (exploit) { avg_score += /*log(*/ q->score /*)*/; }
++active;
}
@ -112,7 +183,9 @@ void create_alias_table(afl_state_t *afl) {
avg_exec_us /= active;
avg_bitmap_size /= active;
avg_len /= active;
avg_top_size /= active;
if (exploit) { avg_score /= active; }
for (i = 0; i < n; i++) {
@ -120,59 +193,8 @@ void create_alias_table(afl_state_t *afl) {
if (likely(!q->disabled)) {
double weight = 1.0;
{ // inline does result in a compile error with LTO, weird
if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) {
u32 hits = afl->n_fuzz[q->n_fuzz_entry];
if (likely(hits)) { weight /= (log10(hits) + 1); }
}
if (likely(afl->schedule < RARE)) {
double t = q->exec_us / avg_exec_us;
if (likely(t < 0.1)) {
// nothing
} else if (likely(t <= 0.25))
weight *= 0.9;
else if (likely(t <= 0.5)) {
// nothing
} else if (likely(t < 1.0))
weight *= 1.15;
else if (unlikely(t > 2.5 && t < 5.0))
weight *= 1.1;
// else nothing
}
double l = q->len / avg_len;
if (likely(l < 0.1))
weight *= 0.75;
else if (likely(l < 0.25))
weight *= 1.1;
else if (unlikely(l >= 10))
weight *= 1.1;
double bms = q->bitmap_size / avg_bitmap_size;
if (likely(bms < 0.5))
weight *= (1.0 + ((bms - 0.5) / 2));
else if (unlikely(bms > 1.33))
weight *= 1.1;
if (unlikely(!q->was_fuzzed)) { weight *= 2.5; }
if (unlikely(q->fs_redundant)) { weight *= 0.75; }
}
q->weight = weight;
q->weight = compute_weight(afl, q, avg_exec_us, avg_bitmap_size,
avg_top_size, avg_score);
q->perf_score = calculate_score(afl, q);
sum += q->weight;
@ -621,8 +643,8 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) {
q->trace_mini = NULL;
q->testcase_buf = NULL;
q->mother = afl->queue_cur;
q->weight = 1.0;
q->perf_score = 100;
q->score = afl->current_score;
if (unlikely(!q->score)) { q->score = 1; }
#ifdef INTROSPECTION
q->bitsmap_size = afl->bitsmap_size;
@ -1228,11 +1250,9 @@ inline void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q,
u32 len = q->len;
// only realloc if necessary or useful
// (a custom trim can make the testcase larger)
if (unlikely(len > old_len || len < old_len + 1024)) {
if (len != old_len) {
afl->q_testcase_cache_size += len - old_len;
afl->q_testcase_cache_size = afl->q_testcase_cache_size + len - old_len;
q->testcase_buf = (u8 *)realloc(q->testcase_buf, len);
if (unlikely(!q->testcase_buf)) {
@ -1261,48 +1281,41 @@ inline void queue_testcase_retake_mem(afl_state_t *afl, struct queue_entry *q,
if (likely(q->testcase_buf)) {
if (likely(in != q->testcase_buf)) {
u32 is_same = in == q->testcase_buf;
// only realloc if we save memory
if (unlikely(len < old_len + 1024)) {
if (likely(len != old_len)) {
u8 *ptr = (u8 *)realloc(q->testcase_buf, len);
u8 *ptr = (u8 *)realloc(q->testcase_buf, len);
if (likely(ptr)) {
if (likely(ptr)) {
q->testcase_buf = ptr;
afl->q_testcase_cache_size += len - old_len;
}
q->testcase_buf = ptr;
afl->q_testcase_cache_size = afl->q_testcase_cache_size + len - old_len;
}
memcpy(q->testcase_buf, in, len);
}
if (unlikely(!is_same)) { memcpy(q->testcase_buf, in, len); }
}
}
/* Returns the testcase buf from the file behind this queue entry.
Increases the refcount. */
Increases the refcount. */
inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
if (likely(q->testcase_buf)) { return q->testcase_buf; }
u32 len = q->len;
u32 len = q->len;
double weight = q->weight;
/* first handle if no testcase cache is configured */
// first handle if no testcase cache is configured, or if the
// weighting of the testcase is below average.
if (unlikely(weight < 1.0 || !afl->q_testcase_max_cache_size)) {
if (unlikely(!afl->q_testcase_max_cache_size)) {
u8 *buf;
if (likely(q == afl->queue_cur)) {
if (unlikely(q == afl->queue_cur)) {
buf = (u8 *)afl_realloc((void **)&afl->testcase_buf, len);
@ -1328,113 +1341,117 @@ inline u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q) {
}
/* now handle the testcase cache and we know it is an interesting one */
/* Buf not cached, let's load it */
u32 tid = afl->q_testcase_max_cache_count;
static u32 do_once = 0; // because even threaded we would want this. WIP
while (unlikely(
(afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size &&
afl->q_testcase_cache_count > 1) ||
afl->q_testcase_cache_count >= afl->q_testcase_max_cache_entries - 1)) {
/* We want a max number of entries to the cache that we learn.
Very simple: once the cache is filled by size - that is the max. */
if (unlikely(
afl->q_testcase_cache_size + len >=
afl->q_testcase_max_cache_size &&
(afl->q_testcase_cache_count < afl->q_testcase_max_cache_entries &&
afl->q_testcase_max_cache_count <
afl->q_testcase_max_cache_entries) &&
!do_once)) {
if (afl->q_testcase_max_cache_count > afl->q_testcase_cache_count) {
afl->q_testcase_max_cache_entries = afl->q_testcase_max_cache_count + 1;
} else {
afl->q_testcase_max_cache_entries = afl->q_testcase_cache_count + 1;
}
do_once = 1;
// release unneeded memory
afl->q_testcase_cache = (struct queue_entry **)ck_realloc(
afl->q_testcase_cache,
(afl->q_testcase_max_cache_entries + 1) * sizeof(size_t));
}
/* Cache full. We need to evict one or more to map one.
Get a random one which is not in use */
do {
// if the cache (MB) is not enough for the queue then this gets
// undesirable because q_testcase_max_cache_count grows sometimes
// although the number of items in the cache will not change hence
// more and more loops
tid = rand_below(afl, afl->q_testcase_max_cache_count);
} while (afl->q_testcase_cache[tid] == NULL ||
afl->q_testcase_cache[tid] == afl->queue_cur);
struct queue_entry *old_cached = afl->q_testcase_cache[tid];
free(old_cached->testcase_buf);
old_cached->testcase_buf = NULL;
afl->q_testcase_cache_size -= old_cached->len;
afl->q_testcase_cache[tid] = NULL;
--afl->q_testcase_cache_count;
++afl->q_testcase_evictions;
if (tid < afl->q_testcase_smallest_free)
afl->q_testcase_smallest_free = tid;
}
if (unlikely(tid >= afl->q_testcase_max_cache_entries)) {
// uh we were full, so now we have to search from start
tid = afl->q_testcase_smallest_free;
}
// we need this while loop in case there were ever previous evictions but
// not in this call.
while (unlikely(afl->q_testcase_cache[tid] != NULL))
++tid;
/* Map the test case into memory. */
int fd = open((char *)q->fname, O_RDONLY);
if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", (char *)q->fname); }
q->testcase_buf = (u8 *)malloc(len);
/* now handle the testcase cache */
if (unlikely(!q->testcase_buf)) {
PFATAL("Unable to malloc '%s' with len %u", (char *)q->fname, len);
/* Buf not cached, let's load it */
u32 tid = afl->q_testcase_max_cache_count;
static u32 do_once = 0; // because even threaded we would want this. WIP
}
while (unlikely(
afl->q_testcase_cache_size + len >= afl->q_testcase_max_cache_size ||
afl->q_testcase_cache_count >= afl->q_testcase_max_cache_entries - 1)) {
ck_read(fd, q->testcase_buf, len, q->fname);
close(fd);
/* We want a max number of entries to the cache that we learn.
Very simple: once the cache is filled by size - that is the max. */
/* Register testcase as cached */
afl->q_testcase_cache[tid] = q;
afl->q_testcase_cache_size += len;
++afl->q_testcase_cache_count;
if (likely(tid >= afl->q_testcase_max_cache_count)) {
if (unlikely(afl->q_testcase_cache_size + len >=
afl->q_testcase_max_cache_size &&
(afl->q_testcase_cache_count <
afl->q_testcase_max_cache_entries &&
afl->q_testcase_max_cache_count <
afl->q_testcase_max_cache_entries) &&
!do_once)) {
afl->q_testcase_max_cache_count = tid + 1;
if (afl->q_testcase_max_cache_count > afl->q_testcase_cache_count) {
} else if (unlikely(tid == afl->q_testcase_smallest_free)) {
afl->q_testcase_max_cache_entries =
afl->q_testcase_max_cache_count + 1;
afl->q_testcase_smallest_free = tid + 1;
} else {
afl->q_testcase_max_cache_entries = afl->q_testcase_cache_count + 1;
}
do_once = 1;
// release unneeded memory
afl->q_testcase_cache = (struct queue_entry **)ck_realloc(
afl->q_testcase_cache,
(afl->q_testcase_max_cache_entries + 1) * sizeof(size_t));
}
/* Cache full. We need to evict one or more to map one.
Get a random one which is not in use */
do {
// if the cache (MB) is not enough for the queue then this gets
// undesirable because q_testcase_max_cache_count grows sometimes
// although the number of items in the cache will not change hence
// more and more loops
tid = rand_below(afl, afl->q_testcase_max_cache_count);
} while (afl->q_testcase_cache[tid] == NULL ||
afl->q_testcase_cache[tid] == afl->queue_cur);
struct queue_entry *old_cached = afl->q_testcase_cache[tid];
free(old_cached->testcase_buf);
old_cached->testcase_buf = NULL;
afl->q_testcase_cache_size -= old_cached->len;
afl->q_testcase_cache[tid] = NULL;
--afl->q_testcase_cache_count;
++afl->q_testcase_evictions;
if (tid < afl->q_testcase_smallest_free)
afl->q_testcase_smallest_free = tid;
}
if (unlikely(tid >= afl->q_testcase_max_cache_entries)) {
// uh we were full, so now we have to search from start
tid = afl->q_testcase_smallest_free;
}
// we need this while loop in case there were ever previous evictions but
// not in this call.
while (unlikely(afl->q_testcase_cache[tid] != NULL))
++tid;
/* Map the test case into memory. */
int fd = open((char *)q->fname, O_RDONLY);
if (unlikely(fd < 0)) { PFATAL("Unable to open '%s'", (char *)q->fname); }
q->testcase_buf = (u8 *)malloc(len);
if (unlikely(!q->testcase_buf)) {
PFATAL("Unable to malloc '%s' with len %u", (char *)q->fname, len);
}
ck_read(fd, q->testcase_buf, len, q->fname);
close(fd);
/* Register testcase as cached */
afl->q_testcase_cache[tid] = q;
afl->q_testcase_cache_size += len;
++afl->q_testcase_cache_count;
if (likely(tid >= afl->q_testcase_max_cache_count)) {
afl->q_testcase_max_cache_count = tid + 1;
} else if (unlikely(tid == afl->q_testcase_smallest_free)) {
afl->q_testcase_smallest_free = tid + 1;
}
}
@ -1449,13 +1466,12 @@ inline void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q,
u32 len = q->len;
if (unlikely(q->weight < 1.0 ||
afl->q_testcase_cache_size + len >=
if (unlikely(afl->q_testcase_cache_size + len >=
afl->q_testcase_max_cache_size ||
afl->q_testcase_cache_count >=
afl->q_testcase_max_cache_entries - 1)) {
// no space or uninteresting? will be loaded regularly later.
// no space? will be loaded regularly later.
return;
}

View File

@ -322,7 +322,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
memcpy(backup, buf, len);
memcpy(changed, buf, len);
if (likely(afl->cmplog_random_colorization)) {
if (afl->cmplog_random_colorization) {
random_replace(afl, changed, len);
@ -402,7 +402,6 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
u32 i = 1;
u32 positions = 0;
while (i) {
restart:
@ -2938,8 +2937,7 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
// afl->queue_cur->exec_cksum
u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
u64 cmplog_start_us = get_cur_time_us();
u8 r = 1;
u8 r = 1;
if (unlikely(!afl->pass_stats)) {
afl->pass_stats = ck_alloc(sizeof(struct afl_pass_stat) * CMP_MAP_W);
@ -2967,12 +2965,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
if (!afl->queue_cur->taint || !afl->queue_cur->cmplog_colorinput) {
if (unlikely(colorization(afl, buf, len, &taint))) {
update_cmplog_time(afl, &cmplog_start_us);
return 1;
}
if (unlikely(colorization(afl, buf, len, &taint))) { return 1; }
// no taint? still try, create a dummy to prevent again colorization
if (!taint) {
@ -2981,7 +2974,6 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
fprintf(stderr, "TAINT FAILED\n");
#endif
afl->queue_cur->colorized = CMPLOG_LVL_MAX;
update_cmplog_time(afl, &cmplog_start_us);
return 0;
}
@ -3002,20 +2994,17 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
}
update_cmplog_time(afl, &cmplog_start_us);
struct tainted *t = taint;
#ifdef _DEBUG
while (t) {
#ifdef _DEBUG
fprintf(stderr, "T: idx=%u len=%u\n", t->pos, t->len);
#endif
t = t->next;
}
#endif
#if defined(_DEBUG) || defined(CMPLOG_INTROSPECTION)
u64 start_time = get_cur_time();
u32 cmp_locations = 0;
@ -3036,7 +3025,6 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
}
update_cmplog_time(afl, &cmplog_start_us);
return 1;
}
@ -3060,7 +3048,6 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
}
update_cmplog_time(afl, &cmplog_start_us);
return 1;
}
@ -3079,7 +3066,6 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
u64 orig_hit_cnt, new_hit_cnt;
u64 orig_execs = afl->fsrv.total_execs;
orig_hit_cnt = afl->queued_items + afl->saved_crashes;
update_cmplog_time(afl, &cmplog_start_us);
afl->stage_name = "input-to-state";
afl->stage_short = "its";
@ -3156,35 +3142,33 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
}
update_cmplog_time(afl, &cmplog_start_us);
}
r = 0;
exit_its:
// if (afl->cmplog_lvl == CMPLOG_LVL_MAX) {
if (afl->cmplog_lvl == CMPLOG_LVL_MAX) {
afl->queue_cur->colorized = CMPLOG_LVL_MAX;
afl->queue_cur->colorized = CMPLOG_LVL_MAX;
if (afl->queue_cur->cmplog_colorinput) {
if (afl->queue_cur->cmplog_colorinput) {
ck_free(afl->queue_cur->cmplog_colorinput);
ck_free(afl->queue_cur->cmplog_colorinput);
}
}
while (taint) {
while (taint) {
t = taint->next;
ck_free(taint);
taint = t;
t = taint->next;
ck_free(taint);
taint = t;
}
}
afl->queue_cur->taint = NULL;
afl->queue_cur->taint = NULL;
/*} else {
} else {
afl->queue_cur->colorized = LVL2;
@ -3198,7 +3182,7 @@ exit_its:
}
}*/
}
#ifdef CMPLOG_COMBINE
if (afl->queued_items + afl->saved_crashes > orig_hit_cnt + 1) {
@ -3286,7 +3270,6 @@ exit_its:
#endif
update_cmplog_time(afl, &cmplog_start_us);
return r;
}

View File

@ -666,8 +666,6 @@ abort_calibration:
void sync_fuzzers(afl_state_t *afl) {
if (unlikely(afl->afl_env.afl_no_sync)) { return; }
DIR *sd;
struct dirent *sd_ent;
u32 sync_cnt = 0, synced = 0, entries = 0;

View File

@ -33,15 +33,15 @@ u8 is_det_timeout(u64 cur_ms, u8 is_flip) {
u8 should_det_fuzz(afl_state_t *afl, struct queue_entry *q) {
if (unlikely(!afl->skipdet_g->virgin_det_bits)) {
if (!afl->skipdet_g->virgin_det_bits) {
afl->skipdet_g->virgin_det_bits =
(u8 *)ck_alloc(sizeof(u8) * afl->fsrv.map_size);
}
if (likely(!q->favored || q->passed_det)) return 0;
if (unlikely(!q->trace_mini)) return 0;
if (!q->favored || q->passed_det) return 0;
if (!q->trace_mini) return 0;
if (!afl->skipdet_g->last_cov_undet)
afl->skipdet_g->last_cov_undet = get_cur_time();
@ -122,8 +122,7 @@ u8 skip_deterministic_stage(afl_state_t *afl, u8 *orig_buf, u8 *out_buf,
afl->stage_cur = 0;
orig_hit_cnt = afl->queued_items + afl->saved_crashes;
static u8 *inf_eff_map;
inf_eff_map = (u8 *)ck_realloc(inf_eff_map, sizeof(u8) * len);
u8 *inf_eff_map = (u8 *)ck_alloc(sizeof(u8) * len);
memset(inf_eff_map, 1, sizeof(u8) * len);
if (common_fuzz_stuff(afl, orig_buf, len)) { return 0; }

View File

@ -279,13 +279,6 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
afl->afl_env.afl_final_sync =
get_afl_env(afl_environment_variables[i]) ? 1 : 0;
} else if (!strncmp(env, "AFL_NO_SYNC",
afl_environment_variable_len)) {
afl->afl_env.afl_no_sync =
get_afl_env(afl_environment_variables[i]) ? 1 : 0;
} else if (!strncmp(env, "AFL_CUSTOM_MUTATOR_ONLY",
afl_environment_variable_len)) {
@ -769,9 +762,8 @@ void afl_states_stop(void) {
if (el->fsrv.fsrv_pid > 0) {
kill(el->fsrv.fsrv_pid, el->fsrv.fsrv_kill_signal);
usleep(100);
/* Make sure the forkserver does not end up as zombie. */
waitpid(el->fsrv.fsrv_pid, NULL, WNOHANG);
waitpid(el->fsrv.fsrv_pid, NULL, 0);
}

View File

@ -207,12 +207,6 @@ void load_stats_file(afl_state_t *afl) {
}
if (starts_with("cmplog_time", keystring)) {
afl->cmplog_time_us = strtoull(lptr, &nptr, 10) * 1000000;
}
if (starts_with("trim_time", keystring)) {
afl->trim_time_us = strtoull(lptr, &nptr, 10) * 1000000;
@ -327,11 +321,8 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
#ifndef __HAIKU__
if (getrusage(RUSAGE_CHILDREN, &rus)) { rus.ru_maxrss = 0; }
#endif
u64 runtime_ms = afl->prev_run_time + cur_time - afl->start_time;
u64 overhead_ms = (afl->calibration_time_us + afl->sync_time_us +
afl->trim_time_us + afl->cmplog_time_us) /
1000;
if (!runtime_ms) { runtime_ms = 1; }
u64 runtime = afl->prev_run_time + cur_time - afl->start_time;
if (!runtime) { runtime = 1; }
fprintf(
f,
@ -344,7 +335,6 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
"time_wo_finds : %llu\n"
"fuzz_time : %llu\n"
"calibration_time : %llu\n"
"cmplog_time : %llu\n"
"sync_time : %llu\n"
"trim_time : %llu\n"
"execs_done : %llu\n"
@ -385,18 +375,20 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
"target_mode : %s%s%s%s%s%s%s%s%s%s\n"
"command_line : %s\n",
(afl->start_time /*- afl->prev_run_time*/) / 1000, cur_time / 1000,
runtime_ms / 1000, (u32)getpid(),
runtime / 1000, (u32)getpid(),
afl->queue_cycle ? (afl->queue_cycle - 1) : 0, afl->cycles_wo_finds,
afl->longest_find_time > cur_time - afl->last_find_time
? afl->longest_find_time / 1000
: ((afl->start_time == 0 || afl->last_find_time == 0)
? 0
: (cur_time - afl->last_find_time) / 1000),
(runtime_ms - MIN(runtime_ms, overhead_ms)) / 1000,
afl->calibration_time_us / 1000000, afl->cmplog_time_us / 1000000,
afl->sync_time_us / 1000000, afl->trim_time_us / 1000000,
afl->fsrv.total_execs,
afl->fsrv.total_execs / ((double)(runtime_ms) / 1000),
(runtime -
((afl->calibration_time_us + afl->sync_time_us + afl->trim_time_us) /
1000)) /
1000,
afl->calibration_time_us / 1000000, afl->sync_time_us / 1000000,
afl->trim_time_us / 1000000, afl->fsrv.total_execs,
afl->fsrv.total_execs / ((double)(runtime) / 1000),
afl->last_avg_execs_saved, afl->queued_items, afl->queued_favored,
afl->queued_discovered, afl->queued_imported, afl->queued_variable,
afl->max_depth, afl->current_entry, afl->pending_favored,
@ -640,10 +632,9 @@ void show_stats_normal(afl_state_t *afl) {
cur_ms = get_cur_time();
if (afl->most_time_key && afl->queue_cycle) {
if (afl->most_time_key) {
if (afl->most_time * 1000 + afl->sync_time_us / 1000 <
cur_ms - afl->start_time) {
if (afl->most_time * 1000 < cur_ms - afl->start_time) {
afl->most_time_key = 2;
afl->stop_soon = 2;
@ -652,7 +643,7 @@ void show_stats_normal(afl_state_t *afl) {
}
if (afl->most_execs_key == 1 && afl->queue_cycle) {
if (afl->most_execs_key == 1) {
if (afl->most_execs <= afl->fsrv.total_execs) {
@ -1340,9 +1331,7 @@ void show_stats_normal(afl_state_t *afl) {
sprintf(tmp, "disabled, ");
} else if (unlikely(!afl->bytes_trim_out ||
afl->bytes_trim_in <= afl->bytes_trim_out)) {
} else if (unlikely(!afl->bytes_trim_out)) {
sprintf(tmp, "n/a, ");
@ -1359,9 +1348,7 @@ void show_stats_normal(afl_state_t *afl) {
strcat(tmp, "disabled");
} else if (unlikely(!afl->blocks_eff_total ||
afl->blocks_eff_select >= afl->blocks_eff_total)) {
} else if (unlikely(!afl->blocks_eff_total)) {
strcat(tmp, "n/a");
@ -1475,10 +1462,9 @@ void show_stats_pizza(afl_state_t *afl) {
cur_ms = get_cur_time();
if (afl->most_time_key && afl->queue_cycle) {
if (afl->most_time_key) {
if (afl->most_time * 1000 + afl->sync_time_us / 1000 <
cur_ms - afl->start_time) {
if (afl->most_time * 1000 < cur_ms - afl->start_time) {
afl->most_time_key = 2;
afl->stop_soon = 2;
@ -1487,7 +1473,7 @@ void show_stats_pizza(afl_state_t *afl) {
}
if (afl->most_execs_key == 1 && afl->queue_cycle) {
if (afl->most_execs_key == 1) {
if (afl->most_execs <= afl->fsrv.total_execs) {
@ -2496,7 +2482,7 @@ void show_init_stats(afl_state_t *afl) {
}
inline void update_calibration_time(afl_state_t *afl, u64 *time) {
void update_calibration_time(afl_state_t *afl, u64 *time) {
u64 cur = get_cur_time_us();
afl->calibration_time_us += cur - *time;
@ -2504,7 +2490,7 @@ inline void update_calibration_time(afl_state_t *afl, u64 *time) {
}
inline void update_trim_time(afl_state_t *afl, u64 *time) {
void update_trim_time(afl_state_t *afl, u64 *time) {
u64 cur = get_cur_time_us();
afl->trim_time_us += cur - *time;
@ -2512,7 +2498,7 @@ inline void update_trim_time(afl_state_t *afl, u64 *time) {
}
inline void update_sync_time(afl_state_t *afl, u64 *time) {
void update_sync_time(afl_state_t *afl, u64 *time) {
u64 cur = get_cur_time_us();
afl->sync_time_us += cur - *time;
@ -2520,11 +2506,3 @@ inline void update_sync_time(afl_state_t *afl, u64 *time) {
}
/* Add the microseconds elapsed since *time to the cumulative cmplog
   timer (afl->cmplog_time_us) and reset *time to "now", so the caller
   can keep measuring subsequent intervals with the same variable. */
inline void update_cmplog_time(afl_state_t *afl, u64 *time) {
u64 cur = get_cur_time_us();
afl->cmplog_time_us += cur - *time;
*time = cur;
}

View File

@ -335,7 +335,6 @@ static void usage(u8 *argv0, int more_help) {
"AFL_STATSD_PORT: change default statsd port (default: 8125)\n"
"AFL_STATSD_TAGS_FLAVOR: set statsd tags format (default: disable tags)\n"
" suported formats: dogstatsd, librato, signalfx, influxdb\n"
"AFL_NO_SYNC: disables all syncing\n"
"AFL_SYNC_TIME: sync time between fuzzing instances (in minutes)\n"
"AFL_FINAL_SYNC: sync a final time when exiting (will delay the exit!)\n"
"AFL_NO_CRASH_README: do not create a README in the crashes directory\n"
@ -915,15 +914,8 @@ int main(int argc, char **argv_orig, char **envp) {
u8 suffix = 'M';
if (mem_limit_given) {
WARNF("Overriding previous -m option.");
} else {
mem_limit_given = 1;
}
if (mem_limit_given) { FATAL("Multiple -m options not supported"); }
mem_limit_given = 1;
if (!optarg) { FATAL("Wrong usage of -m"); }
@ -1469,16 +1461,15 @@ int main(int argc, char **argv_orig, char **envp) {
#endif
configure_afl_kill_signals(
&afl->fsrv, afl->afl_env.afl_child_kill_signal,
afl->afl_env.afl_fsrv_kill_signal,
(afl->fsrv.qemu_mode || afl->unicorn_mode || afl->fsrv.use_fauxsrv
configure_afl_kill_signals(&afl->fsrv, afl->afl_env.afl_child_kill_signal,
afl->afl_env.afl_fsrv_kill_signal,
(afl->fsrv.qemu_mode || afl->unicorn_mode
#ifdef __linux__
|| afl->fsrv.nyx_mode
|| afl->fsrv.nyx_mode
#endif
)
? SIGKILL
: SIGTERM);
)
? SIGKILL
: SIGTERM);
setup_signal_handlers();
check_asan_opts(afl);
@ -2595,7 +2586,7 @@ int main(int argc, char **argv_orig, char **envp) {
(!afl->queue_cycle && afl->afl_env.afl_import_first)) &&
afl->sync_id)) {
if (unlikely(!afl->queue_cycle && afl->afl_env.afl_import_first)) {
if (!afl->queue_cycle && afl->afl_env.afl_import_first) {
OKF("Syncing queues from other fuzzer instances first ...");
@ -2603,15 +2594,16 @@ int main(int argc, char **argv_orig, char **envp) {
sync_fuzzers(afl);
if (!afl->queue_cycle && afl->afl_env.afl_import_first) {
// real start time, we reset, so this works correctly with -V
afl->start_time = get_cur_time();
}
}
++afl->queue_cycle;
if (afl->afl_env.afl_no_ui) {
ACTF("Entering queue cycle %llu\n", afl->queue_cycle);
}
runs_in_current_cycle = (u32)-1;
afl->cur_skipped_items = 0;
@ -2620,7 +2612,7 @@ int main(int argc, char **argv_orig, char **envp) {
// queue is fully cycled.
time_t cursec = time(NULL);
struct tm *curdate = localtime(&cursec);
if (unlikely(!afl->afl_env.afl_pizza_mode)) {
if (likely(!afl->afl_env.afl_pizza_mode)) {
if (unlikely(curdate->tm_mon == 3 && curdate->tm_mday == 1)) {
@ -2665,6 +2657,13 @@ int main(int argc, char **argv_orig, char **envp) {
}
if (unlikely(afl->not_on_tty)) {
ACTF("Entering queue cycle %llu.", afl->queue_cycle);
fflush(stdout);
}
/* If we had a full queue cycle with no new finds, try
recombination strategies next. */
@ -2869,7 +2868,9 @@ int main(int argc, char **argv_orig, char **envp) {
}
u64 execs_before = afl->fsrv.total_execs;
skipped_fuzz = fuzz_one(afl);
afl->queue_cur->total_execs += afl->fsrv.total_execs - execs_before;
#ifdef INTROSPECTION
++afl->queue_cur->stats_selected;
@ -2950,13 +2951,26 @@ int main(int argc, char **argv_orig, char **envp) {
if (likely(!afl->stop_soon && afl->sync_id)) {
if (unlikely(afl->is_main_node)) {
if (likely(afl->skip_deterministic)) {
if (unlikely(cur_time > (afl->sync_time >> 1) + afl->last_sync_time)) {
if (unlikely(afl->is_main_node)) {
if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) {
if (unlikely(cur_time >
(afl->sync_time >> 1) + afl->last_sync_time)) {
sync_fuzzers(afl);
if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) {
sync_fuzzers(afl);
}
}
} else {
if (unlikely(cur_time > afl->sync_time + afl->last_sync_time)) {
if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); }
}
@ -2964,11 +2978,7 @@ int main(int argc, char **argv_orig, char **envp) {
} else {
if (unlikely(cur_time > afl->sync_time + afl->last_sync_time)) {
if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); }
}
sync_fuzzers(afl);
}
@ -3059,6 +3069,37 @@ stop_fuzzing:
}
if (getenv("AFL_DUMP_QUEUE_ON_EXIT")) {
for (u32 mode = 0; mode < 2; mode++) { // explore + exploit mode data
afl->fuzz_mode = mode;
create_alias_table(afl);
fprintf(stderr, "\nQUEUE DUMP MODE: %u\n", mode);
for (u32 k = 0; k < afl->queued_items; ++k) {
struct queue_entry *q = afl->queue_buf[k];
fprintf(stderr,
"item=%u fname=%s len=%u exec_us=%llu total_execs=%llu "
"has_new_cov=%u "
"var_behavior=%u favored=%u fs_redundant=%u disabled=%u "
"bitmap_size=%u tc_ref=%u fuzz_level=%u was_fuzzed=%u "
"mother=%d found=%u perf_score=%.2f weight=%.2f score=%u\n",
k, q->fname, q->len, q->exec_us, q->total_execs, q->has_new_cov,
q->var_behavior, q->favored, q->fs_redundant, q->disabled,
q->bitmap_size, q->tc_ref, q->fuzz_level, q->was_fuzzed,
q->mother == NULL ? -1 : (int)q->mother->id, q->found,
q->perf_score, q->weight, q->score);
}
fprintf(stderr, "\n");
}
}
if (frida_afl_preload) { ck_free(frida_afl_preload); }
fclose(afl->fsrv.plot_file);

View File

@ -95,24 +95,6 @@ inline u64 hash64(u8 *key, u32 len, u64 seed) {
}
/* Hash a file */
/* Hash the contents of the file at 'fn' with hash64() (seed 0), e.g. to
   detect whether a target binary changed between runs.  Any I/O failure
   is fatal (PFATAL).  Returns 0 for an empty file, because mmap()
   rejects zero-length mappings. */
u64 get_binary_hash(u8 *fn) {

  int fd = open(fn, O_RDONLY);
  if (fd < 0) { PFATAL("Unable to open '%s'", fn); }

  struct stat st;
  if (fstat(fd, &st) < 0) { PFATAL("Unable to fstat '%s'", fn); }

  /* NOTE(review): hash64() takes a u32 length, so a file >= 4 GB would be
     truncated here — assumed not to occur for real targets; confirm. */
  u32 f_len = st.st_size;

  /* Bugfix: mmap() fails with EINVAL on a zero length, which previously
     turned a perfectly valid empty file into a hard PFATAL. */
  if (!f_len) {

    close(fd);
    return 0;

  }

  u8 *f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0);
  if (f_data == MAP_FAILED) { PFATAL("Unable to mmap file '%s'", fn); }
  /* The mapping stays valid after the descriptor is closed. */
  close(fd);

  u64 hash = hash64(f_data, f_len, 0);
  if (munmap(f_data, f_len)) { PFATAL("unmap() failed"); }
  return hash;

}
// Public domain SHA1 implementation copied from:
// https://github.com/x42/liboauth/blob/7001b8256cd654952ec2515b055d2c5b243be600/src/sha1.c

View File

@ -239,15 +239,15 @@ u8 *afl_shm_init(sharedmem_t *shm, size_t map_size,
if (shm->cmplog_g_shm_fd == -1) { PFATAL("shm_open() failed"); }
/* configure the size of the shared memory segment */
if (ftruncate(shm->cmplog_g_shm_fd, sizeof(struct cmp_map))) {
if (ftruncate(shm->cmplog_g_shm_fd, map_size)) {
PFATAL("setup_shm(): cmplog ftruncate() failed");
}
/* map the shared memory segment to the address space of the process */
shm->cmp_map = mmap(0, sizeof(struct cmp_map), PROT_READ | PROT_WRITE,
MAP_SHARED, shm->cmplog_g_shm_fd, 0);
shm->cmp_map = mmap(0, map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
shm->cmplog_g_shm_fd, 0);
if (shm->cmp_map == MAP_FAILED) {
close(shm->cmplog_g_shm_fd);

View File

@ -83,6 +83,8 @@ static u32 tcnt, highest; /* tuple content information */
static u32 in_len; /* Input data length */
static u32 score;
static u32 map_size = MAP_SIZE, timed_out = 0;
static bool quiet_mode, /* Hide non-essential messages? */
@ -178,8 +180,7 @@ fsrv_run_result_t fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv,
void classify_counts(afl_forkserver_t *fsrv) {
u8 *mem = fsrv->trace_bits;
const u8 *map = (binary_mode || collect_coverage) ? count_class_binary
: count_class_human;
const u8 *map = binary_mode ? count_class_binary : count_class_human;
u32 i = map_size;
@ -225,13 +226,8 @@ static void at_exit_handler(void) {
if (remove_shm) {
remove_shm = false;
if (shm.map) afl_shm_deinit(&shm);
if ((shm_fuzz && shm_fuzz->shmemfuzz_mode) || fsrv->use_shmem_fuzz) {
shm_fuzz = deinit_shmem(fsrv, shm_fuzz);
}
if (fsrv->use_shmem_fuzz) deinit_shmem(fsrv, shm_fuzz);
}
@ -244,9 +240,23 @@ static void at_exit_handler(void) {
static void analyze_results(afl_forkserver_t *fsrv) {
u32 i;
if (unlikely((score = *(u32 *)((u8 *)fsrv->trace_bits + 1)) > 0)) {
memset(fsrv->trace_bits + 1, 0, 4);
}
for (i = 0; i < map_size; i++) {
if (fsrv->trace_bits[i]) { coverage_map[i] |= fsrv->trace_bits[i]; }
if (fsrv->trace_bits[i]) {
total += fsrv->trace_bits[i];
if (fsrv->trace_bits[i] > highest) highest = fsrv->trace_bits[i];
// if (!coverage_map[i]) { coverage_map[i] = 1; }
coverage_map[i] |= fsrv->trace_bits[i];
}
}
@ -268,6 +278,12 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv, u8 *outfile) {
}
if (unlikely((score = *(u32 *)((u8 *)fsrv->trace_bits + 1)) > 0)) {
memset(fsrv->trace_bits + 1, 0, 4);
}
if (cmin_mode &&
(fsrv->last_run_timed_out || (!caa && child_crashed != cco))) {
@ -1338,8 +1354,6 @@ int main(int argc, char **argv_orig, char **envp) {
}
if (collect_coverage) { binary_mode = false; } // ensure this
if (optind == argc || !out_file) { usage(argv[0]); }
if (in_dir && in_filelist) { FATAL("you can only specify either -i or -I"); }
@ -1532,8 +1546,6 @@ int main(int argc, char **argv_orig, char **envp) {
/* initialize cmplog_mode */
shm_fuzz->cmplog_mode = 0;
atexit(at_exit_handler);
u8 *map = afl_shm_init(shm_fuzz, MAX_FILE + sizeof(u32), 1);
shm_fuzz->shmemfuzz_mode = true;
if (!map) { FATAL("BUG: Zero return from afl_shm_init."); }
@ -1680,9 +1692,12 @@ int main(int argc, char **argv_orig, char **envp) {
if ((coverage_map = (u8 *)malloc(map_size + 64)) == NULL)
FATAL("coult not grab memory");
edges_only = false;
raw_instr_output = true;
}
atexit(at_exit_handler);
if (get_afl_env("AFL_DEBUG")) {
int j = optind;
@ -1699,12 +1714,9 @@ int main(int argc, char **argv_orig, char **envp) {
map_size = fsrv->map_size;
if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz) {
if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz)
shm_fuzz = deinit_shmem(fsrv, shm_fuzz);
}
if (in_dir) {
if (execute_testcases(in_dir) == 0) {
@ -1736,12 +1748,9 @@ int main(int argc, char **argv_orig, char **envp) {
} else {
if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz) {
if (fsrv->support_shmem_fuzz && !fsrv->use_shmem_fuzz)
shm_fuzz = deinit_shmem(fsrv, shm_fuzz);
}
#ifdef __linux__
if (!fsrv->nyx_mode) {
@ -1772,12 +1781,20 @@ int main(int argc, char **argv_orig, char **envp) {
OKF("Captured %u tuples (map size %u, highest value %u, total values %llu) "
"in '%s'." cRST,
tcnt, fsrv->real_map_size, highest, total, out_file);
if (collect_coverage)
if (collect_coverage) {
OKF("A coverage of %u edges were achieved out of %u existing (%.02f%%) "
"with %llu input files.",
tcnt, map_size, ((float)tcnt * 100) / (float)map_size,
fsrv->total_execs);
} else if (score > 0) {
OKF("Path score is %u (cyclomatic and/or vulnerability scoring).\n",
score);
}
}
if (stdin_file) {
@ -1788,9 +1805,9 @@ int main(int argc, char **argv_orig, char **envp) {
}
remove_shm = false;
remove_shm = 0;
afl_shm_deinit(&shm);
if (fsrv->use_shmem_fuzz) { shm_fuzz = deinit_shmem(fsrv, shm_fuzz); }
if (fsrv->use_shmem_fuzz) shm_fuzz = deinit_shmem(fsrv, shm_fuzz);
u32 ret;

View File

@ -1 +1 @@
4b4fdab1
764b66b2

View File

@ -136,7 +136,7 @@ def overlap_alignments(segments, memory):
# https://github.com/llvm-mirror/llvm/blob/master/include/llvm/ADT/Triple.h
def get_arch():
arch, arch_vendor, arch_os, *arch_remains = lldb.debugger.GetSelectedTarget().GetTriple().split("-")
arch, arch_vendor, arch_os = lldb.target.GetTriple().split("-")
if arch == "x86_64":
return "x64"
elif arch == "x86" or arch == "i386":
@ -165,7 +165,7 @@ def dump_arch_info():
def dump_regs():
reg_state = {}
for reg_list in lldb.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame().GetRegisters():
for reg_list in lldb.frame.GetRegisters():
if "general purpose registers" in reg_list.GetName().lower():
for reg in reg_list:
reg_state[reg.GetName()] = int(reg.GetValue(), 16)
@ -180,9 +180,8 @@ def get_section_info(sec):
module_name = sec.addr.module.file.GetFilename()
module_name = module_name if module_name is not None else ""
long_name = module_name + "." + name
load_addr = sec.addr.GetLoadAddress(lldb.debugger.GetSelectedTarget())
return load_addr, (load_addr + sec.size), sec.size, long_name
return sec.addr.load_addr, (sec.addr.load_addr + sec.size), sec.size, long_name
def dump_process_memory(output_dir):
@ -192,7 +191,7 @@ def dump_process_memory(output_dir):
# 1st pass:
# Loop over the segments, fill in the segment info dictionary
for module in lldb.debugger.GetSelectedTarget().module_iter():
for module in lldb.target.module_iter():
for seg_ea in module.section_iter():
seg_info = {"module": module.file.GetFilename()}
(
@ -202,8 +201,8 @@ def dump_process_memory(output_dir):
seg_info["name"],
) = get_section_info(seg_ea)
# TODO: Ugly hack for -1 LONG address on 32-bit
if seg_info["start"] >= sys.maxsize or seg_size <= 0:
print ("Throwing away page: {}".format(seg_info["name"]))
if seg_info["start"] >= sys.maxint or seg_size <= 0:
print "Throwing away page: {}".format(seg_info["name"])
continue
# Page-align segment
@ -213,7 +212,7 @@ def dump_process_memory(output_dir):
raw_segment_list.append(seg_info)
# Add the stack memory region (just hardcode 0x1000 around the current SP)
sp = lldb.debugger.GetSelectedTarget().GetProcess().GetSelectedThread().GetSelectedFrame().GetSP()
sp = lldb.frame.GetSP()
start_sp = ALIGN_PAGE_DOWN(sp)
raw_segment_list.append(
{"start": start_sp, "end": start_sp + 0x1000, "name": "STACK"}
@ -229,7 +228,7 @@ def dump_process_memory(output_dir):
start_addr = -1
next_region_addr = 0
while next_region_addr > start_addr:
err = lldb.debugger.GetSelectedTarget().GetProcess().GetMemoryRegionInfo(next_region_addr, mem_info)
err = lldb.process.GetMemoryRegionInfo(next_region_addr, mem_info)
# TODO: Should check err.success. If False, what do we do?
if not err.success:
break
@ -268,7 +267,7 @@ def dump_process_memory(output_dir):
region_name = seg_info["name"]
# Compress and dump the content to a file
err = lldb.SBError()
seg_content = lldb.debugger.GetSelectedTarget().GetProcess().ReadMemory(
seg_content = lldb.process.ReadMemory(
start_addr, end_addr - start_addr, err
)
if seg_content == None:
@ -341,12 +340,11 @@ def main():
index_file.close()
print ("Done.")
except Exception as e:
except Exception, e:
print ("!!! ERROR:\n\t{}".format(repr(e)))
if __name__ == "__main__":
lldb.debugger = lldb.SBDebugger.Create()
main()
elif lldb.debugger:
main()

View File

@ -119,7 +119,7 @@ def main():
binary_code = binary_file.read()
binary_file.close()
# Assert that the binary size is within limits
# Apply constraints to the mutated input
if len(binary_code) > CODE_SIZE_MAX:
print("Binary code is too large (> {} bytes)".format(CODE_SIZE_MAX))
return

View File

@ -69,21 +69,3 @@ need to be changed for other OSes.
Current supported OSes are: Linux, Darwin, FreeBSD (thanks to @devnexen)
Also, the following example (generate_libtoken_dict.sh) shows how to use a script to capture tokens from the
files in the target output directory,
and then generate a dictionary file from those tokens.
#### usage:
```bash
./generate_libtoken_dict.sh -p /path/to/libtokencap.so -b /path/to/target/program -o /path/to/target/output -t 5 -- [-program_args]
```
#### description opts:
- ```-o``` : Path to target output directory ;
- ```-b``` : Path to target program binary ;
- ```-p``` : Path to LD_PRELOAD library ;
- ```-t``` : Timeout in seconds ;
- ```-- [-program_args]```: Any additional arguments required by the target binary can be specified after ```--```.
#### output:
A sorted and unique token dictionary file with the extension ``*.dict``
is created in the same directory as the target output; it contains the tokens captured during the execution of the target binary.

View File

@ -1,55 +0,0 @@
#!/bin/sh
# Build an AFL++ token dictionary with libtokencap: replay every queue
# file ("id*") from the target output directory through the target
# binary with the capture library preloaded, then sort and deduplicate
# the captured tokens into a <target>_tokens.dict file.

#help
usage() {
  echo "Usage: $0 -o <target_output> -b <target_bin> -p <LD_PRELOAD_PATH> [-t <timeout_sec>] -- [target_args]"
  echo "Options:"
  echo "  -o  Path to target output directory"
  echo "  -b  Path to target program binary"
  echo "  -p  Path to LD_PRELOAD library"
  echo "  -t  Timeout in seconds"
  exit 1
}

#parse cli options
while getopts ":o:b:p:t:" opt; do
  case $opt in
    o) target_output="$OPTARG" ;;
    b) target_bin="$OPTARG" ;;
    p) LD_PRELOAD_PATH="$OPTARG" ;;
    t) timeout_sec="$OPTARG" ;;
    \?) echo "Invalid option: -$OPTARG" >&2; usage ;;
    :) echo "Option -$OPTARG requires an argument." >&2; usage ;;
  esac
done

#shift away the parsed opts
shift $((OPTIND - 1))

#check options
if [ -z "$target_output" ] || [ -z "$target_bin" ] || [ -z "$LD_PRELOAD_PATH" ]; then
  echo "Error: Missing mandatory opts" >&2
  usage
fi

# -t is optional per the usage text: fall back to a sane default so
# "timeout" is never invoked with an empty duration (which would make
# every replay fail).
timeout_sec="${timeout_sec:-5}"

# initialize vars
AFL_TOKEN_FILE="${PWD}/temp_output.txt"
AFL_DICT_FILE="${PWD}/$(basename "$target_bin")_tokens.dict"

#generate token-file
# Iterate with "find | while read" instead of "for i in $(find ...)" so
# file names containing spaces survive intact; the whole loop's stdout
# is redirected into the temp token file.
find "$target_output" -type f -name "id*" | while IFS= read -r i; do
  LD_PRELOAD="$LD_PRELOAD_PATH" \
    timeout -s SIGKILL "$timeout_sec" \
    "$target_bin" "$@" "$i"
done >"$AFL_TOKEN_FILE"

# sort & remove duplicates
sort -u "$AFL_TOKEN_FILE" >"$AFL_DICT_FILE"

# delete temp-file
rm "$AFL_TOKEN_FILE"

# print done-message
echo "Token dictionary created: $AFL_DICT_FILE"
echo "Script completed successfully"