Merge pull request #1101 from AFLplusplus/dev

Dev
van Hauser
2021-12-09 11:55:36 +01:00
committed by GitHub
256 changed files with 12826 additions and 10049 deletions


@ -8,8 +8,9 @@ assignees: ''
---
**IMPORTANT**
1. You have verified that the issue to be present in the current `dev` branch
2. Please supply the command line options and relevant environment variables, e.g. a copy-paste of the contents of `out/default/fuzzer_setup`
1. You have verified that the issue is present in the current `dev` branch.
2. Please supply the command line options and relevant environment variables,
e.g., a copy-paste of the contents of `out/default/fuzzer_setup`.
Thank you for making AFL++ better!

.gitignore

@ -30,6 +30,7 @@ afl-g++-fast
afl-gotcpu
afl-ld
afl-ld-lto
afl-cs-proxy
afl-qemu-trace
afl-showmap
afl-tmin
@ -94,3 +95,5 @@ utils/optimin/optimin
utils/persistent_mode/persistent_demo
utils/persistent_mode/persistent_demo_new
utils/persistent_mode/test-instr
!coresight_mode
!coresight_mode/coresight-trace

.gitmodules

@ -13,3 +13,9 @@
[submodule "utils/optimin/EvalMaxSAT"]
path = utils/optimin/EvalMaxSAT
url = https://github.com/FlorentAvellaneda/EvalMaxSAT
[submodule "coresight_mode/patchelf"]
path = coresight_mode/patchelf
url = https://github.com/NixOS/patchelf.git
[submodule "coresight_mode/coresight-trace"]
path = coresight_mode/coresight-trace
url = https://github.com/RICSecLab/coresight-trace.git


@ -1,4 +1,4 @@
# How to submit a Pull Request to AFLplusplus
# How to submit a Pull Request to AFL++
All contributions (pull requests) must be made against our `dev` branch.
@ -15,10 +15,9 @@ project, or added a file in a directory we already format, otherwise run:
./.custom-format.py -i file-that-you-have-created.c
```
Regarding the coding style, please follow the AFL style.
No camel case at all and use AFL's macros wherever possible
(e.g. WARNF, FATAL, MAP_SIZE, ...).
Regarding the coding style, please follow the AFL style. No camel case at all
and use AFL's macros wherever possible (e.g., WARNF, FATAL, MAP_SIZE, ...).
Remember that AFLplusplus has to build and run on many platforms, so
generalize your Makefiles/GNUmakefile (or your patches to our pre-existing
Makefiles) to be as generic as possible.
Remember that AFL++ has to build and run on many platforms, so generalize your
Makefiles/GNUmakefile (or your patches to our pre-existing Makefiles) to be as
generic as possible.


@ -16,6 +16,8 @@ env NO_ARCH_OPT 1
RUN apt-get update && \
apt-get -y install --no-install-suggests --no-install-recommends \
automake \
cmake \
meson \
ninja-build \
bison flex \
build-essential \


@ -10,7 +10,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# For Heiko:
@ -32,7 +32,7 @@ VERSION = $(shell grep '^$(HASH)define VERSION ' ../config.h | cut -d '"' -f
# PROGS intentionally omit afl-as, which gets installed elsewhere.
PROGS = afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze
SH_PROGS = afl-plot afl-cmin afl-cmin.bash afl-whatsup afl-system-config afl-persistent-config
SH_PROGS = afl-plot afl-cmin afl-cmin.bash afl-whatsup afl-system-config afl-persistent-config afl-cc
MANPAGES=$(foreach p, $(PROGS) $(SH_PROGS), $(p).8) afl-as.8
ASAN_OPTIONS=detect_leaks=0
@ -346,7 +346,7 @@ help:
@echo "HELP --- the following make targets exist:"
@echo "=========================================="
@echo "all: just the main afl++ binaries"
@echo "binary-only: everything for binary-only fuzzing: qemu_mode, unicorn_mode, libdislocator, libtokencap"
@echo "binary-only: everything for binary-only fuzzing: qemu_mode, frida_mode, unicorn_mode, coresight_mode, libdislocator, libtokencap"
@echo "source-only: everything for source code fuzzing: gcc_plugin, libdislocator, libtokencap"
@echo "distrib: everything (for both binary-only and source code fuzzing)"
@echo "man: creates simple man pages from the help option of the programs"
@ -541,7 +541,7 @@ test_build: afl-cc afl-gcc afl-as afl-showmap
# echo 1 | ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr1 ./test-instr
# @rm -f test-instr
# @cmp -s .test-instr0 .test-instr1; DR="$$?"; rm -f .test-instr0 .test-instr1; if [ "$$DR" = "0" ]; then echo; echo "Oops, the instrumentation of afl-gcc does not seem to be behaving correctly!"; \
# gcc -v 2>&1 | grep -q -- --with-as= && ( echo; echo "Gcc is configured not to use an external assembler with the -B option."; echo "See docs/INSTALL.md section 5 how to build a -B enabled gcc." ) || \
# gcc -v 2>&1 | grep -q -- --with-as= && ( echo; echo "Gcc is configured not to use an external assembler with the -B option." ) || \
# ( echo; echo "Please post to https://github.com/AFLplusplus/AFLplusplus/issues to troubleshoot the issue." ); echo; exit 0; fi
# @echo
# @echo "[+] All right, the instrumentation of afl-gcc seems to be working!"
@ -564,7 +564,7 @@ all_done: test_build
.PHONY: clean
clean:
rm -rf $(PROGS) libradamsa.so afl-fuzz-document afl-as as afl-g++ afl-clang afl-clang++ *.o src/*.o *~ a.out core core.[1-9][0-9]* *.stackdump .test .test1 .test2 test-instr .test-instr0 .test-instr1 afl-qemu-trace afl-gcc-fast afl-gcc-pass.so afl-g++-fast ld *.so *.8 test/unittests/*.o test/unittests/unit_maybe_alloc test/unittests/preallocable .afl-* afl-gcc afl-g++ afl-clang afl-clang++ test/unittests/unit_hash test/unittests/unit_rand *.dSYM
rm -rf $(PROGS) libradamsa.so afl-fuzz-document afl-as as afl-g++ afl-clang afl-clang++ *.o src/*.o *~ a.out core core.[1-9][0-9]* *.stackdump .test .test1 .test2 test-instr .test-instr0 .test-instr1 afl-cs-proxy afl-qemu-trace afl-gcc-fast afl-gcc-pass.so afl-g++-fast ld *.so *.8 test/unittests/*.o test/unittests/unit_maybe_alloc test/unittests/preallocable .afl-* afl-gcc afl-g++ afl-clang afl-clang++ test/unittests/unit_hash test/unittests/unit_rand *.dSYM
-$(MAKE) -f GNUmakefile.llvm clean
-$(MAKE) -f GNUmakefile.gcc_plugin clean
$(MAKE) -C utils/libdislocator clean
@ -579,19 +579,23 @@ clean:
$(MAKE) -C qemu_mode/libqasan clean
-$(MAKE) -C frida_mode clean
ifeq "$(IN_REPO)" "1"
-test -e coresight_mode/coresight-trace/Makefile && $(MAKE) -C coresight_mode/coresight-trace clean || true
-test -e qemu_mode/qemuafl/Makefile && $(MAKE) -C qemu_mode/qemuafl clean || true
test -e unicorn_mode/unicornafl/Makefile && $(MAKE) -C unicorn_mode/unicornafl clean || true
else
rm -rf coresight_mode/coresight_trace
rm -rf qemu_mode/qemuafl
rm -rf unicorn_mode/unicornafl
endif
.PHONY: deepclean
deepclean: clean
rm -rf coresight_mode/coresight-trace
rm -rf unicorn_mode/unicornafl
rm -rf qemu_mode/qemuafl
ifeq "$(IN_REPO)" "1"
# NEVER EVER ACTIVATE THAT!!!!! git reset --hard >/dev/null 2>&1 || true
git checkout coresight_mode/coresight-trace
git checkout unicorn_mode/unicornafl
git checkout qemu_mode/qemuafl
endif
@ -610,6 +614,9 @@ endif
# -$(MAKE) -C utils/plot_ui
-$(MAKE) -C frida_mode
ifneq "$(SYS)" "Darwin"
ifeq "$(ARCH)" "aarch64"
-$(MAKE) -C coresight_mode
endif
-cd qemu_mode && sh ./build_qemu_support.sh
-cd unicorn_mode && unset CFLAGS && sh ./build_unicorn_support.sh
endif
@ -624,6 +631,9 @@ binary-only: test_shm test_python ready $(PROGS)
# -$(MAKE) -C utils/plot_ui
-$(MAKE) -C frida_mode
ifneq "$(SYS)" "Darwin"
ifeq "$(ARCH)" "aarch64"
-$(MAKE) -C coresight_mode
endif
-cd qemu_mode && sh ./build_qemu_support.sh
-cd unicorn_mode && unset CFLAGS && sh ./build_unicorn_support.sh
endif
@ -695,7 +705,7 @@ endif
.PHONY: uninstall
uninstall:
-cd $${DESTDIR}$(BIN_PATH) && rm -f $(PROGS) $(SH_PROGS) afl-qemu-trace afl-plot-ui afl-fuzz-document afl-network-server afl-g* afl-plot.sh afl-as afl-ld-lto afl-c* afl-lto*
-cd $${DESTDIR}$(BIN_PATH) && rm -f $(PROGS) $(SH_PROGS) afl-cs-proxy afl-qemu-trace afl-plot-ui afl-fuzz-document afl-network-server afl-g* afl-plot.sh afl-as afl-ld-lto afl-c* afl-lto*
-cd $${DESTDIR}$(HELPER_PATH) && rm -f afl-g*.*o afl-llvm-*.*o afl-compiler-*.*o libdislocator.so libtokencap.so libcompcov.so libqasan.so afl-frida-trace.so socketfuzz*.so argvfuzz*.so libAFLDriver.a libAFLQemuDriver.a as afl-as SanitizerCoverage*.so compare-transform-pass.so cmplog-*-pass.so split-*-pass.so dynamic_list.txt
-rm -rf $${DESTDIR}$(MISC_PATH)/testcases $${DESTDIR}$(MISC_PATH)/dictionaries
-sh -c "ls docs/*.md | sed 's|^docs/|$${DESTDIR}$(DOC_PATH)/|' | xargs rm -f"


@ -17,7 +17,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
#TEST_MMAP=1
PREFIX ?= /usr/local


@ -12,7 +12,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# For Heiko:
@ -308,7 +308,7 @@ ifeq "$(TEST_MMAP)" "1"
endif
PROGS_ALWAYS = ./afl-cc ./afl-compiler-rt.o ./afl-compiler-rt-32.o ./afl-compiler-rt-64.o
PROGS = $(PROGS_ALWAYS) ./afl-llvm-pass.so ./SanitizerCoveragePCGUARD.so ./split-compares-pass.so ./split-switches-pass.so ./cmplog-routines-pass.so ./cmplog-instructions-pass.so ./cmplog-switches-pass.so ./afl-llvm-dict2file.so ./compare-transform-pass.so ./afl-ld-lto ./afl-llvm-lto-instrumentlist.so ./afl-llvm-lto-instrumentation.so ./SanitizerCoverageLTO.so
PROGS = $(PROGS_ALWAYS) ./afl-llvm-pass.so ./SanitizerCoveragePCGUARD.so ./split-compares-pass.so ./split-switches-pass.so ./cmplog-routines-pass.so ./cmplog-instructions-pass.so ./cmplog-switches-pass.so ./afl-llvm-dict2file.so ./compare-transform-pass.so ./afl-ld-lto ./afl-llvm-lto-instrumentlist.so ./SanitizerCoverageLTO.so
# If prerequisites are not given, warn, do not build anything, and exit with code 0
ifeq "$(LLVMVER)" ""
@ -408,11 +408,6 @@ ifeq "$(LLVM_LTO)" "1"
endif
./SanitizerCoverageLTO.so: instrumentation/SanitizerCoverageLTO.so.cc
ifeq "$(LLVM_LTO)" "1"
$(CXX) $(CLANG_CPPFL) -Wno-writable-strings -fno-rtti -fPIC -std=$(LLVM_STDCXX) -shared $< -o $@ $(CLANG_LFL) instrumentation/afl-llvm-common.o
endif
./afl-llvm-lto-instrumentation.so: instrumentation/afl-llvm-lto-instrumentation.so.cc instrumentation/afl-llvm-common.o
ifeq "$(LLVM_LTO)" "1"
$(CXX) $(CLANG_CPPFL) -Wno-writable-strings -fno-rtti -fPIC -std=$(LLVM_STDCXX) -shared $< -o $@ $(CLANG_LFL) instrumentation/afl-llvm-common.o
$(CLANG_BIN) $(CFLAGS_SAFE) $(CPPFLAGS) -Wno-unused-result -O0 $(AFL_CLANG_FLTO) -fPIC -c instrumentation/afl-llvm-rt-lto.o.c -o ./afl-llvm-rt-lto.o
@ -480,7 +475,7 @@ install: all
@if [ -f ./afl-cc ]; then set -e; install -m 755 ./afl-cc $${DESTDIR}$(BIN_PATH); ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-c++; fi
@rm -f $${DESTDIR}$(HELPER_PATH)/afl-llvm-rt*.o $${DESTDIR}$(HELPER_PATH)/afl-gcc-rt*.o
@if [ -f ./afl-compiler-rt.o ]; then set -e; install -m 755 ./afl-compiler-rt.o $${DESTDIR}$(HELPER_PATH); ln -sf afl-compiler-rt.o $${DESTDIR}$(HELPER_PATH)/afl-llvm-rt.o ;fi
@if [ -f ./afl-lto ]; then set -e; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-lto; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-lto++; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang-lto; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang-lto++; install -m 755 ./afl-llvm-lto-instrumentation.so ./afl-llvm-rt-lto*.o ./afl-llvm-lto-instrumentlist.so $${DESTDIR}$(HELPER_PATH); fi
@if [ -f ./afl-lto ]; then set -e; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-lto; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-lto++; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang-lto; ln -sf afl-cc $${DESTDIR}$(BIN_PATH)/afl-clang-lto++; install -m 755 ./afl-llvm-rt-lto*.o ./afl-llvm-lto-instrumentlist.so $${DESTDIR}$(HELPER_PATH); fi
@if [ -f ./afl-ld-lto ]; then set -e; install -m 755 ./afl-ld-lto $${DESTDIR}$(BIN_PATH); fi
@if [ -f ./afl-compiler-rt-32.o ]; then set -e; install -m 755 ./afl-compiler-rt-32.o $${DESTDIR}$(HELPER_PATH); ln -sf afl-compiler-rt-32.o $${DESTDIR}$(HELPER_PATH)/afl-llvm-rt-32.o ;fi
@if [ -f ./afl-compiler-rt-64.o ]; then set -e; install -m 755 ./afl-compiler-rt-64.o $${DESTDIR}$(HELPER_PATH); ln -sf afl-compiler-rt-64.o $${DESTDIR}$(HELPER_PATH)/afl-llvm-rt-64.o ; fi


@ -1 +0,0 @@
docs/QuickStartGuide.md

README.md
File diff suppressed because it is too large

TODO.md

@ -1,38 +1,32 @@
# TODO list for AFL++
## Roadmap 3.00+
## Should
- better autodetection of shifting runtime timeout values
- Update afl->pending_not_fuzzed for MOpt
- put fuzz target in top line of UI
- afl-plot to support multiple plot_data
- parallel builds for source-only targets
- get rid of check_binary, replace with more forkserver communication
## Maybe
- afl_custom_fuzz_splice_optin()
- afl_custom_splice()
- better autodetection of shifting runtime timeout values
- cmplog: use colorization input for havoc?
- parallel builds for source-only targets
- cmdline option from-to range for mutations
## Further down the road
afl-fuzz:
- setting min_len/max_len/start_offset/end_offset limits for mutation output
qemu_mode:
QEMU mode/FRIDA mode:
- non colliding instrumentation
- rename qemu specific envs to AFL_QEMU (AFL_ENTRYPOINT, AFL_CODE_START/END,
AFL_COMPCOV_LEVEL?)
- add AFL_QEMU_EXITPOINT (maybe multiple?), maybe pointless as we have
- add AFL_QEMU_EXITPOINT (maybe multiple?), maybe pointless as there is
persistent mode
- add/implement AFL_QEMU_INST_LIBLIST and AFL_QEMU_NOINST_PROGRAM
- add/implement AFL_QEMU_INST_REGIONS as a list of _START/_END addresses
## Ideas
- LTO/sancov: write current edge to prev_loc and use that information when
using cmplog or __sanitizer_cov_trace_cmp*. maybe we can deduct by follow
up edge numbers that both following cmp paths have been found and then
disable working on this edge id -> cmplog_intelligence branch
- use cmplog colorization taint result for havoc locations?
- new instrumentation option for a thread-safe variant of feedback to shared mem.
The user decides, if this is needed (eg the target is multithreaded).
using cmplog or __sanitizer_cov_trace_cmp*. maybe we can deduct by follow up
edge numbers that both following cmp paths have been found and then disable
working on this edge id -> cmplog_intelligence branch
- use cmplog colorization taint result for havoc locations?


@ -396,7 +396,7 @@ BEGIN {
system( "AFL_CMIN_ALLOW_ANY=1 "AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"/.run_test\" -Z "extra_par" -- \""target_bin"\" "prog_args_string" <\""in_dir"/"first_file"\"")
} else {
system("cp \""in_dir"/"first_file"\" "stdin_file)
system( "AFL_CMIN_ALLOW_ANY=1 "AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"/.run_test\" -Z "extra_par" -A \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null")
system( "AFL_CMIN_ALLOW_ANY=1 "AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"/.run_test\" -Z "extra_par" -H \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null")
}
first_count = 0
@ -432,8 +432,8 @@ BEGIN {
retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -- \""target_bin"\" "prog_args_string)
} else {
print " Processing "in_count" files (forkserver mode)..."
# print AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -A \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null"
retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -A \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null")
# print AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -H \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null"
retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -H \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null")
}
if (retval && !AFL_CMIN_CRASHES_ONLY) {


@ -11,7 +11,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# This tool tries to find the smallest subset of files in the input directory
# that still trigger the full range of instrumentation data points seen in
@ -310,7 +310,7 @@ if [ "$STDIN_FILE" = "" ]; then
else
cp "$IN_DIR/$FIRST_FILE" "$STDIN_FILE"
AFL_CMIN_ALLOW_ANY=1 "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/.run_test" -Z $EXTRA_PAR -A "$STDIN_FILE" -- "$@" </dev/null
AFL_CMIN_ALLOW_ANY=1 "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/.run_test" -Z $EXTRA_PAR -H "$STDIN_FILE" -- "$@" </dev/null
fi
@ -360,7 +360,7 @@ echo "[*] Obtaining traces for input files in '$IN_DIR'..."
cp "$IN_DIR/$fn" "$STDIN_FILE"
"$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -A "$STDIN_FILE" -- "$@" </dev/null
"$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -H "$STDIN_FILE" -- "$@" </dev/null
done


@ -12,7 +12,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
get_abs_path() {
@ -195,15 +195,19 @@ exit 1
fi
mkdir -p "$outputdir/tmp"
afl-plot-ui > "$outputdir/tmp/win_ids" &
rm -rf "$outputdir/.tmp"
mkdir -p "$outputdir/.tmp"
mkfifo "$outputdir/.tmp/win_ids" || exit 1
sleep 0.5
afl-plot-ui > "$outputdir/.tmp/win_ids" &
W_IDS=$(cat "$outputdir/.tmp/win_ids")
W_ID1=$(cat $outputdir/tmp/win_ids | head -1)
W_ID2=$(cat $outputdir/tmp/win_ids | head -2 | tail -1)
W_ID3=$(cat $outputdir/tmp/win_ids | head -3 | tail -1)
W_ID4=$(cat $outputdir/tmp/win_ids | tail -1)
rm -rf "$outputdir/.tmp"
W_ID1=$(echo "$W_IDS" | head -n 1)
W_ID2=$(echo "$W_IDS" | head -n 2 | tail -n 1)
W_ID3=$(echo "$W_IDS" | head -n 3 | tail -n 1)
W_ID4=$(echo "$W_IDS" | tail -n 1)
echo "[*] Generating plots..."
@ -265,12 +269,6 @@ _EOF_
sleep 1
rm "$outputdir/tmp/win_ids"
if [ -z "$(ls -A $outputdir/tmp)" ]; then
rm -r "$outputdir/tmp"
fi
else
echo "[*] Generating plots..."


@ -34,8 +34,8 @@ if [ "$PLATFORM" = "Linux" ] ; then
sysctl -w kernel.randomize_va_space=0
sysctl -w kernel.sched_child_runs_first=1
sysctl -w kernel.sched_autogroup_enabled=1
sysctl -w kernel.sched_migration_cost_ns=50000000
sysctl -w kernel.sched_latency_ns=250000000
sysctl -w kernel.sched_migration_cost_ns=50000000 2>/dev/null
sysctl -w kernel.sched_latency_ns=250000000 2>/dev/null
echo never > /sys/kernel/mm/transparent_hugepage/enabled
test -e /sys/devices/system/cpu/cpufreq/scaling_governor && echo performance | tee /sys/devices/system/cpu/cpufreq/scaling_governor
test -e /sys/devices/system/cpu/cpufreq/policy0/scaling_governor && echo performance | tee /sys/devices/system/cpu/cpufreq/policy*/scaling_governor
@ -52,7 +52,7 @@ if [ "$PLATFORM" = "Linux" ] ; then
echo ' /etc/default/grub:GRUB_CMDLINE_LINUX_DEFAULT="ibpb=off ibrs=off kpti=0 l1tf=off mds=off mitigations=off no_stf_barrier noibpb noibrs nopcid nopti nospec_store_bypass_disable nospectre_v1 nospectre_v2 pcid=off pti=off spec_store_bypass_disable=off spectre_v2=off stf_barrier=off srbds=off noexec=off noexec32=off tsx=on tsx_async_abort=off arm64.nopauth audit=0 hardened_usercopy=off ssbd=force-off"'
echo
}
echo If you run fuzzing instances in docker, run them with \"--security-opt seccomp=unconfined\" for more speed
echo If you run fuzzing instances in docker, run them with \"--security-opt seccomp=unconfined\" for more speed.
echo
DONE=1
fi


@ -12,7 +12,7 @@
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# This tool summarizes the status of any locally-running synchronized
# instances of afl-fuzz.

coresight_mode/.gitignore

@ -0,0 +1,2 @@
.local
glibc*


@ -0,0 +1,62 @@
#!/usr/bin/env make
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 Ricerca Security, Inc. All rights reserved.
SHELL:=bash
PREFIX?=$(shell pwd)/.local
CS_TRACE:=coresight-trace
PATCHELF?=$(PREFIX)/bin/patchelf
PATCH_DIR:=patches
GLIBC_VER:=2.33
GLIBC_NAME:=glibc-$(GLIBC_VER)
GLIBC_URL_BASE:=http://ftp.gnu.org/gnu/glibc
GLIBC_LDSO?=$(PREFIX)/lib/ld-linux-aarch64.so.1
OUTPUT?="$(TARGET).patched"
all: build
build:
git submodule update --init --recursive $(CS_TRACE)
$(MAKE) -C $(CS_TRACE)
cp $(CS_TRACE)/cs-proxy ../afl-cs-proxy
patch: | $(PATCHELF) $(GLIBC_LDSO)
@if test -z "$(TARGET)"; then echo "TARGET is not set"; exit 1; fi
$(PATCHELF) \
--set-interpreter $(GLIBC_LDSO) \
--set-rpath $(dir $(GLIBC_LDSO)) \
--output $(OUTPUT) \
$(TARGET)
$(PATCHELF): patchelf
git submodule update --init $<
cd $< && \
./bootstrap.sh && \
./configure --prefix=$(PREFIX) && \
$(MAKE) && \
$(MAKE) check && \
$(MAKE) install
$(GLIBC_LDSO): | $(GLIBC_NAME).tar.xz
tar -xf $(GLIBC_NAME).tar.xz
for file in $(shell find $(PATCH_DIR) -maxdepth 1 -type f); do \
patch -p1 < $$file ; \
done
mkdir -p $(GLIBC_NAME)/build
cd $(GLIBC_NAME)/build && \
../configure --prefix=$(PREFIX) && \
$(MAKE) && \
$(MAKE) install
$(GLIBC_NAME).tar.xz:
wget -O $@ $(GLIBC_URL_BASE)/$@
clean:
$(MAKE) -C $(CS_TRACE) clean
.PHONY: all build patch clean

coresight_mode/Makefile

@ -0,0 +1,21 @@
#!/usr/bin/env make
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 Ricerca Security, Inc. All rights reserved.
all:
@echo trying to use GNU make...
@gmake all || echo please install GNUmake
build:
@echo trying to use GNU make...
@gmake build || echo please install GNUmake
patch:
@echo trying to use GNU make...
@gmake patch || echo please install GNUmake
clean:
@echo trying to use GNU make...
@gmake clean || echo please install GNUmake
.PHONY: all build patch clean

coresight_mode/README.md

@ -0,0 +1,70 @@
# AFL++ CoreSight mode
CoreSight mode enables binary-only fuzzing on ARM64 Linux using CoreSight (ARM's hardware tracing technology).
NOTE: CoreSight mode is at an early stage of development and is not suitable for production use.
Currently the following hardware boards are supported:
* NVIDIA Jetson TX2 (NVIDIA Parker)
* NVIDIA Jetson Nano (NVIDIA Tegra X1)
* GIGABYTE R181-T90 (Marvell ThunderX2 CN99XX)
## Getting started
Please read the [RICSec/coresight-trace README](https://github.com/RICSecLab/coresight-trace/blob/master/README.md) and check the prerequisites (capstone) before getting started.
CoreSight mode supports the AFL++ fork server mode to reduce `exec` system call
overhead. To support it for binary-only fuzzing, the target ELF binary needs to
be modified to re-link against the patched glibc. We employ this design from
[PTrix](https://github.com/junxzm1990/afl-pt).
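For illustration, the `make patch` target in the GNUmakefile shown above boils
down to a `patchelf` invocation roughly like this (a sketch; `./target` is a
placeholder and the paths assume the default `PREFIX` of `$PWD/.local`):

```bash
# Point the ELF interpreter and rpath of the target at the patched glibc,
# writing the result to a new file (this mirrors what `make patch` runs).
./.local/bin/patchelf \
    --set-interpreter "$PWD/.local/lib/ld-linux-aarch64.so.1" \
    --set-rpath "$PWD/.local/lib/" \
    --output ./target.patched \
    ./target
```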
Check out all the git submodules in the `cs_mode` directory:
```bash
git submodule update --init --recursive
```
### Build coresight-trace
There are some notes on building coresight-trace. Refer to the [README](https://github.com/RICSecLab/coresight-trace/blob/master/README.md) for the details. Run make in the `cs_mode` directory:
```bash
make build
```
Make sure `cs-proxy` is placed in the AFL++ root directory as `afl-cs-proxy`.
### Patch COTS binary
The fork server mode requires patchelf and the patched glibc. The dependencies can be built by just running make:
```bash
make patch TARGET=$BIN
```
The above make command builds and installs the dependencies to `$PREFIX` (defaults to `$PWD/.local`) the first time it is run. It then runs `patchelf` on `$BIN`, writing the output to `$OUTPUT` (`$BIN.patched` by default).
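For example, a hypothetical invocation that overrides the defaults could look
like this (`/path/to/target` and the install prefix are placeholders; the
variables are the ones defined in the GNUmakefile above):

```bash
# Build patchelf and the patched glibc into a custom prefix, then patch the target.
make patch TARGET=/path/to/target OUTPUT=/path/to/target.patched PREFIX=$HOME/.aflcs
```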
### Run afl-fuzz
Run `afl-fuzz` with the `-A` option to use CoreSight mode.
```bash
sudo afl-fuzz -A -i input -o output -- $OUTPUT @@
```
## Environment Variables
There are AFL++ CoreSight mode-specific environment variables for run-time configuration.
* `AFL_CS_CUSTOM_BIN` overrides the proxy application path. `afl-cs-proxy` will be used if not defined.
* `AFLCS_COV` specifies the coverage type used when decoding the CoreSight trace. `edge` and `path` are supported. The default value is `edge`.
* `AFLCS_UDMABUF` is the u-dma-buf device number used to store trace data in the DMA region. The default value is `0`.
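As an illustration, a run that overrides these settings might look like this
(a sketch; `./target.patched` is a placeholder for your patched binary):

```bash
# Select path coverage and u-dma-buf device 1 instead of the defaults
# (AFLCS_COV=edge, AFLCS_UDMABUF=0).
sudo AFLCS_COV=path AFLCS_UDMABUF=1 afl-fuzz -A -i input -o output -- ./target.patched @@
```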
## TODO List
* Eliminate modified glibc dependency
* Support parallel fuzzing
## Acknowledgements
This project has received funding from the Acquisition, Technology & Logistics Agency (ATLA) under the National Security Technology Research Promotion Fund 2021 (JPJ004596).


@ -0,0 +1,117 @@
diff --git a/glibc-2.33/elf/rtld.c b/glibc-2.33/elf/rtld.c
index 596b6ac3..2ee270d4 100644
--- a/glibc-2.33/elf/rtld.c
+++ b/glibc-2.33/elf/rtld.c
@@ -169,6 +169,99 @@ uintptr_t __pointer_chk_guard_local
strong_alias (__pointer_chk_guard_local, __pointer_chk_guard)
#endif
+#define AFLCS_RTLD 1
+
+#if AFLCS_RTLD
+
+#include <sys/shm.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <dlfcn.h>
+#include <signal.h>
+
+#include <asm/unistd.h>
+#include <unistd.h>
+
+#define FORKSRV_FD 198
+
+#define AFLCS_ENABLE "__AFLCS_ENABLE"
+
+/* We use this additional AFLCS_# AFLCS_#+1 pair to communicate with proxy */
+#define AFLCS_FORKSRV_FD (FORKSRV_FD - 3)
+#define AFLCS_RTLD_SNIPPET do { __cs_start_forkserver(); } while(0)
+
+/* Fork server logic, invoked before we return from _dl_start. */
+
+static void __cs_start_forkserver(void) {
+ int status;
+ pid_t child_pid;
+ static char tmp[4] = {0, 0, 0, 0};
+
+ if (!getenv(AFLCS_ENABLE)) {
+ return;
+ }
+
+ if (write(AFLCS_FORKSRV_FD + 1, tmp, 4) != 4) {
+ _exit(-1);
+ }
+
+ /* All right, let's await orders... */
+ while (1) {
+ /* Whoops, parent dead? */
+ if (read(AFLCS_FORKSRV_FD, tmp, 4) != 4) {
+ _exit(1);
+ }
+
+ child_pid = INLINE_SYSCALL(clone, 5,
+ CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, 0,
+ NULL, NULL, &THREAD_SELF->tid);
+ if (child_pid < 0) {
+ _exit(4);
+ }
+ if (!child_pid) {
+ /* Child process. Wait for parent start tracing */
+ kill(getpid(), SIGSTOP);
+ /* Close descriptors and run free. */
+ close(AFLCS_FORKSRV_FD);
+ close(AFLCS_FORKSRV_FD + 1);
+ return;
+ }
+
+ /* Parent. */
+ if (write(AFLCS_FORKSRV_FD + 1, &child_pid, 4) != 4) {
+ _exit(5);
+ }
+
+ /* Wait until SIGCONT is signaled. */
+ if (waitpid(child_pid, &status, WCONTINUED) < 0) {
+ _exit(6);
+ }
+ if (!WIFCONTINUED(status)) {
+ /* Relay status to proxy. */
+ if (write(AFLCS_FORKSRV_FD + 1, &status, 4) != 4) {
+ _exit(7);
+ }
+ continue;
+ }
+ while (1) {
+ /* Get status. */
+ if (waitpid(child_pid, &status, WUNTRACED) < 0) {
+ _exit(8);
+ }
+ /* Relay status to proxy. */
+ if (write(AFLCS_FORKSRV_FD + 1, &status, 4) != 4) {
+ _exit(9);
+ }
+ if (!(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)) {
+ /* The child process is exited. */
+ break;
+ }
+ }
+ }
+}
+
+#endif /* AFLCS_RTLD */
+
/* Check that AT_SECURE=0, or that the passed name does not contain
directories and is not overly long. Reject empty names
unconditionally. */
@@ -588,6 +681,12 @@ _dl_start (void *arg)
# define ELF_MACHINE_START_ADDRESS(map, start) (start)
#endif
+ /* AFL-CS-START */
+#if AFLCS_RTLD
+ AFLCS_RTLD_SNIPPET;
+#endif
+ /* AFL-CS-END */
+
return ELF_MACHINE_START_ADDRESS (GL(dl_ns)[LM_ID_BASE]._ns_loaded, entry);
}
}


@ -15,6 +15,7 @@ In `./rust`, you will find rust bindings, including a simple example in `./rust/
If you use git to clone AFL++, then the following will incorporate our
excellent grammar custom mutator:
```sh
git submodule update --init
```


@ -1,19 +1,19 @@
# GramaTron
Gramatron is a coverage-guided fuzzer that uses grammar automatons to perform
grammar-aware fuzzing. Technical details about our framework are available
in the [ISSTA'21 paper](https://nebelwelt.net/files/21ISSTA.pdf).
The artifact to reproduce the experiments presented in the paper are present
in `artifact/`. Instructions to run a sample campaign and incorporate new
grammars is presented below:
GramaTron is a coverage-guided fuzzer that uses grammar automatons to perform
grammar-aware fuzzing. Technical details about our framework are available in
the [ISSTA'21 paper](https://nebelwelt.net/files/21ISSTA.pdf). The artifact to
reproduce the experiments presented in the paper are present in `artifact/`.
Instructions to run a sample campaign and incorporate new grammars is presented
below:
# Compiling
## Compiling
Simply execute `./build_gramatron_mutator.sh`
Execute `./build_gramatron_mutator.sh`.
# Running
## Running
You have to set the grammar file to use with `GRAMMATRON_AUTOMATION`:
You have to set the grammar file to use with `GRAMATRON_AUTOMATION`:
```
export AFL_DISABLE_TRIM=1
@ -23,23 +23,27 @@ export GRAMATRON_AUTOMATION=grammars/ruby/source_automata.json
afl-fuzz -i in -o out -- ./target
```
# Adding and testing a new grammar
## Adding and testing a new grammar
- Specify in a JSON format for CFG. Examples are correspond `source.json` files
- Specify the grammar as a CFG in JSON format. Examples are the corresponding `source.json` files.
- Run the automaton generation script (in `src/gramfuzz-mutator/preprocess`)
which will place the generated automaton in the same folder.
```
./preprocess/prep_automaton.sh <grammar_file> <start_symbol> [stack_limit]
Eg. ./preprocess/prep_automaton.sh ~/grammars/ruby/source.json PROGRAM
```
- If the grammar has no self-embedding rules then you do not need to pass the
stack limit parameter. However, if it does have self-embedding rules then you
```
./preprocess/prep_automaton.sh <grammar_file> <start_symbol> [stack_limit]
E.g., ./preprocess/prep_automaton.sh ~/grammars/ruby/source.json PROGRAM
```
- If the grammar has no self-embedding rules, then you do not need to pass the
stack limit parameter. However, if it does have self-embedding rules, then you
need to pass the stack limit parameter. We recommend starting with `5` and
then increasing it if you need more complexity
- To sanity-check that the automaton is generating inputs as expected you can use the `test` binary housed in `src/gramfuzz-mutator`
```
./test SanityCheck <automaton_file>
then increasing it if you need more complexity.
- To sanity-check that the automaton is generating inputs as expected, you can
use the `test` binary housed in `src/gramfuzz-mutator`.
Eg. ./test SanityCheck ~/grammars/ruby/source_automata.json
```
```
./test SanityCheck <automaton_file>
E.g., ./test SanityCheck ~/grammars/ruby/source_automata.json
```


@ -1086,6 +1086,7 @@ ATTRIBUTE_INTERFACE size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size,
size_t MaxSize) {
assert(fuzzer::F);
fuzzer::F->GetMD().StartMutationSequence();
size_t r = fuzzer::F->GetMD().DefaultMutate(Data, Size, MaxSize);
#ifdef INTROSPECTION
introspection_ptr = fuzzer::F->GetMD().WriteMutationSequence();


@ -11,9 +11,11 @@ Note that this is currently a simple implementation and it is missing two featur
* Dictionary support
To update the source, all that is needed is that FuzzerDriver.cpp has to receive
```
#include "libfuzzer.inc"
```
before the closing namespace bracket.
It is also libfuzzer.inc where the configuration of the libfuzzer mutations
@ -21,4 +23,4 @@ are done.
> Original repository: https://github.com/llvm/llvm-project
> Path: compiler-rt/lib/fuzzer/*.{h|cpp}
> Source commit: df3e903655e2499968fc7af64fb5fa52b2ee79bb
> Source commit: df3e903655e2499968fc7af64fb5fa52b2ee79bb


@ -2,7 +2,7 @@
extern "C" ATTRIBUTE_INTERFACE void
LLVMFuzzerMyInit(int (*Callback)(const uint8_t *Data, size_t Size), unsigned int Seed) {
Random Rand(Seed);
auto *Rand = new Random(Seed);
FuzzingOptions Options;
Options.Verbosity = 3;
Options.MaxLen = 1024000;
@ -30,7 +30,7 @@ LLVMFuzzerMyInit(int (*Callback)(const uint8_t *Data, size_t Size), unsigned int
struct EntropicOptions Entropic;
Entropic.Enabled = Options.Entropic;
EF = new ExternalFunctions();
auto *MD = new MutationDispatcher(Rand, Options);
auto *MD = new MutationDispatcher(*Rand, Options);
auto *Corpus = new InputCorpus(Options.OutputCorpus, Entropic);
auto *F = new Fuzzer(Callback, *Corpus, *MD, Options);
}


@ -99,10 +99,12 @@ extern "C" size_t afl_custom_fuzz(MyMutator *mutator, // return value from afl_c
std::string s = ProtoToData(*p);
// Copy to a new buffer ( mutated_out )
size_t mutated_size = s.size() <= max_size ? s.size() : max_size; // check if raw data's size is larger than max_size
uint8_t *mutated_out = new uint8_t[mutated_size+1];
memcpy(mutated_out, s.c_str(), mutated_size); // copy the mutated data
delete mutator->mutated_out;
mutator->mutated_out = new uint8_t[mutated_size+1];
memcpy(mutator->mutated_out, s.c_str(), mutated_size); // copy the mutated data
// Assign the mutated data and return mutated_size
*out_buf = mutated_out;
*out_buf = mutator->mutated_out;
return mutated_size;
}


@ -2,4 +2,6 @@
#include "test.pb.h"
class MyMutator : public protobuf_mutator::Mutator {
public:
uint8_t *mutated_out = nullptr;
};


@ -129,7 +129,7 @@ uint8_t afl_custom_queue_new_entry(my_mutator_t * data,
int pid = fork();
if (pid == -1) return;
if (pid == -1) return 0;
if (pid) {
@ -147,7 +147,7 @@ uint8_t afl_custom_queue_new_entry(my_mutator_t * data,
if (r <= 0) {
close(pipefd[1]);
return;
return 0;
}


@ -1,20 +1,18 @@
# AFL dictionaries
# AFL++ dictionaries
(See [../README.md](../README.md) for the general instruction manual.)
This subdirectory contains a set of dictionaries that can be used in
conjunction with the -x option to allow the fuzzer to effortlessly explore the
grammar of some of the more verbose data formats or languages. The basic
principle behind the operation of fuzzer dictionaries is outlined in section 10
of the "main" README.md for the project.
This subdirectory contains a set of dictionaries that can be used in conjunction
with the -x option to allow the fuzzer to effortlessly explore the grammar of
some of the more verbose data formats or languages.
These sets were done by Michal Zalewski, various contributors, and imported
from oss-fuzz, go-fuzz and libfuzzer.
These sets were done by Michal Zalewski, various contributors, and imported from
oss-fuzz, go-fuzz and libfuzzer.
Custom dictionaries can be added at will. They should consist of a
reasonably-sized set of rudimentary syntax units that the fuzzer will then try
to clobber together in various ways. Snippets between 2 and 16 bytes are
usually the sweet spot.
to clobber together in various ways. Snippets between 2 and 16 bytes are usually
the sweet spot.
Custom dictionaries can be created in two ways:
@ -36,9 +34,9 @@ In the file mode, every name field can be optionally followed by @<num>, e.g.:
`keyword_foo@1 = "foo"`
Such entries will be loaded only if the requested dictionary level is equal or
higher than this number. The default level is zero; a higher value can be set
by appending @<num> to the dictionary file name, like so:
higher than this number. The default level is zero; a higher value can be set by
appending @<num> to the dictionary file name, like so:
`-x path/to/dictionary.dct@2`
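To illustrate the level mechanism described above, a small dictionary file
might look like this (a hypothetical example; the entry names and tokens are
made up):

```
header_xml = "<?xml "
keyword_foo@1 = "foo"
keyword_rare@2 = "rare_keyword"
```

With the default level (zero), only `header_xml` is loaded; `-x dictionary.dct@1`
adds `keyword_foo`, and `-x dictionary.dct@2` loads all three entries.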
Good examples of dictionaries can be found in xml.dict and png.dict.
Good examples of dictionaries can be found in xml.dict and png.dict.


@ -9,16 +9,46 @@ Want to stay in the loop on major new features? Join our mailing list by
sending a mail to <afl-users+subscribe@googlegroups.com>.
### Version ++3.15a (dev)
- documentation restructuring, made possible by Google Season of Docs
- new binary-only fuzzing mode: coresight_mode for aarch64 CPUs :)
thanks to RICSecLab for submitting!
- if instrumented libraries are dlopen()'ed after the forkserver you
will now see crashes. before you would have colliding coverage.
we changed this to force fixing a broken setup rather than allowing
ineffective fuzzing.
See docs/best_practices.md on how to fix such setups.
- afl-fuzz:
- added AFL_IGNORE_PROBLEMS plus checks to identify and abort on
incorrect LTO usage setups and enhanced the READMEs for better
information on how to deal with instrumenting libraries
- cmplog binaries will need to be recompiled for this version
(it is better!)
- fix a regression introduced in 3.10 that resulted in less
coverage being detected. thanks to Collin May for reporting!
- added AFL_IGNORE_PROBLEMS, plus checks to identify and abort on
incorrect LTO usage setups and enhanced the READMEs for better
information on how to deal with instrumenting libraries
- fix -n dumb mode (nobody should use this)
- fix stability issue with LTO and cmplog
- better banner
- more effective cmplog mode
- more often update the UI when in input2stage mode
- frida_mode:
- better performance, bug fixes
- David Carlier added Android support :)
- afl-showmap, afl-tmin and afl-analyze:
- honor persistent mode for more speed. thanks to dloffre-snl
for reporting!
- fix bug where targets are not killed on timeouts
- moved hidden afl-showmap -A option to -H to be used for
coresight_mode
- Prevent accidentally killing non-afl/fuzz services when aborting
afl-showmap and other tools.
- afl-cc:
- new cmplog mode (incompatible with older afl++ versions)
- support llvm IR select instrumentation for default PCGUARD and LTO
- fix for shared linking on MacOS
- llvm and LTO mode verified to work with new llvm 14-dev
- fixed a potential crash in targets for LAF string handling
- added AFL_USE_TSAN thread sanitizer support
- llvm and LTO mode modified to work with new llvm 14-dev (again)
- fix for AFL_REAL_LD
- added the very good grammar mutator "GramaTron" to the
custom_mutators
- added optimin, a faster and better corpus minimizer by
@ -30,7 +60,7 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
- fix AFL_PRELOAD issues on MacOS
- removed utils/afl_frida because frida_mode/ is now so much better
- added uninstall target to makefile (todo: update new readme!)
- removed indirections in rust callbacks for unicornafl
### Version ++3.14c (release)
- afl-fuzz:
@ -2748,7 +2778,7 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
- Updated the documentation and added notes_for_asan.txt. Based on feedback
from Hanno Boeck, Ben Laurie, and others.
- Moved the project to http://lcamtuf.coredump.cx/afl/.
- Moved the project to https://lcamtuf.coredump.cx/afl/.
### Version 0.46b:


@ -1,243 +1,185 @@
# Frequently asked questions about AFL++
## Contents
* [What is the difference between AFL and AFL++?](#what-is-the-difference-between-afl-and-afl)
* [I got a weird compile error from clang](#i-got-a-weird-compile-error-from-clang)
* [How to improve the fuzzing speed?](#how-to-improve-the-fuzzing-speed)
* [How do I fuzz a network service?](#how-do-i-fuzz-a-network-service)
* [How do I fuzz a GUI program?](#how-do-i-fuzz-a-gui-program)
* [What is an edge?](#what-is-an-edge)
* [Why is my stability below 100%?](#why-is-my-stability-below-100)
* [How can I improve the stability value?](#how-can-i-improve-the-stability-value)
# Frequently asked questions (FAQ)
If you find an interesting or important question missing, submit it via
[https://github.com/AFLplusplus/AFLplusplus/issues](https://github.com/AFLplusplus/AFLplusplus/issues)
[https://github.com/AFLplusplus/AFLplusplus/discussions](https://github.com/AFLplusplus/AFLplusplus/discussions).
## What is the difference between AFL and AFL++?
## General
American Fuzzy Lop (AFL) was developed by Michał "lcamtuf" Zalewski starting in
2013/2014, and when he left Google end of 2017 he stopped developing it.
<details>
<summary id="what-is-the-difference-between-afl-and-aflplusplus">What is the difference between AFL and AFL++?</summary><p>
At the end of 2019 the Google fuzzing team took over maintenance of AFL, however
it is only accepting PRs from the community and is not developing enhancements
anymore.
AFL++ is a superior fork to Google's AFL - more speed, more and better
mutations, more and better instrumentation, custom module support, etc.
In the second quarter of 2019, 1 1/2 year later when no further development of
AFL had happened and it became clear there would none be coming, AFL++
was born, where initially community patches were collected and applied
for bug fixes and enhancements. Then from various AFL spin-offs - mostly academic
research - features were integrated. This already resulted in a much advanced
AFL.
American Fuzzy Lop (AFL) was developed by Michał "lcamtuf" Zalewski starting
in 2013/2014, and when he left Google end of 2017 he stopped developing it.
Until the end of 2019 the AFL++ team had grown to four active developers which
then implemented their own research and features, making it now by far the most
flexible and feature rich guided fuzzer available as open source.
And in independent fuzzing benchmarks it is one of the best fuzzers available,
e.g. [Fuzzbench Report](https://www.fuzzbench.com/reports/2020-08-03/index.html)
At the end of 2019, the Google fuzzing team took over maintenance of AFL,
however, it is only accepting PRs from the community and is not developing
enhancements anymore.
## I got a weird compile error from clang
In the second quarter of 2019, 1 1/2 years later, when no further development
of AFL had happened and it became clear none would be coming, AFL++ was
born, where initially community patches were collected and applied for bug
fixes and enhancements. Then from various AFL spin-offs - mostly academic
research - features were integrated. This already resulted in a much advanced
AFL.
If you see this kind of error when trying to instrument a target with afl-cc/
afl-clang-fast/afl-clang-lto:
```
/prg/tmp/llvm-project/build/bin/clang-13: symbol lookup error: /usr/local/bin/../lib/afl//cmplog-instructions-pass.so: undefined symbol: _ZNK4llvm8TypeSizecvmEv
clang-13: error: unable to execute command: No such file or directory
clang-13: error: clang frontend command failed due to signal (use -v to see invocation)
clang version 13.0.0 (https://github.com/llvm/llvm-project 1d7cf550721c51030144f3cd295c5789d51c4aad)
Target: x86_64-unknown-linux-gnu
Thread model: posix
InstalledDir: /prg/tmp/llvm-project/build/bin
clang-13: note: diagnostic msg:
********************
```
Then this means that your OS updated the clang installation from an upgrade
package and because of that the AFL++ llvm plugins do not match anymore.
Until the end of 2019, the AFL++ team had grown to four active developers
which then implemented their own research and features, making it now by far
the most flexible and feature rich guided fuzzer available as open source. And
in independent fuzzing benchmarks it is one of the best fuzzers available,
e.g., [Fuzzbench
Report](https://www.fuzzbench.com/reports/2020-08-03/index.html).
</p></details>
Solution: `git pull ; make clean install` of AFL++
<details>
<summary id="where-can-i-find-tutorials">Where can I find tutorials?</summary><p>
## How to improve the fuzzing speed?
We compiled a list of tutorials and exercises, see
[tutorials.md](tutorials.md).
</p></details>
1. Use [llvm_mode](../instrumentation/README.llvm.md): afl-clang-lto (llvm >= 11) or afl-clang-fast (llvm >= 9 recommended)
2. Use [persistent mode](../instrumentation/README.persistent_mode.md) (x2-x20 speed increase)
3. Use the [AFL++ snapshot module](https://github.com/AFLplusplus/AFL-Snapshot-LKM) (x2 speed increase)
4. If you do not use shmem persistent mode, use `AFL_TMPDIR` to put the input file directory on a tempfs location, see [docs/env_variables.md](docs/env_variables.md)
5. Improve Linux kernel performance: modify `/etc/default/grub`, set `GRUB_CMDLINE_LINUX_DEFAULT="ibpb=off ibrs=off kpti=off l1tf=off mds=off mitigations=off no_stf_barrier noibpb noibrs nopcid nopti nospec_store_bypass_disable nospectre_v1 nospectre_v2 pcid=off pti=off spec_store_bypass_disable=off spectre_v2=off stf_barrier=off"`; then `update-grub` and `reboot` (warning: makes the system less secure)
6. Running on an `ext2` filesystem with `noatime` mount option will be a bit faster than on any other journaling filesystem
7. Use your cores! [README.md:3.b) Using multiple cores/threads](../README.md#b-using-multiple-coresthreads)
<details>
<summary id="what-is-an-edge">What is an "edge"?</summary><p>
## How do I fuzz a network service?
A program contains `functions`, `functions` contain the compiled machine code.
The compiled machine code in a `function` can be in a single or many `basic
blocks`. A `basic block` is the largest possible number of subsequent machine
code instructions that has exactly one entry point (which can be entered by
multiple other basic blocks) and runs linearly without branching or jumping to
other addresses (except at the end).
The short answer is - you cannot, at least not "out of the box".
```
function() {
A:
some
code
B:
if (x) goto C; else goto D;
C:
some code
goto E
D:
some code
goto B
E:
return
}
```
Using a network channel is inadequate for several reasons:
- it has a slow-down of x10-20 on the fuzzing speed
- it does not scale to fuzzing multiple instances easily,
- instead of one initial data packet often a back-and-forth interplay of packets is needed for stateful protocols (which is totally unsupported by most coverage aware fuzzers).
Every code block between two jump locations is a `basic block`.
The established method to fuzz network services is to modify the source code
to read from a file or stdin (fd 0) (or even faster via shared memory, combine
this with persistent mode [instrumentation/README.persistent_mode.md](../instrumentation/README.persistent_mode.md)
and you have a performance gain of x10 instead of a performance loss of over
x10 - that is a x100 difference!).
An `edge` is then the unique relationship between two directly connected
`basic blocks` (from the code example above):
If modifying the source is not an option (e.g. because you only have a binary
and perform binary fuzzing) you can also use a shared library with AFL_PRELOAD
to emulate the network. This is also much faster than the real network would be.
See [utils/socket_fuzzing/](../utils/socket_fuzzing/).
There is an outdated AFL++ branch that implements networking if you are
desperate though: [https://github.com/AFLplusplus/AFLplusplus/tree/networking](https://github.com/AFLplusplus/AFLplusplus/tree/networking) -
however a better option is AFLnet ([https://github.com/aflnet/aflnet](https://github.com/aflnet/aflnet))
which allows you to define network state with different type of data packets.
## How do I fuzz a GUI program?
If the GUI program can read the fuzz data from a file (via the command line,
a fixed location or via an environment variable) without needing any user
interaction then it would be suitable for fuzzing.
Otherwise it is not possible without modifying the source code - which is a
very good idea anyway as the GUI functionality is a huge CPU/time overhead
for the fuzzing.
So create a new `main()` that just reads the test case and calls the
functionality for processing the input that the GUI program is using.
## What is an "edge"?
A program contains `functions`, `functions` contain the compiled machine code.
The compiled machine code in a `function` can be in a single or many `basic blocks`.
A `basic block` is the largest possible number of subsequent machine code
instructions that has exactly one entrypoint (which can be be entered by multiple other basic blocks)
and runs linearly without branching or jumping to other addresses (except at the end).
```
function() {
A:
some
code
B:
if (x) goto C; else goto D;
C:
some code
goto E
D:
some code
goto B
E:
return
}
```
Every code block between two jump locations is a `basic block`.
An `edge` is then the unique relationship between two directly connected `basic blocks` (from the
code example above):
```
Block A
|
```
Block A
|
v
Block B <------+
/ \ |
v v |
Block C Block D --+
\
v
Block B <------+
/ \ |
v v |
Block C Block D --+
\
v
Block E
```
Every line between two blocks is an `edge`.
Note that a few basic block loop to itself, this too would be an edge.
Block E
```
## Why is my stability below 100%?
Every line between two blocks is an `edge`. Note that a basic block can loop
to itself; this, too, is an edge.
</p></details>
Stability is measured by how many percent of the edges in the target are
"stable". Sending the same input again and again should take the exact same
path through the target every time. If that is the case, the stability is 100%.
## Targets
If however randomness happens, e.g. a thread reading other external data,
reaction to timing, etc. then in some of the re-executions with the same data
the edge coverage result will be different accross runs.
Those edges that change are then flagged "unstable".
<details>
<summary id="how-can-i-fuzz-a-binary-only-target">How can I fuzz a binary-only target?</summary><p>
The more "unstable" edges, the more difficult for AFL++ to identify valid new
paths.
AFL++ is a great fuzzer if you have the source code available.
A value above 90% is usually fine and a value above 80% is also still ok, and
even a value above 20% can still result in successful finds of bugs.
However, it is recommended that for values below 90% or 80% you should take
countermeasures to improve stability.
However, if there is only the binary program and no source code available,
then the standard non-instrumented mode is not effective.
## How can I improve the stability value?
To learn how these binaries can be fuzzed, read
[fuzzing_binary-only_targets.md](fuzzing_binary-only_targets.md).
</p></details>
For fuzzing a 100% stable target that covers all edges is the best case.
A 90% stable target that covers all edges is however better than a 100% stable
target that ignores 10% of the edges.
<details>
<summary id="how-can-i-fuzz-a-network-service">How can I fuzz a network service?</summary><p>
With instability you basically have a partial coverage loss on an edge, with
ignored functions you have a full loss on that edges.
The short answer is - you cannot, at least not "out of the box".
There are functions that are unstable, but also provide value to coverage, eg
init functions that use fuzz data as input for example.
If however a function that has nothing to do with the input data is the
source of instability, e.g. checking jitter, or is a hash map function etc.
then it should not be instrumented.
For more information on fuzzing network services, see
[best_practices.md#fuzzing-a-network-service](best_practices.md#fuzzing-a-network-service).
</p></details>
To be able to exclude these functions (based on AFL++'s measured stability)
the following process will allow to identify functions with variable edges.
<details>
<summary id="how-can-i-fuzz-a-gui-program">How can I fuzz a GUI program?</summary><p>
Four steps are required to do this and it also requires quite some knowledge
of coding and/or disassembly and is effectively possible only with
afl-clang-fast PCGUARD and afl-clang-lto LTO instrumentation.
Not all GUI programs are suitable for fuzzing. If the GUI program can read the
fuzz data from a file without needing any user interaction, then it would be
suitable for fuzzing.
1. First step: Instrument to be able to find the responsible function(s).
For more information on fuzzing GUI programs, see
[best_practices.md#fuzzing-a-gui-program](best_practices.md#fuzzing-a-gui-program).
</p></details>
a) For LTO instrumented binaries this can be documented during compile
time, just set `export AFL_LLVM_DOCUMENT_IDS=/path/to/a/file`.
This file will have one assigned edge ID and the corresponding
function per line.
## Performance
b) For PCGUARD instrumented binaries it is much more difficult. Here you
can either modify the __sanitizer_cov_trace_pc_guard function in
instrumentation/afl-llvm-rt.o.c to write a backtrace to a file if the ID in
__afl_area_ptr[*guard] is one of the unstable edge IDs.
(Example code is already there).
Then recompile and reinstall llvm_mode and rebuild your target.
Run the recompiled target with afl-fuzz for a while and then check the
file that you wrote with the backtrace information.
Alternatively you can use `gdb` to hook __sanitizer_cov_trace_pc_guard_init
on start, check to which memory address the edge ID value is written
and set a write breakpoint to that address (`watch 0x.....`).
<details>
<summary id="how-can-i-improve-the-fuzzing-speed">How can I improve the fuzzing speed?</summary><p>
c) in all other instrumentation types this is not possible. So just
recompile with the two mentioned above. This is just for
identifying the functions that have unstable edges.
There are a few things you can do to improve the fuzzing speed, see
[best_practices.md#improving-speed](best_practices.md#improving-speed).
</p></details>
2. Second step: Identify which edge ID numbers are unstable
<details>
<summary id="why-is-my-stability-below-100percent">Why is my stability below 100%?</summary><p>
run the target with `export AFL_DEBUG=1` for a few minutes then terminate.
The out/fuzzer_stats file will then show the edge IDs that were identified
as unstable in the `var_bytes` entry. You can match these numbers
directly to the data you created in the first step.
Now you know which functions are responsible for the instability
Stability is measured by how many percent of the edges in the target are
"stable". Sending the same input again and again should take the exact same
path through the target every time. If that is the case, the stability is
100%.
3. Third step: create a text file with the filenames/functions
If, however, randomness happens, e.g., a thread reading other external data,
reaction to timing, etc., then in some of the re-executions with the same data
the edge coverage result will be different across runs. Those edges that
change are then flagged "unstable".
Identify which source code files contain the functions that you need to
remove from instrumentation, or just specify the functions you want to
skip for instrumentation. Note that optimization might inline functions!
The more "unstable" edges, the more difficult for AFL++ to identify valid new
paths.
Simply follow this document on how to do this: [instrumentation/README.instrument_list.md](../instrumentation/README.instrument_list.md)
If PCGUARD is used, then you need to follow this guide (needs llvm 12+!):
[http://clang.llvm.org/docs/SanitizerCoverage.html#partially-disabling-instrumentation](http://clang.llvm.org/docs/SanitizerCoverage.html#partially-disabling-instrumentation)
A value above 90% is usually fine and a value above 80% is also still ok, and
even a value above 20% can still result in successful finds of bugs. However,
it is recommended that for values below 90% or 80% you should take
countermeasures to improve stability.
Only exclude those functions from instrumentation that provide no value
for coverage - that is if it does not process any fuzz data directly
or indirectly (e.g. hash maps, thread management etc.).
If however a function directly or indirectly handles fuzz data then you
should not put the function in a deny instrumentation list and rather
live with the instability it comes with.
For more information on stability and how to improve the stability value, see
[best_practices.md#improving-stability](best_practices.md#improving-stability).
</p></details>
4. Fourth step: recompile the target
## Troubleshooting
Recompile, fuzz it, be happy :)
<details>
<summary id="i-got-a-weird-compile-error-from-clang">I got a weird compile error from clang.</summary><p>
This link explains this process for [Fuzzbench](https://github.com/google/fuzzbench/issues/677)
If you see this kind of error when trying to instrument a target with
afl-cc/afl-clang-fast/afl-clang-lto:
```
/prg/tmp/llvm-project/build/bin/clang-13: symbol lookup error: /usr/local/bin/../lib/afl//cmplog-instructions-pass.so: undefined symbol: _ZNK4llvm8TypeSizecvmEv
clang-13: error: unable to execute command: No such file or directory
clang-13: error: clang frontend command failed due to signal (use -v to see invocation)
clang version 13.0.0 (https://github.com/llvm/llvm-project 1d7cf550721c51030144f3cd295c5789d51c4aad)
Target: x86_64-unknown-linux-gnu
Thread model: posix
InstalledDir: /prg/tmp/llvm-project/build/bin
clang-13: note: diagnostic msg:
********************
```
Then this means that your OS updated the clang installation from an upgrade
package and because of that the AFL++ llvm plugins do not match anymore.
Solution: `git pull ; make clean install` of AFL++.
</p></details>


@ -1,82 +1,101 @@
# Installation instructions
# Building and installing AFL++
This document provides basic installation instructions and discusses known
issues for a variety of platforms. See README.md for the general instruction
manual.
## Linux on x86
## 1. Linux on x86
---------------
An easy way to install AFL++ with everything compiled is available via docker:
You can use the [Dockerfile](../Dockerfile) (which has gcc-10 and clang-11 -
hence afl-clang-lto is available!) or just pull directly from the Docker Hub:
This platform is expected to work well. Compile the program with:
```bash
make
```shell
docker pull aflplusplus/aflplusplus
docker run -ti -v /location/of/your/target:/src aflplusplus/aflplusplus
```
You can start using the fuzzer without installation, but it is also possible to
install it with:
This image is automatically generated when a push to the stable repo happens.
You will find your target source code in /src in the container.
```bash
If you want to build AFL++ yourself, you have many options. The easiest choice
is to build and install everything:
```shell
sudo apt-get update
sudo apt-get install -y build-essential python3-dev automake git flex bison libglib2.0-dev libpixman-1-dev python3-setuptools
# try to install llvm 11 and install the distro default if that fails
sudo apt-get install -y lld-11 llvm-11 llvm-11-dev clang-11 || sudo apt-get install -y lld llvm llvm-dev clang
sudo apt-get install -y gcc-$(gcc --version|head -n1|sed 's/.* //'|sed 's/\..*//')-plugin-dev libstdc++-$(gcc --version|head -n1|sed 's/.* //'|sed 's/\..*//')-dev
sudo apt-get install -y ninja-build # for QEMU mode
git clone https://github.com/AFLplusplus/AFLplusplus
cd AFLplusplus
make distrib
sudo make install
```
There are no special dependencies to speak of; you will need GNU make and a
working compiler (gcc or clang). Some of the optional scripts bundled with the
program may depend on bash, gdb, and similar basic tools.
It is recommended to install the newest gcc, clang, and llvm-dev available in
your distribution!
If you are using clang, please review README.llvm.md; the LLVM
integration mode can offer substantial performance gains compared to the
traditional approach.
Note that "make distrib" also builds instrumentation, QEMU mode, unicorn_mode
and more. If you just want plain AFL++, then do "make all". However, compiling
and using at least instrumentation is highly recommended for much better results
- hence in this case choose:
Likewise, if you are using GCC, please review instrumentation/README.gcc_plugin.md.
You may have to change several settings to get optimal results (most notably,
disable crash reporting utilities and switch to a different CPU governor), but
afl-fuzz will guide you through that if necessary.
## 2. OpenBSD, FreeBSD, NetBSD on x86
Similarly to Linux, these platforms are expected to work well and are
regularly tested. Compile everything with GNU make:
```bash
gmake
```shell
make source-only
```
Note that BSD make will *not* work; if you do not have gmake on your system,
please install it first. As on Linux, you can use the fuzzer itself without
installation, or install it with:
These build targets exist:
```
sudo gmake install
* all: just the main AFL++ binaries
* binary-only: everything for binary-only fuzzing: qemu_mode, unicorn_mode,
libdislocator, libtokencap
* source-only: everything for source code fuzzing: instrumentation,
libdislocator, libtokencap
* distrib: everything (for both binary-only and source code fuzzing)
* man: creates simple man pages from the help option of the programs
* install: installs everything you have compiled with the build options above
* clean: cleans everything compiled, but not downloads (unless you are not on a git checkout)
* deepclean: cleans everything including downloads
* code-format: format the code, do this before you commit and send a PR please!
* tests: runs test cases to ensure that all features are still working as they
should
* unit: perform unit tests (based on cmocka)
* help: shows these build options
[Unless you are on Mac OS X](https://developer.apple.com/library/archive/qa/qa1118/_index.html),
you can also build statically linked versions of the AFL++ binaries by passing
the `STATIC=1` argument to make:
```shell
make STATIC=1
```
Keep in mind that if you are using csh as your shell, the syntax of some of the
shell commands given in the README.md and other docs will be different.
These build options exist:
The `llvm` mode requires a dynamically linked, fully-operational installation of
clang. At least on FreeBSD, the clang binaries are static and do not include
some of the essential tools, so if you want to make it work, you may need to
follow the instructions in README.llvm.md.
* STATIC - compile AFL++ static
* ASAN_BUILD - compiles with AddressSanitizer for debug purposes
* DEBUG - no optimization, -ggdb3, all warnings and -Werror
* PROFILING - compile with profiling information (gprof)
* INTROSPECTION - compile afl-fuzz with mutation introspection
* NO_PYTHON - disable python support
* NO_SPLICING - disables splicing mutation in afl-fuzz, not recommended for
normal fuzzing
* AFL_NO_X86 - if compiling on non-intel/amd platforms
* LLVM_CONFIG - if your distro doesn't use the standard name for llvm-config
(e.g., Debian)
Beyond that, everything should work as advertised.
e.g.: `make ASAN_BUILD=1`
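Build options can also be combined with a build target; a sketch (pick whatever
matches your needs):

```shell
make all ASAN_BUILD=1
sudo make install
```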
The QEMU mode is currently supported only on Linux. I think it's just a QEMU
problem, I couldn't get a vanilla copy of user-mode emulation support working
correctly on BSD at all.
## MacOS X on x86 and arm64 (M1)
## 3. MacOS X on x86 and arm64 (M1)
MacOS X should work, but there are some gotchas due to the idiosyncrasies of
the platform. On top of this, I have limited release testing capabilities
and depend mostly on user feedback.
MacOS X should work, but there are some gotchas due to the idiosyncrasies of the
platform. On top of this, we have limited release testing capabilities and
depend mostly on user feedback.
To build AFL, install llvm (and perhaps gcc) from brew and follow the general
instructions for Linux. If possible avoid Xcode at all cost.
instructions for Linux. If possible, avoid Xcode at all cost.
`brew install wget git make cmake llvm gdb`
Be sure to setup PATH to point to the correct clang binaries and use the
Be sure to setup `PATH` to point to the correct clang binaries and use the
freshly installed clang, clang++ and gmake, e.g.:
```
@ -90,12 +109,13 @@ cd ..
gmake install
```
afl-gcc will fail unless you have GCC installed, but that is using outdated
instrumentation anyway. You don't want that.
Note that afl-clang-lto, afl-gcc-fast and qemu_mode are not working on MacOS.
`afl-gcc` will fail unless you have GCC installed, but that is using outdated
instrumentation anyway. You don't want that. Note that `afl-clang-lto`,
`afl-gcc-fast` and `qemu_mode` are not working on MacOS.
The crash reporting daemon that comes by default with MacOS X will cause
problems with fuzzing. You need to turn it off:
```
launchctl unload -w /System/Library/LaunchAgents/com.apple.ReportCrash.plist
sudo launchctl unload -w /System/Library/LaunchDaemons/com.apple.ReportCrash.Root.plist
@ -107,17 +127,17 @@ and definitely don't look POSIX-compliant. This means two things:
- Fuzzing will be probably slower than on Linux. In fact, some folks report
considerable performance gains by running the jobs inside a Linux VM on
MacOS X.
- Some non-portable, platform-specific code may be incompatible with the
AFL forkserver. If you run into any problems, set `AFL_NO_FORKSRV=1` in the
- Some non-portable, platform-specific code may be incompatible with the AFL++
forkserver. If you run into any problems, set `AFL_NO_FORKSRV=1` in the
environment before starting afl-fuzz.
User emulation mode of QEMU does not appear to be supported on MacOS X, so
black-box instrumentation mode (`-Q`) will not work.
However Frida mode (`-O`) should work on x86 and arm64 MacOS boxes.
black-box instrumentation mode (`-Q`) will not work. However, Frida mode (`-O`)
should work on x86 and arm64 MacOS boxes.
MacOS X supports SYSV shared memory used by AFL's instrumentation, but the
default settings aren't usable with AFL++. The default settings on 10.14 seem
to be:
default settings aren't usable with AFL++. The default settings on 10.14 seem to
be:
```bash
$ ipcs -M
@ -138,8 +158,8 @@ sysctl kern.sysv.shmmax=8388608
sysctl kern.sysv.shmall=4096
```
If you're running more than one instance of AFL you likely want to make `shmall`
bigger and increase `shmseg` as well:
If you're running more than one instance of AFL, you likely want to make
`shmall` bigger and increase `shmseg` as well:
```bash
sysctl kern.sysv.shmmax=8388608
@ -147,91 +167,6 @@ sysctl kern.sysv.shmseg=48
sysctl kern.sysv.shmall=98304
```
See http://www.spy-hill.com/help/apple/SharedMemory.html for documentation for
these settings and how to make them permanent.
## 4. Linux or *BSD on non-x86 systems
Standard build will fail on non-x86 systems, but you should be able to
leverage two other options:
- The LLVM mode (see README.llvm.md), which does not rely on
x86-specific assembly shims. It's fast and robust, but requires a
complete installation of clang.
- The QEMU mode (see qemu_mode/README.md), which can be also used for
fuzzing cross-platform binaries. It's slower and more fragile, but
can be used even when you don't have the source for the tested app.
If you're not sure what you need, you need the LLVM mode, which is built by
default.
...and compile your target program with afl-clang-fast or afl-clang-fast++
instead of the traditional afl-gcc or afl-clang wrappers.
## 5. Solaris on x86
The fuzzer reportedly works on Solaris, but I have not tested this first-hand,
and the user base is fairly small, so I don't have a lot of feedback.
To get the ball rolling, you will need to use GNU make and GCC or clang. I'm
being told that the stock version of GCC that comes with the platform does not
work properly due to its reliance on a hardcoded location for 'as' (completely
ignoring the `-B` parameter or `$PATH`).
To fix this, you may want to build stock GCC from the source, like so:
```sh
./configure --prefix=$HOME/gcc --with-gnu-as --with-gnu-ld \
--with-gmp-include=/usr/include/gmp --with-mpfr-include=/usr/include/mpfr
make
sudo make install
```
Do *not* specify `--with-as=/usr/gnu/bin/as` - this will produce a GCC binary that
ignores the `-B` flag and you will be back to square one.
Note that Solaris reportedly comes with crash reporting enabled, which causes
problems with crashes being misinterpreted as hangs, similarly to the gotchas
for Linux and MacOS X. AFL does not auto-detect crash reporting on this
particular platform, so you may need to run the following command:
```sh
coreadm -d global -d global-setid -d process -d proc-setid \
-d kzone -d log
```
User emulation mode of QEMU is not available on Solaris, so black-box
instrumentation mode (`-Q`) will not work.
## 6. Everything else
You're on your own. On POSIX-compliant systems, you may be able to compile and
run the fuzzer; and the LLVM and GCC plugin modes may offer a way to instrument
non-x86 code.
The fuzzer will run on Windows in WSL only. It will not work under Cygwin or in the normal Windows world. It
could be ported to the latter platform fairly easily, but it's a pretty bad
idea, because Cygwin is extremely slow. It makes much more sense to use
VirtualBox or so to run a hardware-accelerated Linux VM; it will run around
20x faster or so. If you have a *really* compelling use case for Cygwin, let
me know.
Although Android on x86 should theoretically work, the stock kernel may have
SHM support compiled out, and if so, you may have to address that issue first.
It's possible that all you need is this workaround:
https://github.com/pelya/android-shmem
Joshua J. Drake notes that the Android linker adds a shim that automatically
intercepts `SIGSEGV` and related signals. To fix this issue and be able to see
crashes, you need to put this at the beginning of the fuzzed program:
```c
signal(SIGILL, SIG_DFL);
signal(SIGABRT, SIG_DFL);
signal(SIGBUS, SIG_DFL);
signal(SIGFPE, SIG_DFL);
signal(SIGSEGV, SIG_DFL);
```
You may need to `#include <signal.h>` first.
See
[http://www.spy-hill.com/help/apple/SharedMemory.html](http://www.spy-hill.com/help/apple/SharedMemory.html)
for documentation for these settings and how to make them permanent.
View File
@ -1,50 +0,0 @@
# AFL quick start guide
You should read [README.md](../README.md) - it's pretty short. If you really can't, here's
how to hit the ground running:
1) Compile AFL with 'make'. If build fails, see [INSTALL.md](INSTALL.md) for tips.
2) Find or write a reasonably fast and simple program that takes data from
a file or stdin, processes it in a test-worthy way, then exits cleanly.
If testing a network service, modify it to run in the foreground and read
from stdin. When fuzzing a format that uses checksums, comment out the
checksum verification code, too.
If this is not possible (e.g. in -Q(emu) mode) then use
AFL_CUSTOM_MUTATOR_LIBRARY to calculate the values with your own library.
The program must crash properly when a fault is encountered. Watch out for
custom SIGSEGV or SIGABRT handlers and background processes. For tips on
detecting non-crashing flaws, see section 11 in [README.md](README.md).
3) Compile the program / library to be fuzzed using afl-cc. A common way to
do this would be:
CC=/path/to/afl-cc CXX=/path/to/afl-c++ ./configure --disable-shared
make clean all
4) Get a small but valid input file that makes sense to the program. When
fuzzing verbose syntax (SQL, HTTP, etc), create a dictionary as described in
dictionaries/README.md, too.
5) If the program reads from stdin, run 'afl-fuzz' like so:
./afl-fuzz -i testcase_dir -o findings_dir -- \
/path/to/tested/program [...program's cmdline...]
If the program takes input from a file, you can put @@ in the program's
command line; AFL will put an auto-generated file name in there for you.
6) Investigate anything shown in red in the fuzzer UI by promptly consulting
[status_screen.md](status_screen.md).
7) There is a basic docker build with 'docker build -t aflplusplus .'
That's it. Sit back, relax, and - time permitting - try to skim through the
following files:
- README.md - A general introduction to AFL,
- docs/perf_tips.md - Simple tips on how to fuzz more quickly,
- docs/status_screen.md - An explanation of the tidbits shown in the UI,
- docs/parallel_fuzzing.md - Advice on running AFL on multiple cores.
543
docs/afl-fuzz_approach.md Normal file
View File
@ -0,0 +1,543 @@
# The afl-fuzz approach
AFL++ is a brute-force fuzzer coupled with an exceedingly simple but rock-solid
instrumentation-guided genetic algorithm. It uses a modified form of edge
coverage to effortlessly pick up subtle, local-scale changes to program control
flow.
Simplifying a bit, the overall algorithm can be summed up as:
1) Load user-supplied initial test cases into the queue.
2) Take the next input file from the queue.
3) Attempt to trim the test case to the smallest size that doesn't alter the
measured behavior of the program.
4) Repeatedly mutate the file using a balanced and well-researched variety of
traditional fuzzing strategies.
5) If any of the generated mutations resulted in a new state transition recorded
by the instrumentation, add mutated output as a new entry in the queue.
6) Go to 2.
The discovered test cases are also periodically culled to eliminate ones that
have been obsoleted by newer, higher-coverage finds; and undergo several other
instrumentation-driven effort minimization steps.
As a side result of the fuzzing process, the tool creates a small,
self-contained corpus of interesting test cases. These are extremely useful for
seeding other, labor- or resource-intensive testing regimes - for example, for
stress-testing browsers, office applications, graphics suites, or closed-source
tools.
The fuzzer is thoroughly tested to deliver out-of-the-box performance far
superior to blind fuzzing or coverage-only tools.
## Understanding the status screen
This section provides an overview of the status screen - plus tips for
troubleshooting any warnings and red text shown in the UI.
For the general instruction manual, see [README.md](../README.md).
### A note about colors
The status screen and error messages use colors to keep things readable and
attract your attention to the most important details. For example, red almost
always means "consult this doc" :-)
Unfortunately, the UI will only render correctly if your terminal is using
traditional un*x palette (white text on black background) or something close to
that.
If you are using inverse video, you may want to change your settings, say:
- For GNOME Terminal, go to `Edit > Profile` preferences, select the "colors"
tab, and from the list of built-in schemes, choose "white on black".
- For the MacOS X Terminal app, open a new window using the "Pro" scheme via the
`Shell > New Window` menu (or make "Pro" your default).
Alternatively, if you really like your current colors, you can edit config.h to
comment out USE_COLORS, then do `make clean all`.
We are not aware of any other simple way to make this work without causing other
side effects - sorry about that.
With that out of the way, let's talk about what's actually on the screen...
### The status bar
```
american fuzzy lop ++3.01a (default) [fast] {0}
```
The top line shows you which mode afl-fuzz is running in (normal: "american
fuzzy lop", crash exploration mode: "peruvian rabbit mode") and the version of
AFL++. Next to the version is the banner, which, if not set with -T by hand,
will either show the binary name being fuzzed, or the -M/-S main/secondary name
for parallel fuzzing. Second to last is the power schedule mode being run
(default: fast). Finally, the last item is the CPU id.
### Process timing
```
+----------------------------------------------------+
| run time : 0 days, 8 hrs, 32 min, 43 sec |
| last new path : 0 days, 0 hrs, 6 min, 40 sec |
| last uniq crash : none seen yet |
| last uniq hang : 0 days, 1 hrs, 24 min, 32 sec |
+----------------------------------------------------+
```
This section is fairly self-explanatory: it tells you how long the fuzzer has
been running and how much time has elapsed since its most recent finds. This is
broken down into "paths" (a shorthand for test cases that trigger new execution
patterns), crashes, and hangs.
When it comes to timing: there is no hard rule, but most fuzzing jobs should be
expected to run for days or weeks; in fact, for a moderately complex project,
the first pass will probably take a day or so. Every now and then, some jobs
will be allowed to run for months.
There's one important thing to watch out for: if the tool is not finding new
paths within several minutes of starting, you're probably not invoking the
target binary correctly and it never gets to parse the input files that are
thrown at it; other possible explanations are that the default memory limit
(`-m`) is too restrictive and the program exits after failing to allocate a
buffer very early on; or that the input files are patently invalid and always
fail a basic header check.
If there are no new paths showing up for a while, you will eventually see a big
red warning in this section, too :-)
### Overall results
```
+-----------------------+
| cycles done : 0 |
| total paths : 2095 |
| uniq crashes : 0 |
| uniq hangs : 19 |
+-----------------------+
```
The first field in this section gives you the count of queue passes done so far
- that is, the number of times the fuzzer went over all the interesting test
cases discovered so far, fuzzed them, and looped back to the very beginning.
Every fuzzing session should be allowed to complete at least one cycle; and
ideally, should run much longer than that.
As noted earlier, the first pass can take a day or longer, so sit back and
relax.
To help make the call on when to hit `Ctrl-C`, the cycle counter is color-coded.
It is shown in magenta during the first pass, progresses to yellow if new finds
are still being made in subsequent rounds, then blue when that ends - and
finally, turns green after the fuzzer hasn't been seeing any action for a longer
while.
The remaining fields in this part of the screen should be pretty obvious:
there's the number of test cases ("paths") discovered so far, and the number of
unique faults. The test cases, crashes, and hangs can be explored in real-time
by browsing the output directory, see
[#interpreting-output](#interpreting-output).
### Cycle progress
```
+-------------------------------------+
| now processing : 1296 (61.86%) |
| paths timed out : 0 (0.00%) |
+-------------------------------------+
```
This box tells you how far along the fuzzer is with the current queue cycle: it
shows the ID of the test case it is currently working on, plus the number of
inputs it decided to ditch because they were persistently timing out.
The "*" suffix sometimes shown in the first line means that the currently
processed path is not "favored" (a property discussed later on).
### Map coverage
```
+--------------------------------------+
| map density : 10.15% / 29.07% |
| count coverage : 4.03 bits/tuple |
+--------------------------------------+
```
The section provides some trivia about the coverage observed by the
instrumentation embedded in the target binary.
The first line in the box tells you how many branch tuples already were hit, in
proportion to how much the bitmap can hold. The number on the left describes the
current input; the one on the right is the value for the entire input corpus.
Be wary of extremes:
- Absolute numbers below 200 or so suggest one of three things: that the program
is extremely simple; that it is not instrumented properly (e.g., due to being
linked against a non-instrumented copy of the target library); or that it is
bailing out prematurely on your input test cases. The fuzzer will try to mark
this in pink, just to make you aware.
- Percentages over 70% may very rarely happen with very complex programs that
make heavy use of template-generated code. Because high bitmap density makes
it harder for the fuzzer to reliably discern new program states, we recommend
recompiling the binary with `AFL_INST_RATIO=10` or so and trying again (see
[env_variables.md](env_variables.md)). The fuzzer will flag high percentages
in red. Chances are, you will never see that unless you're fuzzing extremely
hairy software (say, v8, perl, ffmpeg).
The other line deals with the variability in tuple hit counts seen in the
binary. In essence, if every taken branch is always taken a fixed number of
times for all the inputs that were tried, this will read `1.00`. As we manage to
trigger other hit counts for every branch, the needle will start to move toward
`8.00` (every bit in the 8-bit map hit), but will probably never reach that
extreme.
Together, the values can be useful for comparing the coverage of several
different fuzzing jobs that rely on the same instrumented binary.
### Stage progress
```
+-------------------------------------+
| now trying : interest 32/8 |
| stage execs : 3996/34.4k (11.62%) |
| total execs : 27.4M |
| exec speed : 891.7/sec |
+-------------------------------------+
```
This part gives you an in-depth peek at what the fuzzer is actually doing right
now. It tells you about the current stage, which can be any of:
- calibration - a pre-fuzzing stage where the execution path is examined to
detect anomalies, establish baseline execution speed, and so on. Executed very
briefly whenever a new find is being made.
- trim L/S - another pre-fuzzing stage where the test case is trimmed to the
shortest form that still produces the same execution path. The length (L) and
stepover (S) are chosen in general relationship to file size.
- bitflip L/S - deterministic bit flips. There are L bits toggled at any given
time, walking the input file with S-bit increments. The current L/S variants
are: `1/1`, `2/1`, `4/1`, `8/8`, `16/8`, `32/8`.
- arith L/8 - deterministic arithmetics. The fuzzer tries to subtract or add
small integers to 8-, 16-, and 32-bit values. The stepover is always 8 bits.
- interest L/8 - deterministic value overwrite. The fuzzer has a list of known
"interesting" 8-, 16-, and 32-bit values to try. The stepover is 8 bits.
- extras - deterministic injection of dictionary terms. This can be shown as
"user" or "auto", depending on whether the fuzzer is using a user-supplied
dictionary (`-x`) or an auto-created one. You will also see "over" or
"insert", depending on whether the dictionary words overwrite existing data or
are inserted by offsetting the remaining data to accommodate their length.
- havoc - a sort-of-fixed-length cycle with stacked random tweaks. The
operations attempted during this stage include bit flips, overwrites with
random and "interesting" integers, block deletion, block duplication, plus
assorted dictionary-related operations (if a dictionary is supplied in the
first place).
- splice - a last-resort strategy that kicks in after the first full queue cycle
with no new paths. It is equivalent to 'havoc', except that it first splices
together two random inputs from the queue at some arbitrarily selected
midpoint.
- sync - a stage used only when `-M` or `-S` is set (see
[fuzzing_in_depth.md:3c) Using multiple cores](fuzzing_in_depth.md#c-using-multiple-cores)).
No real fuzzing is involved, but the tool scans the output from other fuzzers
and imports test cases as necessary. The first time this is done, it may take
several minutes or so.
The remaining fields should be fairly self-evident: there's the exec count
progress indicator for the current stage, a global exec counter, and a benchmark
for the current program execution speed. This may fluctuate from one test case
to another, but the benchmark should be ideally over 500 execs/sec most of the
time - and if it stays below 100, the job will probably take very long.
The fuzzer will explicitly warn you about slow targets, too. If this happens,
see the [best_practices.md#improving-speed](best_practices.md#improving-speed)
for ideas on how to speed things up.
### Findings in depth
```
+--------------------------------------+
| favored paths : 879 (41.96%) |
| new edges on : 423 (20.19%) |
| total crashes : 0 (0 unique) |
| total tmouts : 24 (19 unique) |
+--------------------------------------+
```
This gives you several metrics that are of interest mostly to complete nerds.
The section includes the number of paths that the fuzzer likes the most based on
a minimization algorithm baked into the code (these will get considerably more
air time), and the number of test cases that actually resulted in better edge
coverage (versus just pushing the branch hit counters up). There are also
additional, more detailed counters for crashes and timeouts.
Note that the timeout counter is somewhat different from the hang counter; this
one includes all test cases that exceeded the timeout, even if they did not
exceed it by a margin sufficient to be classified as hangs.
### Fuzzing strategy yields
```
+-----------------------------------------------------+
| bit flips : 57/289k, 18/289k, 18/288k |
| byte flips : 0/36.2k, 4/35.7k, 7/34.6k |
| arithmetics : 53/2.54M, 0/537k, 0/55.2k |
| known ints : 8/322k, 12/1.32M, 10/1.70M |
| dictionary : 9/52k, 1/53k, 1/24k |
|havoc/splice : 1903/20.0M, 0/0 |
|py/custom/rq : unused, 53/2.54M, unused |
| trim/eff : 20.31%/9201, 17.05% |
+-----------------------------------------------------+
```
This is just another nerd-targeted section keeping track of how many paths were
netted, in proportion to the number of execs attempted, for each of the fuzzing
strategies discussed earlier on. This serves to convincingly validate
assumptions about the usefulness of the various approaches taken by afl-fuzz.
The trim strategy stats in this section are a bit different than the rest. The
first number in this line shows the ratio of bytes removed from the input files;
the second one corresponds to the number of execs needed to achieve this goal.
Finally, the third number shows the proportion of bytes that, although not
possible to remove, were deemed to have no effect and were excluded from some of
the more expensive deterministic fuzzing steps.
Note that when deterministic mutation mode is off (which is the default because
it is not very efficient), the first five lines display "disabled (default,
enable with -D)".
Only the mutation stages that are activated will show counters.
### Path geometry
```
+---------------------+
| levels : 5 |
| pending : 1570 |
| pend fav : 583 |
| own finds : 0 |
| imported : 0 |
| stability : 100.00% |
+---------------------+
```
The first field in this section tracks the path depth reached through the guided
fuzzing process. In essence: the initial test cases supplied by the user are
considered "level 1". The test cases that can be derived from that through
traditional fuzzing are considered "level 2"; the ones derived by using these as
inputs to subsequent fuzzing rounds are "level 3"; and so forth. The maximum
depth is therefore a rough proxy for how much value you're getting out of the
instrumentation-guided approach taken by afl-fuzz.
The next field shows you the number of inputs that have not gone through any
fuzzing yet. The same stat is also given for "favored" entries that the fuzzer
really wants to get to in this queue cycle (the non-favored entries may have to
wait a couple of cycles to get their chance).
Next is the number of new paths found during this fuzzing section and imported
from other fuzzer instances when doing parallelized fuzzing; and the extent to
which identical inputs appear to sometimes produce variable behavior in the
tested binary.
That last bit is actually fairly interesting: it measures the consistency of
observed traces. If a program always behaves the same for the same input data,
it will earn a score of 100%. When the value is lower but still shown in purple,
the fuzzing process is unlikely to be negatively affected. If it goes into red,
you may be in trouble, since AFL++ will have difficulty discerning between
meaningful and "phantom" effects of tweaking the input file.
Now, most targets will just get a 100% score, but when you see lower figures,
there are several things to look at:
- The use of uninitialized memory in conjunction with some intrinsic sources of
entropy in the tested binary. Harmless to AFL, but could be indicative of a
security bug.
- Attempts to manipulate persistent resources, such as left over temporary files
or shared memory objects. This is usually harmless, but you may want to
double-check to make sure the program isn't bailing out prematurely. Running
out of disk space, SHM handles, or other global resources can trigger this,
too.
- Hitting some functionality that is actually designed to behave randomly.
Generally harmless. For example, when fuzzing sqlite, an input like `select
random();` will trigger a variable execution path.
- Multiple threads executing at once in semi-random order. This is harmless when
the 'stability' metric stays over 90% or so, but can become an issue if not.
Here's what to try:
* Use afl-clang-fast from [instrumentation](../instrumentation/) - it uses a
thread-local tracking model that is less prone to concurrency issues,
* See if the target can be compiled or run without threads. Common
`./configure` options include `--without-threads`, `--disable-pthreads`, or
`--disable-openmp`.
* Replace pthreads with GNU Pth (https://www.gnu.org/software/pth/), which
allows you to use a deterministic scheduler.
- In persistent mode, minor drops in the "stability" metric can be normal,
because not all the code behaves identically when re-entered; but major dips
may signify that the code within `__AFL_LOOP()` is not behaving correctly on
subsequent iterations (e.g., due to incomplete clean-up or reinitialization of
the state) and that most of the fuzzing effort goes to waste.
The paths where variable behavior is detected are marked with a matching entry
in the `<out_dir>/queue/.state/variable_behavior/` directory, so you can look
them up easily.
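For example, assuming the default output directory layout (`out/default` here
is just an illustration), you can list these entries with:

```shell
ls out/default/queue/.state/variable_behavior/
```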
### CPU load
```
[cpu: 25%]
```
This tiny widget shows the apparent CPU utilization on the local system. It is
calculated by taking the number of processes in the "runnable" state, and then
comparing it to the number of logical cores on the system.
If the value is shown in green, you are using fewer CPU cores than available on
your system and can probably parallelize to improve performance; for tips on how
to do that, see
[fuzzing_in_depth.md:3c) Using multiple cores](fuzzing_in_depth.md#c-using-multiple-cores).
If the value is shown in red, your CPU is *possibly* oversubscribed, and running
additional fuzzers may not give you any benefits.
Of course, this benchmark is very simplistic; it tells you how many processes
are ready to run, but not how resource-hungry they may be. It also doesn't
distinguish between physical cores, logical cores, and virtualized CPUs; the
performance characteristics of each of these will differ quite a bit.
If you want a more accurate measurement, you can run the `afl-gotcpu` utility
from the command line.
## Interpreting output
See [#understanding-the-status-screen](#understanding-the-status-screen) for
information on how to interpret the displayed stats and monitor the health of
the process. Be sure to consult this file especially if any UI elements are
highlighted in red.
The fuzzing process will continue until you press Ctrl-C. At a minimum, you want
to allow the fuzzer to complete one queue cycle, which may take anywhere from a
couple of hours to a week or so.
There are three subdirectories created within the output directory and updated
in real-time:
- queue/ - test cases for every distinctive execution path, plus all the
starting files given by the user. This is the synthesized corpus.
Before using this corpus for any other purposes, you can shrink
it to a smaller size using the afl-cmin tool (see the example after
this list). The tool will find a smaller subset of files offering
equivalent edge coverage.
- crashes/ - unique test cases that cause the tested program to receive a fatal
signal (e.g., SIGSEGV, SIGILL, SIGABRT). The entries are grouped by
the received signal.
- hangs/ - unique test cases that cause the tested program to time out. The
default time limit before something is classified as a hang is the
larger of 1 second and the value of the -t parameter. The value can
be fine-tuned by setting AFL_HANG_TMOUT, but this is rarely
necessary.
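As noted in the queue/ entry above, the synthesized corpus can be shrunk with
afl-cmin; a minimal sketch (directory names and the target path are
illustrative):

```shell
afl-cmin -i out/default/queue -o corpus_min -- /path/to/tested/program @@
```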
Crashes and hangs are considered "unique" if the associated execution paths
involve any state transitions not seen in previously-recorded faults. If a
single bug can be reached in multiple ways, there will be some count inflation
early in the process, but this should quickly taper off.
The file names for crashes and hangs are correlated with the parent,
non-faulting queue entries. This should help with debugging.
## Visualizing
If you have gnuplot installed, you can also generate some pretty graphs for any
active fuzzing task using afl-plot. For an example of how this looks, see
[https://lcamtuf.coredump.cx/afl/plot/](https://lcamtuf.coredump.cx/afl/plot/).
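A minimal invocation sketch (the fuzzer output directory and the graph output
directory are illustrative):

```shell
afl-plot out/default /tmp/afl_graphs
```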
You can also manually build and install afl-plot-ui, which is a helper utility
for showing the graphs generated by afl-plot in a graphical window using GTK.
You can build and install it as follows:
```shell
sudo apt install libgtk-3-0 libgtk-3-dev pkg-config
cd utils/plot_ui
make
cd ../../
sudo make install
```
To learn more about remote monitoring and metrics visualization with StatsD, see
[rpc_statsd.md](rpc_statsd.md).
### Addendum: status and plot files
For unattended operation, some of the key status screen information can be also
found in a machine-readable format in the fuzzer_stats file in the output
directory. This includes:
- `start_time` - unix time indicating the start time of afl-fuzz
- `last_update` - unix time corresponding to the last update of this file
- `run_time` - run time in seconds to the last update of this file
- `fuzzer_pid` - PID of the fuzzer process
- `cycles_done` - queue cycles completed so far
- `cycles_wo_finds` - number of cycles without any new paths found
- `execs_done` - number of execve() calls attempted
- `execs_per_sec` - overall number of execs per second
- `paths_total` - total number of entries in the queue
- `paths_favored` - number of queue entries that are favored
- `paths_found` - number of entries discovered through local fuzzing
- `paths_imported` - number of entries imported from other instances
- `max_depth` - number of levels in the generated data set
- `cur_path` - currently processed entry number
- `pending_favs` - number of favored entries still waiting to be fuzzed
- `pending_total` - number of all entries waiting to be fuzzed
- `variable_paths` - number of test cases showing variable behavior
- `stability` - percentage of bitmap bytes that behave consistently
- `bitmap_cvg` - percentage of edge coverage found in the map so far
- `unique_crashes` - number of unique crashes recorded
- `unique_hangs` - number of unique hangs encountered
- `last_path` - seconds since the last path was found
- `last_crash` - seconds since the last crash was found
- `last_hang` - seconds since the last hang was found
- `execs_since_crash` - execs since the last crash was found
- `exec_timeout` - the -t command line value
- `slowest_exec_ms` - real time of the slowest execution in ms
- `peak_rss_mb` - max rss usage reached during fuzzing in MB
- `edges_found` - how many edges have been found
- `var_byte_count` - how many edges are non-deterministic
- `afl_banner` - banner text (e.g., the target name)
- `afl_version` - the version of AFL++ used
- `target_mode` - default, persistent, qemu, unicorn, non-instrumented
- `command_line` - full command line used for the fuzzing session
Most of these map directly to the UI elements discussed earlier on.
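For example, a few of these values can be checked quickly from the command line
(the output directory name is illustrative):

```shell
grep -E '^(execs_per_sec|stability|unique_crashes)' out/default/fuzzer_stats
```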
On top of that, you can also find a file called `plot_data`, containing a
plottable history for most of these fields. If you have gnuplot installed, you
can turn this into a nice progress report with the included `afl-plot` tool.
### Addendum: automatically sending metrics with StatsD
In a CI environment or when running multiple fuzzers, it can be tedious to log
into each of them or deploy scripts to read the fuzzer statistics. Using
`AFL_STATSD` (and the other related environment variables `AFL_STATSD_HOST`,
`AFL_STATSD_PORT`, `AFL_STATSD_TAGS_FLAVOR`) you can automatically send metrics
to your favorite StatsD server. Depending on your StatsD server, you will be
able to monitor, trigger alerts, or perform actions based on these metrics
(e.g.: alert on slow exec/s for a new build, threshold of crashes, time since
last crash > X, etc.).
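A minimal sketch of such a setup - the host, port, and flavor below are
examples for a local dogstatsd-compatible server, adjust them to your
environment:

```shell
export AFL_STATSD=1
export AFL_STATSD_HOST=127.0.0.1
export AFL_STATSD_PORT=8125
export AFL_STATSD_TAGS_FLAVOR=dogstatsd
afl-fuzz -i in -o out -- ./target @@
```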
The selected metrics are a subset of all the metrics found in the status and in
the plot file. The list is the following: `cycle_done`, `cycles_wo_finds`,
`execs_done`,`execs_per_sec`, `paths_total`, `paths_favored`, `paths_found`,
`paths_imported`, `max_depth`, `cur_path`, `pending_favs`, `pending_total`,
`variable_paths`, `unique_crashes`, `unique_hangs`, `total_crashes`,
`slowest_exec_ms`, `edges_found`, `var_byte_count`, `havoc_expansion`. Their
definitions can be found in the addendum above.
When using multiple fuzzer instances with StatsD, it is *strongly* recommended
to set up the flavor (`AFL_STATSD_TAGS_FLAVOR`) to match your StatsD server. This
will allow you to see individual fuzzer performance, detect bad ones, see the
progress of each strategy...
192
docs/best_practices.md Normal file
View File
@ -0,0 +1,192 @@
# Best practices
## Contents
### Targets
* [Fuzzing a target with source code available](#fuzzing-a-target-with-source-code-available)
* [Fuzzing a target with dlopen() instrumented libraries](#fuzzing-a-target-with-dlopen-instrumented-libraries)
* [Fuzzing a binary-only target](#fuzzing-a-binary-only-target)
* [Fuzzing a GUI program](#fuzzing-a-gui-program)
* [Fuzzing a network service](#fuzzing-a-network-service)
### Improvements
* [Improving speed](#improving-speed)
* [Improving stability](#improving-stability)
## Targets
### Fuzzing a target with source code available
To learn how to fuzz a target if source code is available, see
[fuzzing_in_depth.md](fuzzing_in_depth.md).
### Fuzzing a target with dlopen instrumented libraries
If a source code based fuzzing target loads instrumented libraries with
dlopen() after the forkserver has been activated and non-colliding coverage
instrumentation is used (PCGUARD (which is the default) or LTO), then this is
an issue, because it enlarges the coverage map, but afl-fuzz doesn't
know about it.
The solution is to use `AFL_PRELOAD` for all dlopen()'ed libraries to
ensure that all coverage targets are present on startup in the target,
even if accessed only later with dlopen().
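A minimal sketch of such an invocation (the library and target names are
illustrative):

```shell
AFL_PRELOAD=/path/to/instrumented_plugin.so \
  afl-fuzz -i in -o out -- ./target @@
```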
For PCGUARD instrumentation `abort()` is called if this is detected, for LTO
there will either be no coverage for the instrumented dlopen()'ed libraries or
you will see lots of crashes in the UI.
Note that this is not an issue if you use the inferior `afl-gcc-fast`,
`afl-gcc` or `AFL_LLVM_INSTRUMENT=CLASSIC/NGRAM/CTX afl-clang-fast`
instrumentation.
### Fuzzing a binary-only target
For a comprehensive guide, see
[fuzzing_binary-only_targets.md](fuzzing_binary-only_targets.md).
### Fuzzing a GUI program
If the GUI program can read the fuzz data from a file (via the command line, a
fixed location or via an environment variable) without needing any user
interaction, then it would be suitable for fuzzing.
Otherwise, it is not possible without modifying the source code - which is a
very good idea anyway as the GUI functionality is a huge CPU/time overhead for
the fuzzing.
So create a new `main()` that just reads the test case and calls the
functionality for processing the input that the GUI program is using.
### Fuzzing a network service
Fuzzing a network service does not work "out of the box".
Using a network channel is inadequate for several reasons:
- it has a slow-down of x10-20 on the fuzzing speed,
- it does not scale to fuzzing multiple instances easily,
- instead of one initial data packet, often a back-and-forth interplay of packets
is needed for stateful protocols (which is totally unsupported by most
coverage-aware fuzzers).
The established method to fuzz network services is to modify the source code to
read from a file or stdin (fd 0) (or even faster via shared memory, combine this
with persistent mode
[instrumentation/README.persistent_mode.md](../instrumentation/README.persistent_mode.md)
and you have a performance gain of x10 instead of a performance loss of over x10
- that is a x100 difference!).
If modifying the source is not an option (e.g., because you only have a binary
and perform binary fuzzing) you can also use a shared library with AFL_PRELOAD
to emulate the network. This is also much faster than the real network would be.
See [utils/socket_fuzzing/](../utils/socket_fuzzing/).
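A sketch of such a setup - the exact preload library name built by
utils/socket_fuzzing/ may differ (check its Makefile), and the target name is
illustrative:

```shell
make -C utils/socket_fuzzing
AFL_PRELOAD=utils/socket_fuzzing/socketfuzz64.so \
  afl-fuzz -i in -o out -- ./network_server
```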
There is an outdated AFL++ branch that implements networking if you are
desperate though:
[https://github.com/AFLplusplus/AFLplusplus/tree/networking](https://github.com/AFLplusplus/AFLplusplus/tree/networking)
- however, a better option is AFLnet
([https://github.com/aflnet/aflnet](https://github.com/aflnet/aflnet)) which
allows you to define network state with different type of data packets.
## Improvements
### Improving speed
1. Use [llvm_mode](../instrumentation/README.llvm.md): afl-clang-lto (llvm >=
11) or afl-clang-fast (llvm >= 9 recommended).
2. Use [persistent mode](../instrumentation/README.persistent_mode.md) (x2-x20
speed increase).
3. Instrument just what you are interested in, see
[instrumentation/README.instrument_list.md](../instrumentation/README.instrument_list.md).
4. If you do not use shmem persistent mode, use `AFL_TMPDIR` to put the input
file directory on a tempfs location, see
[env_variables.md](env_variables.md).
5. Improve Linux kernel performance: modify `/etc/default/grub`, set
`GRUB_CMDLINE_LINUX_DEFAULT="ibpb=off ibrs=off kpti=off l1tf=off mds=off
mitigations=off no_stf_barrier noibpb noibrs nopcid nopti
nospec_store_bypass_disable nospectre_v1 nospectre_v2 pcid=off pti=off
spec_store_bypass_disable=off spectre_v2=off stf_barrier=off"`; then
`update-grub` and `reboot` (warning: makes the system less secure).
6. Running on an `ext2` filesystem with `noatime` mount option will be a bit
faster than on any other journaling filesystem.
7. Use your cores
([fuzzing_in_depth.md:3c) Using multiple cores](fuzzing_in_depth.md#c-using-multiple-cores))!
### Improving stability
For fuzzing, a 100% stable target that covers all edges is the best case. A 90%
stable target that covers all edges is, however, better than a 100% stable
target that ignores 10% of the edges.
With instability, you basically have a partial coverage loss on an edge; with
ignored functions, you have a full loss on those edges.
There are functions that are unstable, but also provide value to coverage, e.g.,
init functions that use fuzz data as input. If, however, a function that has
nothing to do with the input data is the source of instability, e.g., checking
jitter, or is a hash map function etc., then it should not be instrumented.
To be able to exclude these functions (based on AFL++'s measured stability), the
following process will allow you to identify functions with variable edges.
Four steps are required to do this. It requires quite some knowledge of coding
and/or disassembly and is effectively possible only with `afl-clang-fast`
`PCGUARD` and `afl-clang-lto` `LTO` instrumentation.
1. Instrument to be able to find the responsible function(s):
a) For LTO instrumented binaries, this can be documented during compile
time, just set `export AFL_LLVM_DOCUMENT_IDS=/path/to/a/file`. This file
will have one assigned edge ID and the corresponding function per line.
b) For PCGUARD instrumented binaries, it is much more difficult. Here you
can either modify the `__sanitizer_cov_trace_pc_guard` function in
`instrumentation/afl-llvm-rt.o.c` to write a backtrace to a file if the
ID in `__afl_area_ptr[*guard]` is one of the unstable edge IDs. (Example
code is already there). Then recompile and reinstall `llvm_mode` and
rebuild your target. Run the recompiled target with `afl-fuzz` for a
while and then check the file that you wrote with the backtrace
information. Alternatively, you can use `gdb` to hook
`__sanitizer_cov_trace_pc_guard_init` on start, check to which memory
address the edge ID value is written, and set a write breakpoint to that
address (`watch 0x.....`).
c) In other instrumentation types, this is not possible. So just recompile
with the two mentioned above. This is just for identifying the functions
that have unstable edges.
2. Identify which edge ID numbers are unstable.
Run the target with `export AFL_DEBUG=1` for a few minutes, then terminate it.
The out/fuzzer_stats file will then show the edge IDs that were identified
as unstable in the `var_bytes` entry. You can match these numbers directly
to the data you created in the first step. Now you know which functions are
responsible for the instability.
3. Create a text file with the filenames/functions
Identify which source code files contain the functions that you need to
remove from instrumentation, or just specify the functions you want to skip
for instrumentation. Note that optimization might inline functions!
Follow this document on how to do this:
[instrumentation/README.instrument_list.md](../instrumentation/README.instrument_list.md).
If `PCGUARD` is used, then you need to follow this guide (needs llvm 12+!):
[https://clang.llvm.org/docs/SanitizerCoverage.html#partially-disabling-instrumentation](https://clang.llvm.org/docs/SanitizerCoverage.html#partially-disabling-instrumentation)
Only exclude those functions from instrumentation that provide no value for
coverage - that is if it does not process any fuzz data directly or
indirectly (e.g., hash maps, thread management etc.). If, however, a
function directly or indirectly handles fuzz data, then you should not put
the function in a deny instrumentation list and rather live with the
instability it comes with.
4. Recompile the target
Recompile, fuzz it, be happy :)
This link explains this process for
[Fuzzbench](https://github.com/google/fuzzbench/issues/677).
View File
@ -1,223 +0,0 @@
# Fuzzing binary-only programs with AFL++
AFL++, libfuzzer and others are great if you have the source code, as they
allow for very fast and coverage-guided fuzzing.
However, if there is only the binary program and no source code available,
then standard `afl-fuzz -n` (non-instrumented mode) is not effective.
The following is a description of how these binaries can be fuzzed with AFL++.
## TL;DR:
qemu_mode in persistent mode is the fastest - if the stability is
high enough. Otherwise, try retrowrite or afl-dyninst, and if these
fail too, then try standard qemu_mode with AFL_ENTRYPOINT set to where you need it.
If your target is a library use utils/afl_frida/.
If your target is non-linux then use unicorn_mode/.
## QEMU
QEMU is the "native" binary-only solution in AFL++.
It is available in the ./qemu_mode/ directory and once compiled it can
be accessed via the afl-fuzz -Q command line option.
It is the easiest-to-use alternative and even works for cross-platform binaries.
The speed decrease is about 50%.
However, various options exist to increase the speed:
- using AFL_ENTRYPOINT to move the forkserver entry to a later basic block in
the binary (+5-10% speed)
- using persistent mode [qemu_mode/README.persistent.md](../qemu_mode/README.persistent.md)
this will result in 150-300% overall speed increase - so 3-8x the original
qemu_mode speed!
- using AFL_CODE_START/AFL_CODE_END to only instrument specific parts
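A minimal invocation sketch (paths are illustrative, and the AFL_ENTRYPOINT
address is only an example):

```shell
afl-fuzz -Q -i in -o out -- ./closed_source_binary @@
# optionally move the forkserver entry to a later basic block:
AFL_ENTRYPOINT=0x4004e0 afl-fuzz -Q -i in -o out -- ./closed_source_binary @@
```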
Note that there is also honggfuzz: [https://github.com/google/honggfuzz](https://github.com/google/honggfuzz)
which now has a qemu_mode, but its performance is just 1.5% ...
As it is included in AFL++ this needs no URL.
If you would like to code a customized fuzzer without much work, we highly
recommend checking out our sister project libafl, which will support QEMU
too:
[https://github.com/AFLplusplus/LibAFL](https://github.com/AFLplusplus/LibAFL)
## AFL FRIDA
In frida_mode, you can fuzz binary-only targets as easily as with QEMU,
with the advantage that frida_mode also works on MacOS (both intel and M1).
If you want to fuzz a binary-only library, you can fuzz it with
frida-gum via utils/afl_frida/. You will have to write a harness to
call the target function in the library; use afl-frida.c as a template.
Both come with AFL++ so this needs no URL.
You can also perform remote fuzzing with frida, e.g., if you want to fuzz
on iPhone or Android devices; for this, you can use
[https://github.com/ttdennis/fpicker/](https://github.com/ttdennis/fpicker/)
as an intermediate that uses AFL++ for fuzzing.
If you would like to code a customized fuzzer without much work, we highly
recommend checking out our sister project libafl, which supports Frida too:
Working examples already exist :-)
## WINE+QEMU
Wine mode can run Win32 PE binaries with the QEMU instrumentation.
It needs Wine, python3 and the pefile python package installed.
As it is included in AFL++ this needs no URL.
## UNICORN
Unicorn is a fork of QEMU. The instrumentation is, therefore, very similar.
In contrast to QEMU, Unicorn does not offer a full system or even userland
emulation. Runtime environment and/or loaders have to be written from scratch,
if needed. On top, block chaining has been removed. This means the speed boost
introduced in the patched QEMU Mode of AFL++ cannot simply be ported over to
Unicorn. For further information, check out [unicorn_mode/README.md](../unicorn_mode/README.md).
As it is included in AFL++ this needs no URL.
## AFL UNTRACER
If you want to fuzz a binary-only shared library, you can fuzz it with
utils/afl_untracer/, using afl-untracer.c as a template.
It is slower than AFL FRIDA (see above).
## DYNINST
Dyninst is a binary instrumentation framework similar to Pintool and
Dynamorio (see far below). However, whereas Pintool and Dynamorio work at
runtime, dyninst instruments the target at load time, and then lets it run -
or saves the binary with the changes.
This is great for some things, e.g. fuzzing, and not so effective for others,
e.g. malware analysis.
So what we can do with dyninst is take every basic block, put afl's
instrumentation code in there, and then save the binary.
Afterwards we can just fuzz the newly saved target binary with afl-fuzz.
Sounds great? It is. The issue though - it is a non-trivial problem to
insert instructions, which change addresses in the process space, so that
everything is still working afterwards. Hence more often than not binaries
crash when they are run.
The speed decrease is about 15-35%, depending on the optimization options
used with afl-dyninst.
So if Dyninst works, it is the best option available. Otherwise it just
doesn't work well.
[https://github.com/vanhauser-thc/afl-dyninst](https://github.com/vanhauser-thc/afl-dyninst)
## RETROWRITE, ZAFL, ... other binary rewriter
If you have an x86/x86_64 binary that still has its symbols, is compiled
with position independent code (PIC/PIE), and does not use most of the C++
features, then the retrowrite solution might be for you.
It decompiles to ASM files which can then be instrumented with afl-gcc.
It is at about 80-85% performance.
[https://git.zephyr-software.com/opensrc/zafl](https://git.zephyr-software.com/opensrc/zafl)
[https://github.com/HexHive/retrowrite](https://github.com/HexHive/retrowrite)
## MCSEMA
Theoretically you can also decompile to llvm IR with mcsema, and then
use llvm_mode to instrument the binary.
Good luck with that.
[https://github.com/lifting-bits/mcsema](https://github.com/lifting-bits/mcsema)
## INTEL-PT
If you have a newer Intel CPU, you can make use of Intel's processor trace.
The big issue with Intel's PT is the small buffer size and the complex
encoding of the debug information collected through PT.
This makes the decoding very CPU intensive and hence slow.
As a result, the overall speed decrease is about 70-90% (depending on
the implementation and other factors).
There are two AFL intel-pt implementations:
1. [https://github.com/junxzm1990/afl-pt](https://github.com/junxzm1990/afl-pt)
=> this needs Ubuntu 14.04.05 without any updates and the 4.4 kernel.
2. [https://github.com/hunter-ht-2018/ptfuzzer](https://github.com/hunter-ht-2018/ptfuzzer)
=> this needs a 4.14 or 4.15 kernel. the "nopti" kernel boot option must
be used. This one is faster than the other.
Note that there is also honggfuzz: https://github.com/google/honggfuzz
But its IPT performance is just 6%!
## CORESIGHT
Coresight is ARM's answer to Intel's PT.
There is no implementation so far which handles coresight, and getting
it working on ARM Linux is very difficult because building custom kernels
on embedded systems is difficult. And finding one that has coresight in
the ARM chip is difficult too.
My guess is that it is slower than Qemu, but faster than Intel PT.
If anyone finds any coresight implementation for AFL please ping me: vh@thc.org
## PIN & DYNAMORIO
Pintool and Dynamorio are dynamic instrumentation engines, and they can be
used for getting basic block information at runtime.
Pintool is only available for Intel x32/x64 on Linux, Mac OS and Windows,
whereas Dynamorio is additionally available for ARM and AARCH64.
Dynamorio is also 10x faster than Pintool.
The big issue with Dynamorio (and therefore Pintool too) is speed.
Dynamorio has a speed decrease of 98-99%.
Pintool has a speed decrease of 99.5%.
Hence Dynamorio is the option to go for if everything else fails, and Pintool
only if Dynamorio fails too.
Dynamorio solutions:
* [https://github.com/vanhauser-thc/afl-dynamorio](https://github.com/vanhauser-thc/afl-dynamorio)
* [https://github.com/mxmssh/drAFL](https://github.com/mxmssh/drAFL)
* [https://github.com/googleprojectzero/winafl/](https://github.com/googleprojectzero/winafl/) <= very good but windows only
Pintool solutions:
* [https://github.com/vanhauser-thc/afl-pin](https://github.com/vanhauser-thc/afl-pin)
* [https://github.com/mothran/aflpin](https://github.com/mothran/aflpin)
* [https://github.com/spinpx/afl_pin_mode](https://github.com/spinpx/afl_pin_mode) <= only old Pintool version supported
## Non-AFL solutions
There are many binary-only fuzzing frameworks.
Some are great for CTFs but don't work with large binaries, others are very
slow but have good path discovery, some are very hard to set-up ...
* QSYM: [https://github.com/sslab-gatech/qsym](https://github.com/sslab-gatech/qsym)
* Manticore: [https://github.com/trailofbits/manticore](https://github.com/trailofbits/manticore)
* S2E: [https://github.com/S2E](https://github.com/S2E)
* Tinyinst: [https://github.com/googleprojectzero/TinyInst](https://github.com/googleprojectzero/TinyInst) (Mac/Windows only)
* Jackalope: [https://github.com/googleprojectzero/Jackalope](https://github.com/googleprojectzero/Jackalope)
* ... please send me any missing that are good
## Closing words
That's it! News, corrections, updates? Send an email to vh@thc.org

View File

@ -1,16 +1,16 @@
# Custom Mutators in AFL++
This file describes how you can implement custom mutations to be used in AFL.
For now, we support C/C++ library and Python module, collectivelly named as the
For now, we support C/C++ library and Python module, collectively named as the
custom mutator.
There is also experimental support for Rust in `custom_mutators/rust`.
Please refer to that directory for documentation.
Run ```cargo doc -p custom_mutator --open``` in that directory to view the
documentation in your web browser.
There is also experimental support for Rust in `custom_mutators/rust`. For
documentation, refer to that directory. Run `cargo doc -p custom_mutator --open`
in that directory to view the documentation in your web browser.
Implemented by
- C/C++ library (`*.so`): Khaled Yakdan from Code Intelligence (<yakdan@code-intelligence.de>)
- C/C++ library (`*.so`): Khaled Yakdan from Code Intelligence
(<yakdan@code-intelligence.de>)
- Python module: Christian Holler from Mozilla (<choller@mozilla.com>)
## 1) Introduction
@ -21,13 +21,17 @@ fuzzing by using libraries that perform mutations according to a given grammar.
The custom mutator is passed to `afl-fuzz` via the `AFL_CUSTOM_MUTATOR_LIBRARY`
or `AFL_PYTHON_MODULE` environment variable, and must export a fuzz function.
Now AFL also supports multiple custom mutators which can be specified in the same `AFL_CUSTOM_MUTATOR_LIBRARY` environment variable like this.
Now AFL++ also supports multiple custom mutators which can be specified in the
same `AFL_CUSTOM_MUTATOR_LIBRARY` environment variable like this.
```bash
export AFL_CUSTOM_MUTATOR_LIBRARY="full/path/to/mutator_first.so;full/path/to/mutator_second.so"
```
Please see [APIs](#2-apis) and [Usage](#3-usage) for detail.
The custom mutation stage is set to be the first non-deterministic stage (right before the havoc stage).
For details, see [APIs](#2-apis) and [Usage](#3-usage).
The custom mutation stage is set to be the first non-deterministic stage (right
before the havoc stage).
Note: If `AFL_CUSTOM_MUTATOR_ONLY` is set, all mutations will solely be
performed with the custom mutator.
@ -35,6 +39,7 @@ performed with the custom mutator.
## 2) APIs
C/C++:
```c
void *afl_custom_init(afl_state_t *afl, unsigned int seed);
unsigned int afl_custom_fuzz_count(void *data, const unsigned char *buf, size_t buf_size);
@ -53,6 +58,7 @@ void afl_custom_deinit(void *data);
```
Python:
```python
def init(seed):
pass
@ -101,7 +107,8 @@ def deinit(): # optional for Python
- `init`:
This method is called when AFL++ starts up and is used to seed RNG and set up buffers and state.
This method is called when AFL++ starts up and is used to seed RNG and set
up buffers and state.
- `queue_get` (optional):
@ -110,27 +117,26 @@ def deinit(): # optional for Python
- `fuzz_count` (optional):
When a queue entry is selected to be fuzzed, afl-fuzz selects the number
of fuzzing attempts with this input based on a few factors.
If however the custom mutator wants to set this number instead on how often
it is called for a specific queue entry, use this function.
This function is most useful if `AFL_CUSTOM_MUTATOR_ONLY` is **not** used.
When a queue entry is selected to be fuzzed, afl-fuzz selects the number of
fuzzing attempts with this input based on a few factors. If, however, the
custom mutator wants to set this number instead on how often it is called
for a specific queue entry, use this function. This function is most useful
if `AFL_CUSTOM_MUTATOR_ONLY` is **not** used.
- `fuzz` (optional):
This method performs custom mutations on a given input. It also accepts an
additional test case.
Note that this function is optional - but it makes sense to use it.
You would only skip this if `post_process` is used to fix checksums etc.
so if you are using it e.g. as a post processing library.
additional test case. Note that this function is optional - but it makes
sense to use it. You would only skip this if `post_process` is used to fix
checksums etc. so if you are using it, e.g., as a post processing library.
Note that a length > 0 *must* be returned!
- `describe` (optional):
When this function is called, it shall describe the current testcase,
generated by the last mutation. This will be called, for example,
to name the written testcase file after a crash occurred.
Using it can help to reproduce crashing mutations.
When this function is called, it shall describe the current test case,
generated by the last mutation. This will be called, for example, to name
the written test case file after a crash occurred. Using it can help to
reproduce crashing mutations.
- `havoc_mutation` and `havoc_mutation_probability` (optional):
@ -142,21 +148,21 @@ def deinit(): # optional for Python
- `post_process` (optional):
For some cases, the format of the mutated data returned from the custom
mutator is not suitable to directly execute the target with this input.
For example, when using libprotobuf-mutator, the data returned is in a
protobuf format which corresponds to a given grammar. In order to execute
the target, the protobuf data must be converted to the plain-text format
expected by the target. In such scenarios, the user can define the
`post_process` function. This function is then transforming the data into the
format expected by the API before executing the target.
mutator is not suitable to directly execute the target with this input. For
example, when using libprotobuf-mutator, the data returned is in a protobuf
format which corresponds to a given grammar. In order to execute the target,
the protobuf data must be converted to the plain-text format expected by the
target. In such scenarios, the user can define the `post_process` function.
This function is then transforming the data into the format expected by the
API before executing the target.
This can return any python object that implements the buffer protocol and
supports PyBUF_SIMPLE. These include bytes, bytearray, etc.
- `queue_new_entry` (optional):
This methods is called after adding a new test case to the queue.
If the contents of the file was changed return True, False otherwise.
    This method is called after adding a new test case to the queue. If the
    contents of the file were changed, return True; otherwise, return False.
- `introspection` (optional):
@ -168,8 +174,8 @@ def deinit(): # optional for Python
The last method to be called, deinitializing the state.
Note that there are also three functions for trimming as described in the
next section.
Note that there are also three functions for trimming as described in the next
section.
### Trimming Support
@ -177,8 +183,8 @@ The generic trimming routines implemented in AFL++ can easily destroy the
structure of complex formats, possibly leading to a point where you have a lot
of test cases in the queue that your Python module cannot process anymore but
your target application still accepts. This is especially the case when your
target can process a part of the input (causing coverage) and then errors out
on the remaining input.
target can process a part of the input (causing coverage) and then errors out on
the remaining input.
In such cases, it makes sense to implement a custom trimming routine. The API
consists of multiple methods because after each trimming step, we have to go
@ -189,8 +195,9 @@ trimmed input. Here's a quick API description:
This method is called at the start of each trimming operation and receives
the initial buffer. It should return the amount of iteration steps possible
on this input (e.g. if your input has n elements and you want to remove them
one by one, return n, if you do a binary search, return log(n), and so on).
on this input (e.g., if your input has n elements and you want to remove
them one by one, return n, if you do a binary search, return log(n), and so
on).
If your trimming algorithm doesn't allow to determine the amount of
(remaining) steps easily (esp. while running), then you can alternatively
@ -202,21 +209,21 @@ trimmed input. Here's a quick API description:
- `trim` (optional)
This method is called for each trimming operation. It doesn't have any
arguments because we already have the initial buffer from `init_trim` and we
can memorize the current state in the data variables. This can also save
arguments because there is already the initial buffer from `init_trim` and
we can memorize the current state in the data variables. This can also save
reparsing steps for each iteration. It should return the trimmed input
buffer.
- `post_trim` (optional)
This method is called after each trim operation to inform you if your
trimming step was successful or not (in terms of coverage). If you receive
a failure here, you should reset your input to the last known good state.
In any case, this method must return the next trim iteration index (from 0
to the maximum amount of steps you returned in `init_trim`).
trimming step was successful or not (in terms of coverage). If you receive a
failure here, you should reset your input to the last known good state. In
any case, this method must return the next trim iteration index (from 0 to
the maximum amount of steps you returned in `init_trim`).
Omitting any of three trimming methods will cause the trimming to be disabled
and trigger a fallback to the builtin default trimming routine.
and trigger a fallback to the built-in default trimming routine.
### Environment Variables
@ -224,11 +231,10 @@ Optionally, the following environment variables are supported:
- `AFL_CUSTOM_MUTATOR_ONLY`
Disable all other mutation stages. This can prevent broken testcases
(those that your Python module can't work with anymore) to fill up your
queue. Best combined with a custom trimming routine (see below) because
trimming can cause the same test breakage like havoc and splice.
    Disable all other mutation stages. This can prevent broken test cases (those
    that your Python module can't work with anymore) from filling up your queue.
    Best combined with a custom trimming routine (see below) because trimming can
    cause the same test breakage as havoc and splice.
- `AFL_PYTHON_ONLY`
@ -264,22 +270,27 @@ In case your setup is different, set the necessary variables like this:
### Custom Mutator Preparation
For C/C++ mutators, the source code must be compiled as a shared object:
```bash
gcc -shared -Wall -O3 example.c -o example.so
```
Note that if you specify multiple custom mutators, the corresponding functions will
be called in the order in which they are specified. e.g first `post_process` function of
`example_first.so` will be called and then that of `example_second.so`.
Note that if you specify multiple custom mutators, the corresponding functions
will be called in the order in which they are specified. E.g., the first
`post_process` function of `example_first.so` will be called and then that of
`example_second.so`.
### Run
C/C++
```bash
export AFL_CUSTOM_MUTATOR_LIBRARY="/full/path/to/example_first.so;/full/path/to/example_second.so"
afl-fuzz /path/to/program
```
Python
```bash
export PYTHONPATH=`dirname /full/path/to/example.py`
export AFL_PYTHON_MODULE=example
@ -288,8 +299,8 @@ afl-fuzz /path/to/program
## 4) Example
Please see [example.c](../custom_mutators/examples/example.c) and
[example.py](../custom_mutators/examples/example.py)
See [example.c](../custom_mutators/examples/example.c) and
[example.py](../custom_mutators/examples/example.py).
## 5) Other Resources
@ -297,4 +308,4 @@ Please see [example.c](../custom_mutators/examples/example.c) and
- [bruce30262/libprotobuf-mutator_fuzzing_learning](https://github.com/bruce30262/libprotobuf-mutator_fuzzing_learning/tree/master/4_libprotobuf_aflpp_custom_mutator)
- [thebabush/afl-libprotobuf-mutator](https://github.com/thebabush/afl-libprotobuf-mutator)
- [XML Fuzzing@NullCon 2017](https://www.agarri.fr/docs/XML_Fuzzing-NullCon2017-PUBLIC.pdf)
- [A bug detected by AFL + XML-aware mutators](https://bugs.chromium.org/p/chromium/issues/detail?id=930663)
- [A bug detected by AFL + XML-aware mutators](https://bugs.chromium.org/p/chromium/issues/detail?id=930663)

124
docs/docs2.md Normal file
View File

@ -0,0 +1,124 @@
# Restructure AFL++'s documentation - Case Study
## Problem statement
AFL++ inherited its documentation from the original Google AFL project.
Since then, it has been massively improved - feature- and performance-wise -
and although the documentation has likewise been extended, it has grown out
of proportion.
The documentation is written by non-native English speakers, and none of us
has a background in writing.
We see questions about AFL++ usage on mailing lists (e.g., afl-users), Discord
channels, web forums, and as issues in our repository.
Most of them could be answered if people read through all the documentation.
This only increases as AFL++ has been at the top of Google's fuzzbench
statistics (which measures the performance of fuzzers), has been integrated
into Google's oss-fuzz and clusterfuzz, and is in many Unix packaging
repositories, e.g., Debian, FreeBSD, etc.
AFL++ had 44 (!) documentation files with 13k total lines of content.
This was way too much.
## Proposal abstract
AFL++'s documentation needs a complete overhaul, both on an
organisational/structural level as well as in terms of content.
Overall, the following actions have to be performed:
* Create a better structure of documentation so it is easier to find the
information that is being looked for, combining and/or splitting up the
existing documents as needed.
* Rewrite some documentation to remove duplication. Some information is
  present several times in the documentation. These duplicates should be
  reduced to where they are needed so that we have as little bloat as possible.
* The documents have been written and modified by a lot of different people,
  most of them non-native English speakers. Hence, an overall review of which
  parts should be rewritten has to be performed, and then the rewrite done.
* Create a cheat-sheet for a very short best-setup build and run of AFL++
* Pictures explain more than 1000 words. We need at least 4 images that
explain the workflow with AFL++:
- the build workflow
- the fuzzing workflow
- the fuzzing campaign management workflow
- the overall workflow that is an overview of the above
  - maybe more? wherever the technical writer deems it necessary for
    understanding.
Requirements:
* Documentation has to be in Markdown format
* Images have to be either in SVG or PNG format.
* All documentation should be (moved) in(to) docs/
## Project description
We created our proposal by discussing within the team what the issues were and
what was needed to fix them.
This resulted in the [project proposal](https://github.com/AFLplusplus/AFLplusplus/blob/stable/docs/docs.md).
We did not want to be selected by a writer but to select a writer ourselves, so
we combed through the list and reviewed every single one of them.
We were not looking for coders writing technical documentation, but rather
someone who is an experienced writer and has documented experience with
structuring documentation.
Few fit that profile and we sent out messages to 6 people.
We finally decided on Jana because she had a strong background in technical
documentation and structuring information.
She had no technical experience in fuzzing whatsoever, but we saw that as
a plus - of course, this made the whole process longer because details had to
be explained, but overall it ensured that the documentation can be read by
(mostly) everyone.
We communicated via video calls every few weeks and she kept a public kanban
board about her todos; additionally, we used a Signal channel.
Her changes were imported via PRs where we discussed details.
The project was off to a good start, but then Jana got pregnant with serious
side effects that made working impossible for her for a longer time; hence,
the schedule was set back.
She offered to rescind the payment and let us select a new writer, but we saw
little benefit in that, as it would mean selecting a new writer, someone else
with a different vision of how the result should look, and thus basically a
full restart of the project and a large impact on our own time.
So we agreed - after discussion with the Google GSoD team - that she would
continue the project after the GSoD completion deadline as best as she can.
At the end of November, she took one week off from work and fully dedicated her
time to the documentation, which brought the project a big step forward.
Originally, the project should have ended at the beginning of October, but now,
nearing the end of November, we are at about 85% completion, with the end
expected around mid-December.
## Metrics
We merged most of the changes into our development branch and are getting
close to a state where the user documentation part is complete and we
can create a new release. Only then will the new documentation actually be
visible to users. Therefore, no metrics could be collected so far.
We plan a user-assisted QA review at the end of November/beginning of December.
However, the documentation has been reviewed by a few test users so far, who
gave it a thumbs up.
## Summary
The GSoD project itself is great. It helps to get the documentation back in
line.
It was and is a larger time investment from our side, but we expected that.
When the project is done, the documentation will be more accessible to users
and will also need less maintenance from us.
There is still follow-up work to be done by us afterwards (web site for the
docs, etc.).
We are not sure what we would do differently next time. I think we prepared as
best as possible and reacted as best as possible to the unexpected.
Recommendations for other organizations who would like to participate in GSoD:
- Expect the process to take a larger part of your time. The writer needs your
  full support.
- Have someone dedicated from the dev/org side to support, educate, and
  supervise the writer.
- Set clear goals and expectations.

File diff suppressed because it is too large Load Diff

61
docs/features.md Normal file
View File

@ -0,0 +1,61 @@
# Important features of AFL++
AFL++ supports llvm from 3.8 up to version 12, very fast binary fuzzing with
QEMU 5.1 with laf-intel and redqueen, FRIDA mode, unicorn mode, gcc plugin, full
*BSD, Mac OS, Solaris and Android support and much, much, much more.
| Feature/Instrumentation | afl-gcc | llvm | gcc_plugin | FRIDA mode(9) | QEMU mode(10) |unicorn_mode(10) |coresight_mode(11)|
| -------------------------|:-------:|:---------:|:----------:|:----------------:|:----------------:|:----------------:|:----------------:|
| Threadsafe counters | | x(3) | | | | | |
| NeverZero | x86[_64]| x(1) | x | x | x | x | |
| Persistent Mode | | x | x | x86[_64]/arm64 | x86[_64]/arm[64] | x | |
| LAF-Intel / CompCov | | x | | | x86[_64]/arm[64] | x86[_64]/arm[64] | |
| CmpLog | | x | | x86[_64]/arm64 | x86[_64]/arm[64] | | |
| Selective Instrumentation| | x | x | x | x | | |
| Non-Colliding Coverage | | x(4) | | | (x)(5) | | |
| Ngram prev_loc Coverage | | x(6) | | | | | |
| Context Coverage | | x(6) | | | | | |
| Auto Dictionary | | x(7) | | | | | |
| Snapshot LKM Support | | (x)(8) | (x)(8) | | (x)(5) | | |
| Shared Memory Test cases | | x | x | x86[_64]/arm64 | x | x | |
1. default for LLVM >= 9.0, environment variable for older versions due to an
   efficiency bug in previous llvm versions
2. GCC creates non-performant code, hence it is disabled in gcc_plugin
3. with `AFL_LLVM_THREADSAFE_INST`, disables NeverZero
4. with pcguard mode and LTO mode for LLVM 11 and newer
5. upcoming, development in the branch
6. not compatible with LTO instrumentation and needs at least LLVM v4.1
7. automatic in LTO mode with LLVM 11 and newer, an extra pass for all LLVM
versions that write to a file to use with afl-fuzz' `-x`
8. the snapshot LKM is currently unmaintained due to too many kernel changes
coming too fast :-(
9. FRIDA mode is supported on Linux and MacOS for Intel and ARM
10. QEMU/Unicorn is only supported on Linux
11. Coresight mode is only available on AARCH64 Linux with a CPU with Coresight
extension
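As an illustration (a minimal sketch; the source file name is a placeholder), a
feature from the table such as thread-safe counters (note 3) is simply enabled
via its environment variable at compile time:

```shell
# enable thread-safe coverage counters (this disables NeverZero, see note 3)
AFL_LLVM_THREADSAFE_INST=1 afl-clang-fast -o target target.c
```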
Among others, the following features and patches have been integrated:
* NeverZero patch for afl-gcc, instrumentation, QEMU mode and unicorn_mode,
  which prevents a map value from wrapping to zero and thus increases coverage
* Persistent mode, deferred forkserver and in-memory fuzzing for QEMU mode
* Unicorn mode which allows fuzzing of binaries from completely different
platforms (integration provided by domenukk)
* The new CmpLog instrumentation for LLVM and QEMU inspired by
[Redqueen](https://www.syssec.ruhr-uni-bochum.de/media/emma/veroeffentlichungen/2018/12/17/NDSS19-Redqueen.pdf)
* Win32 PE binary-only fuzzing with QEMU and Wine
* AFLfast's power schedules by Marcel Böhme:
[https://github.com/mboehme/aflfast](https://github.com/mboehme/aflfast)
* The MOpt mutator:
[https://github.com/puppet-meteor/MOpt-AFL](https://github.com/puppet-meteor/MOpt-AFL)
* LLVM mode Ngram coverage by Adrian Herrera
[https://github.com/adrianherrera/afl-ngram-pass](https://github.com/adrianherrera/afl-ngram-pass)
* LAF-Intel/CompCov support for instrumentation, QEMU mode and unicorn_mode
(with enhanced capabilities)
* Radamsa and honggfuzz mutators (as custom mutators).
* QBDI mode to fuzz android native libraries via Quarkslab's
[QBDI](https://github.com/QBDI/QBDI) framework
* Frida and ptrace mode to fuzz binary-only libraries, etc.
So all in all this is the best-of AFL that is out there :-)

View File

@ -0,0 +1,296 @@
# Fuzzing binary-only targets
AFL++, libfuzzer, and other fuzzers are great if you have the source code of the
target. This allows for very fast and coverage guided fuzzing.
However, if there is only the binary program and no source code available, then
standard `afl-fuzz -n` (non-instrumented mode) is not effective.
For fast, on-the-fly instrumentation of black-box binaries, AFL++ still offers
various support. The following is a description of how these binaries can be
fuzzed with AFL++.
## TL;DR:
QEMU mode in persistent mode is the fastest - if the stability is high enough.
Otherwise, try RetroWrite, Dyninst, and if these fail, too, then try standard
QEMU mode with `AFL_ENTRYPOINT` to where you need it.
If your target is a library, then use FRIDA mode.
If your target is non-linux, then use unicorn_mode.
## Fuzzing binary-only targets with AFL++
### QEMU mode
QEMU mode is the "native" solution to the program. It is available in the
./qemu_mode/ directory and, once compiled, it can be accessed by the afl-fuzz -Q
command line option. It is the easiest to use alternative and even works for
cross-platform binaries.
For Linux programs and their libraries, this is accomplished with a version of
QEMU running in the lesser-known "user space emulation" mode. QEMU is a project
separate from AFL++, but you can conveniently build the feature by doing:
```shell
cd qemu_mode
./build_qemu_support.sh
```
The following setup to use QEMU mode is recommended:
* run 1 afl-fuzz -Q instance with CMPLOG (`-c 0` + `AFL_COMPCOV_LEVEL=2`)
* run 1 afl-fuzz -Q instance with QASAN (`AFL_USE_QASAN=1`)
* run 1 afl-fuzz -Q instance with LAF (`AFL_PRELOAD=libcmpcov.so` +
`AFL_COMPCOV_LEVEL=2`), alternatively you can use FRIDA mode, just switch `-Q`
with `-O` and remove the LAF instance
Then run as many instances as you have cores left with either -Q mode or - even
better - use a binary rewriter like Dyninst, RetroWrite, ZAFL, etc.
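As a rough sketch of the recommended setup above (instance names, paths, and the
target binary are placeholders), the three special instances could be started
like this:

```shell
# CMPLOG instance (-c 0 uses the target itself as cmplog binary in QEMU mode)
AFL_COMPCOV_LEVEL=2 afl-fuzz -Q -c 0 -M main -i input -o output -- ./target @@
# QASAN instance
AFL_USE_QASAN=1 afl-fuzz -Q -S qasan -i input -o output -- ./target @@
# LAF instance
AFL_PRELOAD=libcmpcov.so AFL_COMPCOV_LEVEL=2 afl-fuzz -Q -S laf -i input -o output -- ./target @@
```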
If [afl-dyninst](https://github.com/vanhauser-thc/afl-dyninst) works for your
binary, then you can use afl-fuzz normally and it will have twice the speed
compared to QEMU mode (but slower than QEMU persistent mode). Note that several
other binary rewriters exist, all with their advantages and caveats.
The speed decrease of QEMU mode is about 50%. However, various options exist
to increase the speed:
- using AFL_ENTRYPOINT to move the forkserver entry to a later basic block in
the binary (+5-10% speed)
- using persistent mode
[qemu_mode/README.persistent.md](../qemu_mode/README.persistent.md) this will
result in a 150-300% overall speed increase - so 3-8x the original QEMU mode
speed!
- using AFL_CODE_START/AFL_CODE_END to only instrument specific parts
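For example, `AFL_ENTRYPOINT` takes the address of the basic block at which the
forkserver should start (the address below is only a placeholder; pick a
suitable one from your disassembler):

```shell
AFL_ENTRYPOINT=0x4004d0 afl-fuzz -Q -i input -o output -- ./target @@
```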
For additional instructions and caveats, see
[qemu_mode/README.md](../qemu_mode/README.md). If possible, you should use the
persistent mode, see
[qemu_mode/README.persistent.md](../qemu_mode/README.persistent.md). The mode is
approximately 2-5x slower than compile-time instrumentation, and is less
conducive to parallelization.
Note that there is also honggfuzz:
[https://github.com/google/honggfuzz](https://github.com/google/honggfuzz) which
now has a QEMU mode, but its performance is just 1.5% ...
If you'd like to code a customized fuzzer without much work, we highly recommend
checking out our sister project libafl which supports QEMU, too:
[https://github.com/AFLplusplus/LibAFL](https://github.com/AFLplusplus/LibAFL)
### WINE+QEMU
Wine mode can run Win32 PE binaries with the QEMU instrumentation. It needs
Wine, python3, and the pefile python package installed.
It is included in AFL++.
For more information, see
[qemu_mode/README.wine.md](../qemu_mode/README.wine.md).
### FRIDA mode
In FRIDA mode, you can fuzz binary-only targets as easily as with QEMU mode.
FRIDA mode is sometimes faster and sometimes slower than QEMU mode. It is also
newer, lacks COMPCOV, and has the advantage that it works on MacOS (both intel
and M1).
To build FRIDA mode:
```shell
cd frida_mode
make
```
For additional instructions and caveats, see
[frida_mode/README.md](../frida_mode/README.md).
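A minimal run sketch (harness name and paths are placeholders): FRIDA mode is
selected with `-O`, just like QEMU mode is selected with `-Q`:

```shell
afl-fuzz -O -i input -o output -- ./harness @@
```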
If possible, you should use the persistent mode, see
[instrumentation/README.persistent_mode.md](../instrumentation/README.persistent_mode.md).
The mode is approximately 2-5x slower than compile-time instrumentation, and is
less conducive to parallelization. But for binary-only fuzzing, it gives a huge
speed improvement if it is possible to use.
If you want to fuzz a binary-only library, then you can fuzz it with frida-gum
via frida_mode/. You will have to write a harness to call the target function in
the library; use afl-frida.c as a template.
You can also perform remote fuzzing with frida, e.g., if you want to fuzz on
iPhone or Android devices, for this you can use
[https://github.com/ttdennis/fpicker/](https://github.com/ttdennis/fpicker/) as
an intermediate that uses AFL++ for fuzzing.
If you'd like to code a customized fuzzer without much work, we highly recommend
checking out our sister project libafl which supports Frida, too:
[https://github.com/AFLplusplus/LibAFL](https://github.com/AFLplusplus/LibAFL).
Working examples already exist :-)
### Unicorn
Unicorn is a fork of QEMU. The instrumentation is, therefore, very similar. In
contrast to QEMU, Unicorn does not offer a full system or even userland
emulation. Runtime environment and/or loaders have to be written from scratch,
if needed. On top, block chaining has been removed. This means the speed boost
introduced in the patched QEMU Mode of AFL++ cannot be ported over to Unicorn.
For non-Linux binaries, you can use AFL++'s unicorn_mode which can emulate
anything you want - for the price of speed and user written scripts.
To build unicorn_mode:
```shell
cd unicorn_mode
./build_unicorn_support.sh
```
For further information, check out
[unicorn_mode/README.md](../unicorn_mode/README.md).
### Shared libraries
If the goal is to fuzz a dynamic library, then there are two options available.
For both, you need to write a small harness that loads and calls the library.
Then you fuzz this with either FRIDA mode or QEMU mode and either use
`AFL_INST_LIBS=1` or `AFL_QEMU/FRIDA_INST_RANGES`.
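A hedged sketch of the first option (harness name and paths are placeholders):
with `AFL_INST_LIBS=1`, the loaded shared objects are instrumented as well:

```shell
AFL_INST_LIBS=1 afl-fuzz -Q -i input -o output -- ./harness @@
```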
Another, less precise and slower option is to fuzz it with utils/afl_untracer/
and use afl-untracer.c as a template. It is slower than FRIDA mode.
For more information, see
[utils/afl_untracer/README.md](../utils/afl_untracer/README.md).
### Coresight
Coresight is ARM's answer to Intel's PT. With AFL++ v3.15, there is a coresight
tracer implementation available in `coresight_mode/` which is faster than QEMU,
however, cannot run in parallel. Currently, only one process can be traced, it
is WIP.
For more information, see
[coresight_mode/README.md](../coresight_mode/README.md).
## Binary rewriters
An alternative solution is binary rewriters. They are faster than the solutions
native to AFL++ but don't always work.
### ZAFL
ZAFL is a static rewriting platform supporting x86-64 C/C++,
stripped/unstripped, and PIE/non-PIE binaries. Beyond conventional
instrumentation, ZAFL's API enables transformation passes (e.g., laf-Intel,
context sensitivity, InsTrim, etc.).
Its baseline instrumentation speed typically averages 90-95% of
afl-clang-fast's.
[https://git.zephyr-software.com/opensrc/zafl](https://git.zephyr-software.com/opensrc/zafl)
### RetroWrite
If you have an x86/x86_64 binary that still has its symbols, is compiled with
position independent code (PIC/PIE), and does not use most of the C++ features,
then the RetroWrite solution might be for you. It decompiles to ASM files which
can then be instrumented with afl-gcc.
It is at about 80-85% performance.
[https://github.com/HexHive/retrowrite](https://github.com/HexHive/retrowrite)
### Dyninst
Dyninst is a binary instrumentation framework similar to Pintool and DynamoRIO.
However, whereas Pintool and DynamoRIO work at runtime, Dyninst instruments the
target at load time and then lets it run - or saves the binary with the changes.
This is great for some things, e.g., fuzzing, and not so effective for others,
e.g., malware analysis.
So, what you can do with Dyninst is taking every basic block and putting AFL++'s
instrumentation code in there - and then save the binary. Afterwards, just fuzz
the newly saved target binary with afl-fuzz. Sounds great? It is. The issue
though - it is a non-trivial problem to insert instructions, which change
addresses in the process space, so that everything is still working afterwards.
Hence, more often than not binaries crash when they are run.
The speed decrease is about 15-35%, depending on the optimization options used
with afl-dyninst.
[https://github.com/vanhauser-thc/afl-dyninst](https://github.com/vanhauser-thc/afl-dyninst)
### Mcsema
Theoretically, you can also decompile to llvm IR with mcsema, and then use
llvm_mode to instrument the binary. Good luck with that.
[https://github.com/lifting-bits/mcsema](https://github.com/lifting-bits/mcsema)
## Binary tracers
### Pintool & DynamoRIO
Pintool and DynamoRIO are dynamic instrumentation engines. They can be used for
getting basic block information at runtime. Pintool is only available for Intel
x32/x64 on Linux, Mac OS, and Windows, whereas DynamoRIO is additionally
available for ARM and AARCH64. DynamoRIO is also 10x faster than Pintool.
The big issue with DynamoRIO (and therefore Pintool, too) is speed. DynamoRIO
has a speed decrease of 98-99%, Pintool has a speed decrease of 99.5%.
Hence, DynamoRIO is the option to go for if everything else fails and Pintool
only if DynamoRIO fails, too.
DynamoRIO solutions:
* [https://github.com/vanhauser-thc/afl-dynamorio](https://github.com/vanhauser-thc/afl-dynamorio)
* [https://github.com/mxmssh/drAFL](https://github.com/mxmssh/drAFL)
* [https://github.com/googleprojectzero/winafl/](https://github.com/googleprojectzero/winafl/)
<= very good but windows only
Pintool solutions:
* [https://github.com/vanhauser-thc/afl-pin](https://github.com/vanhauser-thc/afl-pin)
* [https://github.com/mothran/aflpin](https://github.com/mothran/aflpin)
* [https://github.com/spinpx/afl_pin_mode](https://github.com/spinpx/afl_pin_mode)
<= only old Pintool version supported
### Intel PT
If you have a newer Intel CPU, you can make use of Intel's processor trace. The
big issue with Intel's PT is the small buffer size and the complex encoding of
the debug information collected through PT. This makes the decoding very CPU
intensive and hence slow. As a result, the overall speed decrease is about
70-90% (depending on the implementation and other factors).
There are two AFL intel-pt implementations:
1. [https://github.com/junxzm1990/afl-pt](https://github.com/junxzm1990/afl-pt)
=> This needs Ubuntu 14.04.05 without any updates and the 4.4 kernel.
2. [https://github.com/hunter-ht-2018/ptfuzzer](https://github.com/hunter-ht-2018/ptfuzzer)
=> This needs a 4.14 or 4.15 kernel. The "nopti" kernel boot option must be
used. This one is faster than the other.
Note that there is also honggfuzz:
[https://github.com/google/honggfuzz](https://github.com/google/honggfuzz). But
its IPT performance is just 6%!
## Non-AFL++ solutions
There are many binary-only fuzzing frameworks. Some are great for CTFs but don't
work with large binaries, others are very slow but have good path discovery,
some are very hard to set-up...
* Jackalope:
[https://github.com/googleprojectzero/Jackalope](https://github.com/googleprojectzero/Jackalope)
* Manticore:
[https://github.com/trailofbits/manticore](https://github.com/trailofbits/manticore)
* QSYM:
[https://github.com/sslab-gatech/qsym](https://github.com/sslab-gatech/qsym)
* S2E: [https://github.com/S2E](https://github.com/S2E)
* TinyInst:
[https://github.com/googleprojectzero/TinyInst](https://github.com/googleprojectzero/TinyInst)
(Mac/Windows only)
* ... please send me any missing that are good
## Closing words
That's it! News, corrections, updates? Send an email to vh@thc.org.

861
docs/fuzzing_in_depth.md Normal file
View File

@ -0,0 +1,861 @@
# Fuzzing with AFL++
The following describes how to fuzz with a target if source code is available.
If you have a binary-only target, go to
[fuzzing_binary-only_targets.md](fuzzing_binary-only_targets.md).
Fuzzing source code is a three-step process:
1. Compile the target with a special compiler that prepares the target to be
fuzzed efficiently. This step is called "instrumenting a target".
2. Prepare the fuzzing by selecting and optimizing the input corpus for the
target.
3. Perform the fuzzing of the target by randomly mutating input and assessing if
a generated input was processed in a new path in the target binary.
## 0. Common sense risks
Please keep in mind that, similarly to many other computationally-intensive
tasks, fuzzing may put a strain on your hardware and on the OS. In particular:
- Your CPU will run hot and will need adequate cooling. In most cases, if
cooling is insufficient or stops working properly, CPU speeds will be
automatically throttled. That said, especially when fuzzing on less suitable
hardware (laptops, smartphones, etc.), it's not entirely impossible for
something to blow up.
- Targeted programs may end up erratically grabbing gigabytes of memory or
filling up disk space with junk files. AFL++ tries to enforce basic memory
limits, but can't prevent each and every possible mishap. The bottom line is
that you shouldn't be fuzzing on systems where the prospect of data loss is
not an acceptable risk.
- Fuzzing involves billions of reads and writes to the filesystem. On modern
systems, this will be usually heavily cached, resulting in fairly modest
"physical" I/O - but there are many factors that may alter this equation. It
is your responsibility to monitor for potential trouble; with very heavy I/O,
the lifespan of many HDDs and SSDs may be reduced.
A good way to monitor disk I/O on Linux is the `iostat` command:
```shell
$ iostat -d 3 -x -k [...optional disk ID...]
```
Using the `AFL_TMPDIR` environment variable and a RAM-disk, you can have the
heavy writing done in RAM to prevent the aforementioned wear and tear. For
example, the following line will run a Docker container with all this preset:
```shell
# docker run -ti --mount type=tmpfs,destination=/ramdisk -e AFL_TMPDIR=/ramdisk aflplusplus/aflplusplus
```
## 1. Instrumenting the target
### a) Selecting the best AFL++ compiler for instrumenting the target
AFL++ comes with a central compiler `afl-cc` that incorporates various compiler
targets and instrumentation options. The following evaluation flow will help you
to select the best possible one.

It is highly recommended to have the newest llvm version possible installed;
anything below 9 is not recommended.
```
+--------------------------------+
| clang/clang++ 11+ is available | --> use LTO mode (afl-clang-lto/afl-clang-lto++)
+--------------------------------+ see [instrumentation/README.lto.md](instrumentation/README.lto.md)
|
| if not, or if the target fails with LTO afl-clang-lto/++
|
v
+---------------------------------+
| clang/clang++ 3.8+ is available | --> use LLVM mode (afl-clang-fast/afl-clang-fast++)
+---------------------------------+ see [instrumentation/README.llvm.md](instrumentation/README.llvm.md)
|
| if not, or if the target fails with LLVM afl-clang-fast/++
|
v
+--------------------------------+
| gcc 5+ is available | -> use GCC_PLUGIN mode (afl-gcc-fast/afl-g++-fast)
+--------------------------------+ see [instrumentation/README.gcc_plugin.md](instrumentation/README.gcc_plugin.md) and
[instrumentation/README.instrument_list.md](instrumentation/README.instrument_list.md)
|
| if not, or if you do not have a gcc with plugin support
|
v
use GCC mode (afl-gcc/afl-g++) (or afl-clang/afl-clang++ for clang)
```
Clickable README links for the chosen compiler:
* [LTO mode - afl-clang-lto](../instrumentation/README.lto.md)
* [LLVM mode - afl-clang-fast](../instrumentation/README.llvm.md)
* [GCC_PLUGIN mode - afl-gcc-fast](../instrumentation/README.gcc_plugin.md)
* GCC/CLANG modes (afl-gcc/afl-clang) have no README as they have no own
features
You can select the mode for the afl-cc compiler by:
1. using a symlink to afl-cc: afl-gcc, afl-g++, afl-clang, afl-clang++,
   afl-clang-fast, afl-clang-fast++, afl-clang-lto, afl-clang-lto++,
   afl-gcc-fast, afl-g++-fast (recommended!)
2. using the environment variable AFL_CC_COMPILER with MODE
3. passing --afl-MODE command line options to the compiler via
CFLAGS/CXXFLAGS/CPPFLAGS
MODE can be one of: LTO (afl-clang-lto*), LLVM (afl-clang-fast*), GCC_PLUGIN
(afl-g*-fast), GCC (afl-gcc/afl-g++), or CLANG (afl-clang/afl-clang++).
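A short sketch of these three selection methods (the source file is a
placeholder; the exact spelling of the `--afl-...` option is an assumption,
check `afl-cc -hh` for your version):

```shell
# 1. select the mode via the symlink name
afl-clang-lto -c target.c
# 2. select the mode via the environment variable
AFL_CC_COMPILER=LLVM afl-cc -c target.c
# 3. select the mode via a pass-through command-line option
afl-cc --afl-lto -c target.c
```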
Because no AFL++ specific command-line options are accepted (beside the
--afl-MODE command), the compile-time tools make fairly broad use of environment
variables, which can be listed with `afl-cc -hh` or by reading
[env_variables.md](env_variables.md).
### b) Selecting instrumentation options
The following options are available when you instrument with LTO mode
(afl-clang-fast/afl-clang-lto):
* Splitting integer, string, float and switch comparisons so AFL++ can solve
  these more easily. This is an important option if you do not have a very good
  and large input corpus. This technique is called laf-intel or COMPCOV. To use
  this, set the following environment variable before compiling the target:
  `export AFL_LLVM_LAF_ALL=1`. You can read more about this in
  [instrumentation/README.laf-intel.md](../instrumentation/README.laf-intel.md).
* A different technique (and usually a better one than laf-intel) is to
instrument the target so that any compare values in the target are sent to
AFL++ which then tries to put these values into the fuzzing data at different
locations. This technique is very fast and good - if the target does not
transform input data before comparison. Therefore this technique is called
`input to state` or `redqueen`. If you want to use this technique, then you
have to compile the target twice, once specifically with/for this mode by
setting `AFL_LLVM_CMPLOG=1`, and pass this binary to afl-fuzz via the `-c`
  parameter. Note that you can also compile just a cmplog binary and use that
  for both; however, there will be a performance penalty. You can read more
about this in
[instrumentation/README.cmplog.md](../instrumentation/README.cmplog.md).
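A minimal sketch of the two-binary CMPLOG setup just described (file names are
placeholders):

```shell
# regular instrumented binary
afl-clang-fast -o target-fuzz target.c
# CMPLOG-instrumented binary built from the same source
AFL_LLVM_CMPLOG=1 afl-clang-fast -o target-cmplog target.c
# fuzz: -c points afl-fuzz to the cmplog binary
afl-fuzz -i input -o output -c ./target-cmplog -- ./target-fuzz @@
```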
If you use LTO, LLVM or GCC_PLUGIN mode
(afl-clang-fast/afl-clang-lto/afl-gcc-fast) you have the option to selectively
only instrument parts of the target that you are interested in:
* To instrument only those parts of the target that you are interested in,
  create a file with all the filenames of the source code that should be
  instrumented. For afl-clang-lto and afl-gcc-fast - or afl-clang-fast if a mode
  other than DEFAULT/PCGUARD is used or you have llvm > 10.0.0 - just put one
  filename or function per line (no directory information necessary for
  filenames), and either set `export AFL_LLVM_ALLOWLIST=allowlist.txt` **or**
  `export AFL_LLVM_DENYLIST=denylist.txt` - depending on whether you want to
  instrument by default unless noted otherwise (DENYLIST) or to not perform
  instrumentation unless requested (ALLOWLIST). **NOTE:** During optimization,
  functions might be inlined and then would not match! See
  [instrumentation/README.instrument_list.md](../instrumentation/README.instrument_list.md)
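A small sketch of the allowlist variant just described (file names are made up
for illustration):

```shell
# one source file name per line, no directory information needed
printf 'parser.c\ndecoder.c\n' > allowlist.txt
export AFL_LLVM_ALLOWLIST=allowlist.txt
# only parser.c and decoder.c will be instrumented
afl-clang-lto -o target main.c parser.c decoder.c util.c
```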
There are many more options and modes available, however, these are most of the
time less effective. See:
* [instrumentation/README.llvm.md#6) AFL++ Context Sensitive Branch Coverage](../instrumentation/README.llvm.md#6-afl-context-sensitive-branch-coverage)
* [instrumentation/README.llvm.md#7) AFL++ N-Gram Branch Coverage](../instrumentation/README.llvm.md#7-afl-n-gram-branch-coverage)
AFL++ performs "never zero" counting in its bitmap. You can read more about this
here:
* [instrumentation/README.llvm.md#8-neverzero-counters](../instrumentation/README.llvm.md#8-neverzero-counters)
### c) Selecting sanitizers
It is possible to use sanitizers when instrumenting targets for fuzzing, which
allows you to find bugs that would not necessarily result in a crash.
Note that sanitizers have a huge impact on CPU (= fewer executions per second)
and RAM usage. Also, you should only run one afl-fuzz instance per sanitizer
type. This is enough because a use-after-free bug will be picked up, e.g., by
ASAN (address sanitizer) anyway when syncing to other fuzzing instances, so not
all fuzzing instances need to be instrumented with ASAN.
The following sanitizers have built-in support in AFL++:
* ASAN = Address SANitizer, finds memory corruption vulnerabilities like
use-after-free, NULL pointer dereference, buffer overruns, etc. Enabled with
`export AFL_USE_ASAN=1` before compiling.
* MSAN = Memory SANitizer, finds read access to uninitialized memory, e.g., a
local variable that is defined and read before it is even set. Enabled with
`export AFL_USE_MSAN=1` before compiling.
* UBSAN = Undefined Behavior SANitizer, finds instances where - by the C and C++
standards - undefined behavior happens, e.g., adding two signed integers
together where the result is larger than a signed integer can hold. Enabled
with `export AFL_USE_UBSAN=1` before compiling.
* CFISAN = Control Flow Integrity SANitizer, finds instances where the control
flow is found to be illegal. Originally this was rather to prevent return
oriented programming exploit chains from functioning, in fuzzing this is
mostly reduced to detecting type confusion vulnerabilities - which is,
however, one of the most important and dangerous C++ memory corruption
classes! Enabled with `export AFL_USE_CFISAN=1` before compiling.
* TSAN = Thread SANitizer, finds thread race conditions. Enabled with `export
AFL_USE_TSAN=1` before compiling.
* LSAN = Leak SANitizer, finds memory leaks in a program. This is not really a
security issue, but for developers this can be very valuable. Note that unlike
the other sanitizers above this needs `__AFL_LEAK_CHECK();` added to all areas
of the target source code where you find a leak check necessary! Enabled with
`export AFL_USE_LSAN=1` before compiling.
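For example (a sketch; build one separately instrumented binary per sanitizer,
file names are placeholders):

```shell
# ASAN-instrumented build for one dedicated fuzzing instance
AFL_USE_ASAN=1 afl-clang-fast -o target-asan target.c
# UBSAN-instrumented build for another instance
AFL_USE_UBSAN=1 afl-clang-fast -o target-ubsan target.c
```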
It is possible to further modify the behavior of the sanitizers at run-time by
setting `ASAN_OPTIONS=...`, `LSAN_OPTIONS` etc. - the available parameters can
be looked up in the sanitizer documentation of llvm/clang. afl-fuzz, however,
requires some specific parameters important for fuzzing to be set. If you want
to set your own, it might bail and report what it is missing.
Note that some sanitizers cannot be used together, e.g., ASAN and MSAN, and
others often cannot work together because of target weirdness, e.g., ASAN and
CFISAN. You might need to experiment which sanitizers you can combine in a
target (which means more instances can be run without a sanitized target, which
is more effective).
### d) Modifying the target
If the target has features that make fuzzing more difficult, e.g., checksums,
HMAC, etc., then modify the source code so that checks for these values are
removed. This can even be done safely for source code used in operational
products by eliminating these checks within these AFL++ specific blocks:
```
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
// say that the checksum or HMAC was fine - or whatever is required
// to eliminate the need for the fuzzer to guess the right checksum
return 0;
#endif
```
All AFL++ compilers will set this preprocessor definition automatically.
### e) Instrumenting the target
In this step the target source code is compiled so that it can be fuzzed.
Basically you have to tell the target build system that the selected AFL++
compiler is used. Also - if possible - you should always configure the build
system such that the target is compiled statically and not dynamically. How to
do this is described below.
The #1 rule when instrumenting a target is: avoid instrumenting shared libraries
at all cost. You would need to set LD_LIBRARY_PATH to point to these, you could
accidentally type "make install" and install them system wide - so don't. Really
don't. **Always compile libraries you want to have instrumented as static and
link these to the target program!**
Then build the target. (Usually with `make`)
**NOTES**
1. Sometimes configure and build systems are fickle and do not like stderr
output (and think this means a test failure) - which is something AFL++ likes
to do to show statistics. It is recommended to disable AFL++ instrumentation
reporting via `export AFL_QUIET=1`.
2. Sometimes configure and build systems error on warnings - these should be
disabled (e.g., `--disable-werror` for some configure scripts).
3. In case the configure/build system complains about AFL++'s compiler and
aborts, then set `export AFL_NOOPT=1` which will then just behave like the
real compiler. This option has to be unset again before building the target!
#### configure
For `configure` build systems this is usually done by:
`CC=afl-clang-fast CXX=afl-clang-fast++ ./configure --disable-shared`
Note that if you are using the (better) afl-clang-lto compiler you also have to
set AR to llvm-ar[-VERSION] and RANLIB to llvm-ranlib[-VERSION] - as is
described in [instrumentation/README.lto.md](../instrumentation/README.lto.md).
#### cmake
For `cmake` build systems this is usually done by:
`mkdir build; cd build; cmake -DCMAKE_C_COMPILER=afl-cc -DCMAKE_CXX_COMPILER=afl-c++ ..`
Note that if you are using the (better) afl-clang-lto compiler you also have to
set AR to llvm-ar[-VERSION] and RANLIB to llvm-ranlib[-VERSION] - as is
described in [instrumentation/README.lto.md](../instrumentation/README.lto.md).
#### meson
For meson you have to set the AFL++ compiler with the very first command!
`CC=afl-cc CXX=afl-c++ meson`
#### other build systems or if configure/cmake didn't work
Sometimes cmake and configure do not pick up the AFL++ compiler, or the
ranlib/ar that is needed - because this was just not foreseen by the developer
of the target. Or they have non-standard options. Figure out if there is a
non-standard way to set this, otherwise set up the build normally and edit the
generated build environment afterwards manually to point it to the right
compiler (and/or ranlib and ar).
### f) Better instrumentation
If you just fuzz a target program as-is you are wasting a great opportunity for
much more fuzzing speed.
This variant requires the usage of afl-clang-lto, afl-clang-fast or
afl-gcc-fast.
It is the so-called `persistent mode`, which is much, much faster but requires
that you code a source file that is specifically calling the target functions
that you want to fuzz, plus a few specific AFL++ functions around it. See
[instrumentation/README.persistent_mode.md](../instrumentation/README.persistent_mode.md)
for details.
Basically if you do not fuzz a target in persistent mode, then you are just
doing it for a hobby and not professionally :-).
### g) libfuzzer fuzzer harnesses with LLVMFuzzerTestOneInput()
libfuzzer `LLVMFuzzerTestOneInput()` harnesses are the defacto standard for
fuzzing, and they can be used with AFL++ (and honggfuzz) as well!
Compiling them is as simple as:
```
afl-clang-fast++ -fsanitize=fuzzer -o harness harness.cpp targetlib.a
```
You can even use advanced libfuzzer features like `FuzzedDataProvider`,
`LLVMFuzzerMutate()` etc. and they will work!
The generated binary is fuzzed with afl-fuzz like any other fuzz target.
Bonus: the target is already optimized for fuzzing due to persistent mode and
shared-memory test cases and hence gives you the fastest speed possible.
For more information, see
[utils/aflpp_driver/README.md](../utils/aflpp_driver/README.md).
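The resulting binary can then be fuzzed directly (a sketch; thanks to
shared-memory test cases, no `@@` file argument should be needed):

```shell
afl-fuzz -i input -o output -- ./harness
```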
## 2. Preparing the fuzzing campaign
As you fuzz the target with mutated input, having inputs for the target that
are as diverse as possible improves the efficiency a lot.
### a) Collecting inputs
To operate correctly, the fuzzer requires one or more starting files that
contain a good example of the input data normally expected by the targeted
application.
Try to gather valid inputs for the target from wherever you can. E.g., if it is
the PNG picture format, try to find as many PNG files as possible, e.g., from
reported bugs, test suites, random downloads from the internet, unit test case
data - from all kind of PNG software.
If the input format is not known, you can also modify a target program to write
normal data it receives and processes to a file and use these.
You can find many good examples of starting files in the
[testcases/](../testcases) subdirectory that comes with this tool.
### b) Making the input corpus unique
Use the AFL++ tool `afl-cmin` to remove inputs from the corpus that do not
produce a new path in the target.
Put all files from step a) into one directory, e.g., INPUTS.
If the target program is to be called by fuzzing as `bin/target -d INPUTFILE`,
then run afl-cmin like this:
`afl-cmin -i INPUTS -o INPUTS_UNIQUE -- bin/target -d @@`
Note that the INPUTFILE argument that the target program would read from has to
be set as `@@`.
If the target reads from stdin instead, just omit the `@@` as this is the
default.
This step is highly recommended!
### c) Minimizing all corpus files
The shorter the input files that still traverse the same path within the target,
the better the fuzzing will be. This minimization is done with `afl-tmin`,
however, it is a long process as this has to be done for every file:
```
mkdir input
cd INPUTS_UNIQUE
for i in *; do
afl-tmin -i "$i" -o "../input/$i" -- bin/target -d @@
done
```
This step can also be parallelized, e.g., with `parallel`. Note that this step
is rather optional though.
### Done!
The INPUTS_UNIQUE/ directory from step b) - or even better the directory input/
if you minimized the corpus in step c) - is the resulting input corpus directory
to be used in fuzzing! :-)
## 3. Fuzzing the target
In this final step, fuzz the target. There are not that many important options
to run the target - unless you want to use many CPU cores/threads for the
fuzzing, which will make the fuzzing much more useful.
If you just use one CPU for fuzzing, then you are fuzzing just for fun and not
seriously :-)
### a) Running afl-fuzz
Before you do even a test run of afl-fuzz execute `sudo afl-system-config` (on
the host if you execute afl-fuzz in a docker container). This reconfigures the
system for optimal speed - which afl-fuzz checks and bails otherwise. Set
`export AFL_SKIP_CPUFREQ=1` for afl-fuzz to skip this check if you cannot run
afl-system-config with root privileges on the host for whatever reason.
Note there is also `sudo afl-persistent-config` which sets additional permanent
boot options for a much better fuzzing performance.
Note that both scripts improve your fuzzing performance but also decrease your
system protection against attacks! So set strong firewall rules and only expose
SSH as a network service if you use these (which is highly recommended).
If you have an input corpus from step 2, then specify this directory with the
`-i` option. Otherwise, create a new directory and create a file with any
content as test data in there.
If you do not want anything special, the defaults are already usually best,
hence all you need is to specify the seed input directory with the result of
step [2a) Collecting inputs](#a-collecting-inputs):
`afl-fuzz -i input -o output -- bin/target -d @@`
Note that the directory specified with `-o` will be created if it does not
exist.
It can be valuable to run afl-fuzz in a screen or tmux shell so you can log off,
or afl-fuzz is not aborted if you are running it in a remote ssh session where
the connection fails in between. Only do that though once you have verified that
your fuzzing setup works! Run it like `screen -dmS afl-main -- afl-fuzz -M
main-$HOSTNAME -i ...` and it will start away in a screen session. To enter this
session, type `screen -r afl-main`. You see - it makes sense to name the screen
session the same as the afl-fuzz -M/-S naming :-) For more information on screen
or tmux, check their documentation.
If you need to stop and re-start the fuzzing, use the same command line options
(or even change them by selecting a different power schedule or another mutation
mode!) and switch the input directory with a dash (`-`):
`afl-fuzz -i - -o output -- bin/target -d @@`
Adding a dictionary is helpful. See the directory
[dictionaries/](../dictionaries/) if something is already included for your data
format, and tell afl-fuzz to load that dictionary by adding `-x
dictionaries/FORMAT.dict`. With afl-clang-lto, you have an autodictionary
generation for which you need to do nothing except to use afl-clang-lto as the
compiler. You also have the option to generate a dictionary yourself, see
[utils/libtokencap/README.md](../utils/libtokencap/README.md).
afl-fuzz has a variety of options that help to work around target quirks like
specific locations for the input file (`-f`), performing deterministic fuzzing
(`-D`) and many more. Check out `afl-fuzz -h`.
We highly recommend that you set a memory limit for running the target with `-m`
which defines the maximum memory in MB. This prevents a potential out-of-memory
problem for your system plus helps you detect missing `malloc()` failure
handling in the target. Play around with various -m values until you find one
that safely works for all your input seeds (if you have good ones), and then
double or quadruple that.
By default, afl-fuzz never stops fuzzing. To terminate AFL++, press Control-C or
send a signal SIGINT. You can also limit the number of executions or set an
approximate runtime in seconds via command-line options.
When you start afl-fuzz you will see a user interface that shows what the status
is:
![resources/screenshot.png](resources/screenshot.png)
All labels are explained in
[afl-fuzz_approach.md#understanding-the-status-screen](afl-fuzz_approach.md#understanding-the-status-screen).
### b) Keeping memory use and timeouts in check
Memory limits are not enforced by afl-fuzz by default and the system may run out
of memory. You can decrease the memory with the `-m` option, the value is in MB.
If this is too small for the target, you can usually see this by afl-fuzz
bailing with the message that it could not connect to the forkserver.
Consider setting low values for `-m` and `-t`.
For programs that are nominally very fast, but get sluggish for some inputs, you
can also try setting `-t` values that are more punishing than what `afl-fuzz`
dares to use on its own. On fast and idle machines, going down to `-t 5` may be
a viable plan.
The `-m` parameter is worth looking at, too. Some programs can end up spending a
fair amount of time allocating and initializing megabytes of memory when
presented with pathological inputs. Low `-m` values can make them give up sooner
and not waste CPU time.
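A hedged example combining both limits (the values are only illustrations, tune
them to your target):

```shell
# 100 MB memory limit, 5 ms timeout for a very fast target
afl-fuzz -i input -o output -m 100 -t 5 -- bin/target -d @@
```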
### c) Using multiple cores
If you want to seriously fuzz, then use as many cores/threads as possible to
fuzz your target.
On the same machine - due to the design of how AFL++ works - there is a maximum
number of CPU cores/threads that are useful; using more will degrade the overall
performance instead. This value depends on the target, and the limit is
between 32 and 64 cores per machine.
If you have the RAM, it is highly recommended to run the instances with caching
of the test cases. Depending on the average test case size (and those found
during fuzzing) and their number, a value between 50-500MB is recommended. You
can set the cache size (in MB) by setting the environment variable
`AFL_TESTCACHE_SIZE`.
There should be one main fuzzer (`-M main-$HOSTNAME` option) and as many
secondary fuzzers (e.g., `-S variant1`) as you have cores that you use. Every
-M/-S entry needs a unique name (which can be whatever you like); however, the
same -o output directory location has to be used for all instances.
For every secondary fuzzer there should be a variation, e.g.:
* one should fuzz the target that was compiled differently: with sanitizers
activated (`export AFL_USE_ASAN=1 ; export AFL_USE_UBSAN=1 ; export
AFL_USE_CFISAN=1`)
* one or two should fuzz the target with CMPLOG/redqueen (see above), at least
one cmplog instance should follow transformations (`-l AT`)
* one to three fuzzers should fuzz a target compiled with laf-intel/COMPCOV (see
above). Important note: If you run more than one laf-intel/COMPCOV fuzzer and
you want them to share their intermediate results, the main fuzzer (`-M`) must
be one of them! (Although this is not really recommended.)
All other secondaries should be used like this:
* a quarter to a third with the MOpt mutator enabled: `-L 0`
* run with a different power schedule, recommended are:
`fast (default), explore, coe, lin, quad, exploit and rare` which you can set
with, e.g., `-p explore`
* a few instances should use the old queue cycling with `-Z`
Also, it is recommended to set `export AFL_IMPORT_FIRST=1` to load test cases
from other fuzzers in the campaign first.
If you have a large corpus, a corpus from a previous run or are fuzzing in a CI,
then also set `export AFL_CMPLOG_ONLY_NEW=1` and `export AFL_FAST_CAL=1`.
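A minimal sketch of such a campaign could look like the following; the binary
names `target.plain`, `target.asan`, `target.cmplog`, and `target.laf` are
placeholders for differently compiled builds of your target, and each command
should run in its own terminal or tmux/screen window:

```shell
export AFL_IMPORT_FIRST=1 AFL_TESTCACHE_SIZE=250
afl-fuzz -i input -o output -M main-$HOSTNAME -- ./target.plain @@
afl-fuzz -i input -o output -S asan1 -- ./target.asan @@
afl-fuzz -i input -o output -S cmplog1 -c ./target.cmplog -l AT -- ./target.plain @@
afl-fuzz -i input -o output -S laf1 -p explore -- ./target.laf @@
afl-fuzz -i input -o output -S mopt1 -L 0 -- ./target.plain @@
```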
You can also use different fuzzers. If you are using AFL spinoffs or AFL
conforming fuzzers, then just use the same -o directory and give it a unique
`-S` name. Examples are:
* [Fuzzolic](https://github.com/season-lab/fuzzolic)
* [symcc](https://github.com/eurecom-s3/symcc/)
* [Eclipser](https://github.com/SoftSec-KAIST/Eclipser/)
* [AFLsmart](https://github.com/aflsmart/aflsmart)
* [FairFuzz](https://github.com/carolemieux/afl-rb)
* [Neuzz](https://github.com/Dongdongshe/neuzz)
* [Angora](https://github.com/AngoraFuzzer/Angora)
A long list can be found at
[https://github.com/Microsvuln/Awesome-AFL](https://github.com/Microsvuln/Awesome-AFL).
However, you can also sync AFL++ with honggfuzz, libfuzzer with `-entropic=1`,
etc. Just show the main fuzzer (-M) with the `-F` option where the queue/work
directory of a different fuzzer is, e.g., `-F /src/target/honggfuzz`. Using
honggfuzz (with `-n 1` or `-n 2`) and libfuzzer in parallel is highly
recommended!
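Using the honggfuzz directory mentioned above, this could look like the
following (only the `-M` instance needs the `-F` option):

```shell
afl-fuzz -i input -o output -M main-$HOSTNAME -F /src/target/honggfuzz -- bin/target -d @@
```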
### d) Using multiple machines for fuzzing
Maybe you have more than one machine you want to fuzz the same target on. Start
the `afl-fuzz` (and perhaps libfuzzer, honggfuzz, ...) orchestra as you like,
just ensure that you have one and only one `-M` instance per server, and that
its name is unique, hence the recommendation for `-M main-$HOSTNAME`.
Now there are three strategies on how you can sync between the servers:
* never: sounds weird, but this makes every server an island and gives each the
chance to follow different paths into the target. You can make this even more
interesting by giving different seeds to each server.
* regularly (~4h): this ensures that all fuzzing campaigns on the servers "see"
the same thing. It is like fuzzing on a huge server.
* in intervals of 1/10th of the overall expected runtime of the fuzzing you
sync. This tries to combine both approaches: each campaign on a server keeps
some individuality in the paths it explores, but if one gets stuck where
another has found progress, that progress is handed over and gets it unstuck.
The syncing process itself is very simple. As the `-M main-$HOSTNAME` instance
syncs to all `-S` secondaries as well as to other fuzzers, you have to copy only
this directory to the other machines.
Let's say all servers have the `-o out` directory in /target/foo/out, and you
created a file `servers.txt` which contains the hostnames of all participating
servers, plus you have an ssh key deployed to all of them, then run:
```bash
for FROM in `cat servers.txt`; do
for TO in `cat servers.txt`; do
rsync -rlpogtz --rsh=ssh $FROM:/target/foo/out/main-$FROM $TO:target/foo/out/
done
done
```
You can run this manually, per cron job - as you need it. There is a more
complex and configurable script in `utils/distributed_fuzzing`.
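As a hedged example, if you save the loop above as a script (the path
`/usr/local/bin/sync_fuzzers.sh` is just an assumption), a crontab entry for a
sync every four hours could look like this:

```shell
0 */4 * * * /usr/local/bin/sync_fuzzers.sh >/dev/null 2>&1
```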
### e) The status of the fuzz campaign
AFL++ comes with the `afl-whatsup` script to show the status of the fuzzing
campaign.
Just supply the directory that afl-fuzz is given with the `-o` option and you
will see a detailed status of every fuzzer in that campaign plus a summary.
To have only the summary, use the `-s` switch, e.g., `afl-whatsup -s out/`.
If you have multiple servers, then run the command after a sync, or you have to
execute this script on each server.
Another tool to inspect the current state and history of a specific instance is
afl-plot, which generates an index.html file and graphs that show how the
fuzzing instance is performing. The syntax is `afl-plot instance_dir web_dir`,
e.g., `afl-plot out/default /srv/www/htdocs/plot`.
### f) Stopping fuzzing, restarting fuzzing, adding new seeds
To stop an afl-fuzz run, press Control-C.
To restart an afl-fuzz run, just reuse the same command line but replace the `-i
directory` with `-i -` or set `AFL_AUTORESUME=1`.
If you want to add new seeds to a fuzzing campaign you can run a temporary
fuzzing instance, e.g., when your main fuzzer is using `-o out` and the new
seeds are in `newseeds/` directory:
```
AFL_BENCH_JUST_ONE=1 AFL_FAST_CAL=1 afl-fuzz -i newseeds -o out -S newseeds -- ./target
```
### g) Checking the coverage of the fuzzing
The `paths found` value is a bad indicator for checking how good the coverage
is.
A better indicator - if you use default llvm instrumentation with at least
version 9 - is to use `afl-showmap` with the collect coverage option `-C` on the
output directory:
```
$ afl-showmap -C -i out -o /dev/null -- ./target -params @@
...
[*] Using SHARED MEMORY FUZZING feature.
[*] Target map size: 9960
[+] Processed 7849 input files.
[+] Captured 4331 tuples (highest value 255, total values 67130596) in '/dev/null'.
[+] A coverage of 4331 edges were achieved out of 9960 existing (43.48%) with 7849 input files.
```
It is even better to check out the exact lines of code that have been reached -
and which have not been found so far.
An "easy" helper script for this is
[https://github.com/vanhauser-thc/afl-cov](https://github.com/vanhauser-thc/afl-cov),
just follow the README of that separate project.
If you see that an important area or a feature has not been covered so far, then
try to find an input that is able to reach that and start a new secondary in
that fuzzing campaign with that seed as input, let it run for a few minutes,
then terminate it. The main node will pick it up and make it available to the
other secondary nodes over time. Set `export AFL_NO_AFFINITY=1` or `export
AFL_TRY_AFFINITY=1` if you have no free core.
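A sketch of such a temporary secondary, assuming a hand-crafted seed and the
hypothetical directory name `newfeature/`:

```shell
mkdir newfeature && cp my_handcrafted_seed newfeature/
AFL_NO_AFFINITY=1 afl-fuzz -i newfeature -o out -S newfeature -- bin/target -d @@
# let it run for a few minutes, then press Control-C; the main node will pick
# up the results over time
```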
Note that in nearly all cases you can never reach full coverage. A lot of
functionality is usually dependent on exclusive options that would need
individual fuzzing campaigns each with one of these options set. E.g., if you
fuzz a library to convert image formats and your target is the png to tiff API,
then you will not touch any of the other library APIs and features.
### h) How long to fuzz a target?
This is a difficult question. Basically, if no new path is found for a long time
(e.g., for a day or a week), then you can expect that your fuzzing won't be
fruitful anymore. However, often this just means that you should switch out
secondaries for others, e.g., custom mutator modules, sync to very different
fuzzers, etc.
Keep the queue/ directory (for future fuzzing runs of the same or similar
targets) and use it to seed other good fuzzers like libFuzzer with the
-entropic switch or honggfuzz.
### i) Improve the speed!
* Use [persistent mode](../instrumentation/README.persistent_mode.md) (x2-x20
speed increase)
* If you do not use shmem persistent mode, use `AFL_TMPDIR` to place the input
file on a tmpfs location, see [env_variables.md](env_variables.md) and the
example after this list
* Linux: Improve kernel performance: modify `/etc/default/grub`, set
`GRUB_CMDLINE_LINUX_DEFAULT="ibpb=off ibrs=off kpti=off l1tf=off mds=off
mitigations=off no_stf_barrier noibpb noibrs nopcid nopti
nospec_store_bypass_disable nospectre_v1 nospectre_v2 pcid=off pti=off
spec_store_bypass_disable=off spectre_v2=off stf_barrier=off"`; then
`update-grub` and `reboot` (warning: makes the system more insecure) - you can
also just run `sudo afl-persistent-config`
* Linux: Running on an `ext2` filesystem with `noatime` mount option will be a
bit faster than on any other journaling filesystem
* Use your cores! [3c) Using multiple cores](#c-using-multiple-cores)
* Run `sudo afl-system-config` before starting the first afl-fuzz instance after
a reboot
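As referenced in the `AFL_TMPDIR` item above, a minimal tmpfs setup could look
like this (the `~/ramdisk` path and 512 MB size are arbitrary examples):

```shell
mkdir ~/ramdisk
sudo mount -t tmpfs -o size=512M tmpfs ~/ramdisk
export AFL_TMPDIR=~/ramdisk
```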
### j) Going beyond crashes
Fuzzing is a wonderful and underutilized technique for discovering non-crashing
design and implementation errors, too. Quite a few interesting bugs have been
found by modifying the target programs to call `abort()` when say:
- Two bignum libraries produce different outputs when given the same
fuzzer-generated input.
- An image library produces different outputs when asked to decode the same
input image several times in a row.
- A serialization/deserialization library fails to produce stable outputs when
iteratively serializing and deserializing fuzzer-supplied data.
- A compression library produces an output inconsistent with the input file when
asked to compress and then decompress a particular blob.
Implementing these or similar sanity checks usually takes very little time; if
you are the maintainer of a particular package, you can make this code
conditional with `#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION` (a flag also
shared with libfuzzer and honggfuzz) or `#ifdef __AFL_COMPILER` (this one is
just for AFL++).
### k) Known limitations & areas for improvement
Here are some of the most important caveats for AFL++:
- AFL++ detects faults by checking for the first spawned process dying due to a
signal (SIGSEGV, SIGABRT, etc.). Programs that install custom handlers for
these signals may need to have the relevant code commented out. In the same
vein, faults in child processes spawned by the fuzzed target may evade
detection unless you manually add some code to catch that.
- As with any other brute-force tool, the fuzzer offers limited coverage if
encryption, checksums, cryptographic signatures, or compression are used to
wholly wrap the actual data format to be tested.
To work around this, you can comment out the relevant checks (see
utils/libpng_no_checksum/ for inspiration); if this is not possible, you can
also write a postprocessor, one of the hooks of custom mutators. See
[custom_mutators.md](custom_mutators.md) on how to use
`AFL_CUSTOM_MUTATOR_LIBRARY`.
- There are some unfortunate trade-offs with ASAN and 64-bit binaries. This
isn't due to any specific fault of afl-fuzz.
- There is no direct support for fuzzing network services, background daemons,
or interactive apps that require UI interaction to work. You may need to make
simple code changes to make them behave in a more traditional way. Preeny may
offer a relatively simple option, too - see:
[https://github.com/zardus/preeny](https://github.com/zardus/preeny)
Some useful tips for modifying network-based services can be also found at:
[https://www.fastly.com/blog/how-to-fuzz-server-american-fuzzy-lop](https://www.fastly.com/blog/how-to-fuzz-server-american-fuzzy-lop)
- Occasionally, sentient machines rise against their creators. If this happens
to you, please consult
[https://lcamtuf.coredump.cx/prep/](https://lcamtuf.coredump.cx/prep/).
Beyond this, see [INSTALL.md](INSTALL.md) for platform-specific tips.
## 4. Triaging crashes
The coverage-based grouping of crashes usually produces a small data set that
can be quickly triaged manually or with a very simple GDB or Valgrind script.
Every crash is also traceable to its parent non-crashing test case in the queue,
making it easier to diagnose faults.
Having said that, it's important to acknowledge that some fuzzing crashes can be
difficult to quickly evaluate for exploitability without a lot of debugging and
code analysis work. To assist with this task, afl-fuzz supports a very unique
"crash exploration" mode enabled with the -C flag.
In this mode, the fuzzer takes one or more crashing test cases as the input and
uses its feedback-driven fuzzing strategies to very quickly enumerate all code
paths that can be reached in the program while keeping it in the crashing state.
Mutations that do not result in a crash are rejected; so are any changes that do
not affect the execution path.
The output is a small corpus of files that can be very rapidly examined to see
what degree of control the attacker has over the faulting address, or whether it
is possible to get past an initial out-of-bounds read - and see what lies
beneath.
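A typical invocation simply uses one or more crashing inputs as the seed corpus
(directory names are examples):

```shell
./afl-fuzz -C -i crashing_inputs -o crash_exploration -- /path/to/program [...]
```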
Oh, one more thing: for test case minimization, give afl-tmin a try. The tool
can be operated in a very simple way:
```shell
./afl-tmin -i test_case -o minimized_result -- /path/to/program [...]
```
The tool works with crashing and non-crashing test cases alike. In the crash
mode, it will happily accept instrumented and non-instrumented binaries. In the
non-crashing mode, the minimizer relies on standard AFL++ instrumentation to
make the file simpler without altering the execution path.
The minimizer accepts the -m, -t, -f and @@ syntax in a manner compatible with
afl-fuzz.
Another tool in AFL++ is the afl-analyze tool. It takes an input file, attempts
to sequentially flip bytes, and observes the behavior of the tested program. It
then color-codes the input based on which sections appear to be critical, and
which are not; while not bulletproof, it can often offer quick insights into
complex file formats.
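It is invoked much like afl-tmin, e.g.:

```shell
./afl-analyze -i test_case -- /path/to/program [...]
```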
## 5. CI fuzzing
Some notes on CI fuzzing - this fuzzing is different from normal fuzzing
campaigns as these runs are much shorter.
1. Always:
* LTO has a much longer compile time, which runs counter to short CI fuzzing
runs - hence use afl-clang-fast instead.
* If you compile with CMPLOG, then you can save fuzzing time and reuse that
compiled target for both the `-c` option and the main fuzz target. This
will impact the speed by ~15% though.
* `AFL_FAST_CAL` - Enable fast calibration, this halves the time the
saturated corpus needs to be loaded.
* `AFL_CMPLOG_ONLY_NEW` - only perform cmplog on new found paths, not the
initial corpus as this very likely has been done for them already.
* Keep the generated corpus, use afl-cmin and reuse it every time!
2. Additionally randomize the AFL++ compilation options, e.g.:
* 40% for `AFL_LLVM_CMPLOG`
* 10% for `AFL_LLVM_LAF_ALL`
3. Also randomize the afl-fuzz runtime options, e.g.:
* 65% for `AFL_DISABLE_TRIM`
* 50% use a dictionary generated by `AFL_LLVM_DICT2FILE`
* 40% use MOpt (`-L 0`)
* 40% for `AFL_EXPAND_HAVOC_NOW`
* 20% for old queue processing (`-Z`)
* for CMPLOG targets, 60% for `-l 2`, 40% for `-l 3`
4. Do *not* run any `-M` modes, just running `-S` modes is better for CI
fuzzing. `-M` enables old queue handling etc. which is good for a fuzzing
campaign but not good for short CI runs.
What this can look like can be seen, e.g., at AFL++'s setup in Google's
[oss-fuzz](https://github.com/google/oss-fuzz/blob/master/infra/base-images/base-builder/compile_afl)
and
[clusterfuzz](https://github.com/google/clusterfuzz/blob/master/src/clusterfuzz/_internal/bot/fuzzers/afl/launcher.py).
A minimal local sketch follows below.
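The following is a rough, hypothetical sketch of such a CI wrapper script: the
environment variables and afl-fuzz options are the ones listed above, while
`$CI_JOB_ID`, the corpus directory, and the build command are placeholders for
whatever your CI provides:

```shell
# randomize compile-time options, then build with afl-clang-fast
if [ $((RANDOM % 100)) -lt 40 ]; then export AFL_LLVM_CMPLOG=1; fi
if [ $((RANDOM % 100)) -lt 10 ]; then export AFL_LLVM_LAF_ALL=1; fi
make clean all CC=afl-clang-fast

# randomize run-time options
FUZZ_OPTS=""
if [ $((RANDOM % 100)) -lt 65 ]; then export AFL_DISABLE_TRIM=1; fi
if [ $((RANDOM % 100)) -lt 40 ]; then FUZZ_OPTS="$FUZZ_OPTS -L 0"; fi
if [ $((RANDOM % 100)) -lt 20 ]; then FUZZ_OPTS="$FUZZ_OPTS -Z"; fi
afl-fuzz -i corpus -o out -S "ci-$CI_JOB_ID" $FUZZ_OPTS -- ./target @@
```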
## The End
Check out the [FAQ](FAQ.md) if it maybe answers your question (that you might
not even have known you had ;-) ).
This is basically all you need to know to professionally run fuzzing campaigns.
If you want to know more, the tons of texts in [docs/](./) will have you
covered.
Note that there are also a lot of tools out there that help fuzzing with AFL++
(some might be deprecated or unsupported), see
[third_party_tools.md](third_party_tools.md).
# Ideas for AFL++
In the following, we describe a variety of ideas that could be implemented for
future AFL++ versions.
## Analysis software
Currently analysis is done by using afl-plot, which is rather outdated. A GTK or
browser tool to create run-time analysis based on fuzzer_stats, queue/id*
information and plot_data that allows for zooming in and out, changing min/max
display values etc. and doing that for a single run, different runs and
campaigns vs. campaigns. Interesting values are execs, and execs/s, edges
discovered (total, when each edge was discovered and which other fuzzer share
finding that edge), test cases executed. It should be clickable which value is X
and Y axis, zoom factor, log scaling on-off, etc.
Mentor: vanhauser-thc
## WASM Instrumentation
Currently, AFL++ can be used for source code fuzzing and traditional binaries.
With the rise of WASM as compile target, however, a novel way of instrumentation
needs to be implemented for binaries compiled to Webassembly. This can either be
done by inserting instrumentation directly into the WASM AST, or by patching
feedback into a WASM VMs of choice, similar to the current Unicorn
instrumentation.
Mentor: any
## Support other programming languages
Other programming languages also use llvm, hence they could (easily?) be supported
for fuzzing, e.g., mono, swift, go, kotlin native, fortran, ...
GCC also supports: Objective-C, Fortran, Ada, Go, and D (according to
[Gcc homepage](https://gcc.gnu.org/))
LLVM is also used by: Rust, LLGo (Go), kaleidoscope (Haskell), flang (Fortran),
emscripten (JavaScript, WASM), ilwasm (CIL (C#)) (according to
[LLVM frontends](https://gist.github.com/axic/62d66fb9d8bccca6cc48fa9841db9241))
Mentor: vanhauser-thc
## Machine Learning
Something with machine learning, better than
[NEUZZ](https://github.com/dongdongshe/neuzz) :-) Either improve a single
mutator through learning of many different bugs (a bug class) or gather deep
insights about a single target beforehand (CFG, DFG, VFG, ...?) and improve
performance for a single target.
Mentor: domenukk
## Your idea!
Finally, we are open to proposals! Create an issue at
https://github.com/AFLplusplus/AFLplusplus/issues and let's discuss :-)
# Important changes in AFL++
This document lists important changes in AFL++, for example, major behavior
changes.
## From version 3.00 onwards
With AFL++ 3.13-3.20, we introduce FRIDA mode (`-O`) to have an alternative for
binary-only fuzzing. It is slower than QEMU mode but works on MacOS, Android,
iOS etc.
With AFL++ 3.15, we introduced the following changes from previous behaviors:
* Also -M main mode does not do deterministic fuzzing by default anymore
* afl-cmin and afl-showmap -Ci now descend into subdirectories like afl-fuzz
-i does (but note that afl-cmin.bash does not)
With AFL++ 3.14, we introduced the following changes from previous behaviors:
* afl-fuzz: deterministic fuzzing is not a default for -M main anymore
* afl-cmin/afl-showmap -i now descends into subdirectories (afl-cmin.bash,
however, does not)
With AFL++ 3.10, we introduced the following changes from previous behaviors:
* The '+' feature of the '-t' option now means to auto-calculate the timeout
with the value given being the maximum timeout. The original meaning of
"skipping timeouts instead of abort" is now inherent to the -t option.
With AFL++ 3.00, we introduced changes that break some previous AFL and AFL++
behaviors and defaults:
* There are no llvm_mode and gcc_plugin subdirectories anymore and there is
only one compiler: afl-cc. All previous compilers now symlink to this one.
All instrumentation source code is now in the `instrumentation/` folder.
* The gcc_plugin was replaced with a new version submitted by AdaCore that
supports more features. Thank you!
* QEMU mode got upgraded to QEMU 5.1, but to be able to build this a current
ninja build tool version and python3 setuptools are required. QEMU mode also
got new options like snapshotting, instrumenting specific shared libraries,
etc. Additionally QEMU 5.1 supports more CPU targets so this is really worth
it.
* When instrumenting targets, afl-cc will not supersede optimizations anymore
if any were given. This allows fuzzing targets built regularly, like those
for debug or release versions.
* afl-fuzz:
* if neither -M or -S is specified, `-S default` is assumed, so more fuzzers
can easily be added later
* `-i` input directory option now descends into subdirectories. It also does
not abort on crashes and too-large files; instead, it skips them and uses
them for splicing mutations
* -m none is now default, set memory limits (in MB) with, e.g., -m 250
* deterministic fuzzing is now disabled by default (unless using -M) and can
be enabled with -D
* a caching of test cases can now be performed and can be modified by
editing config.h for TESTCASE_CACHE or by specifying the environment
variable `AFL_TESTCACHE_SIZE` (in MB). Good values are between 50-500
(default: 50).
* -M mains do not perform trimming
* examples/ got renamed to utils/
* libtokencap/ libdislocator/ and qdbi_mode/ were moved to utils/
* afl-cmin/afl-cmin.bash now search first in PATH and last in AFL_PATH
# AFL "Life Pro Tips"
Bite-sized advice for those who understand the basics, but can't be bothered
to read or memorize every other piece of documentation for AFL.
## Get more bang for your buck by using fuzzing dictionaries.
See [dictionaries/README.md](../dictionaries/README.md) to learn how.
## You can get the most out of your hardware by parallelizing AFL jobs.
See [parallel_fuzzing.md](parallel_fuzzing.md) for step-by-step tips.
## Improve the odds of spotting memory corruption bugs with libdislocator.so!
It's easy. Consult [utils/libdislocator/README.md](../utils/libdislocator/README.md) for usage tips.
## Want to understand how your target parses a particular input file?
Try the bundled `afl-analyze` tool; it's got colors and all!
## You can visually monitor the progress of your fuzzing jobs.
Run the bundled `afl-plot` utility to generate browser-friendly graphs.
## Need to monitor AFL jobs programmatically?
Check out the `fuzzer_stats` file in the AFL output dir or try `afl-whatsup`.
## Puzzled by something showing up in red or purple in the AFL UI?
It could be important - consult docs/status_screen.md right away!
## Know your target? Convert it to persistent mode for a huge performance gain!
Consult section #5 in README.llvm.md for tips.
## Using clang?
Check out instrumentation/ for a faster alternative to afl-gcc!
## Did you know that AFL can fuzz closed-source or cross-platform binaries?
Check out qemu_mode/README.md and unicorn_mode/README.md for more.
## Did you know that afl-fuzz can minimize any test case for you?
Try the bundled `afl-tmin` tool - and get small repro files fast!
## Not sure if a crash is exploitable? AFL can help you figure it out. Specify
`-C` to enable the peruvian were-rabbit mode.
## Trouble dealing with a machine uprising? Relax, we've all been there.
Find essential survival tips at http://lcamtuf.coredump.cx/prep/.
## Want to automatically spot non-crashing memory handling bugs?
Try running an AFL-generated corpus through ASAN, MSAN, or Valgrind.
## Good selection of input files is critical to a successful fuzzing job.
See docs/perf_tips.md for pro tips.
## You can improve the odds of automatically spotting stack corruption issues.
Specify `AFL_HARDEN=1` in the environment to enable hardening flags.
## Bumping into problems with non-reproducible crashes?
It happens, but usually
isn't hard to diagnose. See section #7 in README.md for tips.
## Fuzzing is not just about memory corruption issues in the codebase.
Add some
sanity-checking `assert()` / `abort()` statements to effortlessly catch logic bugs.
## Hey kid... pssst... want to figure out how AFL really works?
Check out docs/technical_details.md for all the gory details in one place!
## There's a ton of third-party helper tools designed to work with AFL!
Be sure to check out docs/sister_projects.md before writing your own.
## Need to fuzz the command-line arguments of a particular program?
You can find a simple solution in utils/argv_fuzzing.
## Attacking a format that uses checksums?
Remove the checksum-checking code or use a postprocessor!
See `afl_custom_post_process` in custom_mutators/examples/example.c for more.
# Tips for parallel fuzzing
This document talks about synchronizing afl-fuzz jobs on a single machine
or across a fleet of systems. See README.md for the general instruction manual.
Note that this document is rather outdated. Please refer to the main document
section on multiple core usage [../README.md#Using multiple cores](../README.md#b-using-multiple-coresthreads)
for up-to-date strategies!
## 1) Introduction
Every copy of afl-fuzz will take up one CPU core. This means that on an
n-core system, you can almost always run around n concurrent fuzzing jobs with
virtually no performance hit (you can use the afl-gotcpu tool to make sure).
In fact, if you rely on just a single job on a multi-core system, you will
be underutilizing the hardware. So, parallelization is always the right way to
go.
When targeting multiple unrelated binaries or using the tool in
"non-instrumented" (-n) mode, it is perfectly fine to just start up several
fully separate instances of afl-fuzz. The picture gets more complicated when
you want to have multiple fuzzers hammering a common target: if a hard-to-hit
but interesting test case is synthesized by one fuzzer, the remaining instances
will not be able to use that input to guide their work.
To help with this problem, afl-fuzz offers a simple way to synchronize test
cases on the fly.
Note that AFL++ has AFLfast's power schedules implemented.
It is therefore a good idea to use different power schedules if you run
several instances in parallel. See [power_schedules.md](power_schedules.md)
Alternatively running other AFL spinoffs in parallel can be of value,
e.g. Angora (https://github.com/AngoraFuzzer/Angora/)
## 2) Single-system parallelization
If you wish to parallelize a single job across multiple cores on a local
system, simply create a new, empty output directory ("sync dir") that will be
shared by all the instances of afl-fuzz; and then come up with a naming scheme
for every instance - say, "fuzzer01", "fuzzer02", etc.
Run the first one ("main node", -M) like this:
```
./afl-fuzz -i testcase_dir -o sync_dir -M fuzzer01 [...other stuff...]
```
...and then, start up secondary (-S) instances like this:
```
./afl-fuzz -i testcase_dir -o sync_dir -S fuzzer02 [...other stuff...]
./afl-fuzz -i testcase_dir -o sync_dir -S fuzzer03 [...other stuff...]
```
Each fuzzer will keep its state in a separate subdirectory, like so:
/path/to/sync_dir/fuzzer01/
Each instance will also periodically rescan the top-level sync directory
for any test cases found by other fuzzers - and will incorporate them into
its own fuzzing when they are deemed interesting enough.
For performance reasons only -M main node syncs the queue with everyone, the
-S secondary nodes will only sync from the main node.
The difference between the -M and -S modes is that the main instance will
still perform deterministic checks; while the secondary instances will
proceed straight to random tweaks.
Note that you must always have one -M main instance!
Running multiple -M instances is wasteful!
You can also monitor the progress of your jobs from the command line with the
provided afl-whatsup tool. When the instances are no longer finding new paths,
it's probably time to stop.
WARNING: Exercise caution when explicitly specifying the -f option. Each fuzzer
must use a separate temporary file; otherwise, things will go south. One safe
example may be:
```
./afl-fuzz [...] -S fuzzer10 -f file10.txt ./fuzzed/binary @@
./afl-fuzz [...] -S fuzzer11 -f file11.txt ./fuzzed/binary @@
./afl-fuzz [...] -S fuzzer12 -f file12.txt ./fuzzed/binary @@
```
This is not a concern if you use @@ without -f and let afl-fuzz come up with the
file name.
## 3) Multiple -M mains
There is support for parallelizing the deterministic checks.
This is only needed where
1. many new paths are found fast over a long time and it looks unlikely that
main node will ever catch up, and
2. deterministic fuzzing is actively helping path discovery (you can see this
in the main node for the first few lines in the "fuzzing strategy yields"
section. If the ratio `found/attempts` is high, then it is effective. It
most commonly isn't.)
Only if both are true is it beneficial to have more than one main.
You can leverage this by creating -M instances like so:
```
./afl-fuzz -i testcase_dir -o sync_dir -M mainA:1/3 [...]
./afl-fuzz -i testcase_dir -o sync_dir -M mainB:2/3 [...]
./afl-fuzz -i testcase_dir -o sync_dir -M mainC:3/3 [...]
```
... where the first value after ':' is the sequential ID of a particular main
instance (starting at 1), and the second value is the total number of fuzzers to
distribute the deterministic fuzzing across. Note that if you boot up fewer
fuzzers than indicated by the second number passed to -M, you may end up with
poor coverage.
## 4) Syncing with non-AFL fuzzers or independent instances
A -M main node can be told with the `-F other_fuzzer_queue_directory` option
to sync results from other fuzzers, e.g. libfuzzer or honggfuzz.
Only the specified directory will be synced into afl, not subdirectories.
The specified directory does not need to exist yet at the start of afl.
The `-F` option can be passed to the main node several times.
## 5) Multi-system parallelization
The basic operating principle for multi-system parallelization is similar to
the mechanism explained in section 2. The key difference is that you need to
write a simple script that performs two actions:
- Uses SSH with authorized_keys to connect to every machine and retrieve
a tar archive of the /path/to/sync_dir/<main_node(s)> directory local to
the machine.
It is best to use a naming scheme that includes the host name and whether it
is a main node (e.g. main1, main2) in the fuzzer ID, so that you can do
something like:
```sh
for host in `cat HOSTLIST`; do
ssh user@$host "tar -czf - sync/${host}_main*/" > $host.tgz
done
```
- Distributes and unpacks these files on all the remaining machines, e.g.:
```sh
for srchost in `cat HOSTLIST`; do
for dsthost in `cat HOSTLIST`; do
test "$srchost" = "$dsthost" && continue
ssh user@$dsthost 'tar -kxzf -' < $srchost.tgz
done
done
```
There is an example of such a script in utils/distributed_fuzzing/.
There are other (older) more featured, experimental tools:
* https://github.com/richo/roving
* https://github.com/MartijnB/disfuzz-afl
However these do not support syncing just main nodes (yet).
When developing custom test case sync code, there are several optimizations
to keep in mind:
- The synchronization does not have to happen very often; running the
task every 60 minutes or even less often at later fuzzing stages is
fine
- There is no need to synchronize crashes/ or hangs/; you only need to
copy over queue/* (and ideally, also fuzzer_stats).
- It is not necessary (and not advisable!) to overwrite existing files;
the -k option in tar is a good way to avoid that.
- There is no need to fetch directories for fuzzers that are not running
locally on a particular machine, and were simply copied over onto that
system during earlier runs.
- For large fleets, you will want to consolidate tarballs for each host,
as this will let you use n SSH connections for sync, rather than n*(n-1).
You may also want to implement staged synchronization. For example, you
could have 10 groups of systems, with group 1 pushing test cases only
to group 2; group 2 pushing them only to group 3; and so on, with group
10 eventually feeding back to group 1.
This arrangement would allow interesting test cases to propagate across
the fleet without having to copy every fuzzer queue to every single host.
- You do not want a "main" instance of afl-fuzz on every system; you should
run them all with -S, and just designate a single process somewhere within
the fleet to run with -M.
- Syncing is only necessary for the main nodes on a system. It is possible
to run main-less with only secondaries. However, then you need to find out
which secondary took over the temporary role to be the main node. Look for
the `is_main_node` file in the fuzzer directories, e.g. `sync-dir/hostname-*/is_main_node`.
It is *not* advisable to skip the synchronization script and run the fuzzers
directly on a network filesystem; unexpected latency and unkillable processes
in I/O wait state can mess things up.
## 6) Remote monitoring and data collection
You can use screen, nohup, tmux, or something equivalent to run remote
instances of afl-fuzz. If you redirect the program's output to a file, it will
automatically switch from a fancy UI to more limited status reports. There is
also basic machine-readable information which is always written to the
fuzzer_stats file in the output directory. Locally, that information can be
interpreted with afl-whatsup.
In principle, you can use the status screen of the main (-M) instance to
monitor the overall fuzzing progress and decide when to stop. In this
mode, the most important signal is just that no new paths are being found
for a longer while. If you do not have a main instance, just pick any
single secondary instance to watch and go by that.
You can also rely on that instance's output directory to collect the
synthesized corpus that covers all the noteworthy paths discovered anywhere
within the fleet. Secondary (-S) instances do not require any special
monitoring, other than just making sure that they are up.
Keep in mind that crashing inputs are *not* automatically propagated to the
main instance, so you may still want to monitor for crashes fleet-wide
from within your synchronization or health checking scripts (see afl-whatsup).
## 7) Asymmetric setups
It is perhaps worth noting that all of the following is permitted:
- Running afl-fuzz in conjunction with other guided tools that can extend
coverage (e.g., via concolic execution). Third-party tools simply need to
follow the protocol described above for pulling new test cases from
out_dir/<fuzzer_id>/queue/* and writing their own finds to sequentially
numbered id:nnnnnn files in out_dir/<ext_tool_id>/queue/*.
- Running some of the synchronized fuzzers with different (but related)
target binaries. For example, simultaneously stress-testing several
different JPEG parsers (say, IJG jpeg and libjpeg-turbo) while sharing
the discovered test cases can have synergistic effects and improve the
overall coverage.
(In this case, running one -M instance per target is necessary.)
- Having some of the fuzzers invoke the binary in different ways.
For example, 'djpeg' supports several DCT modes, configurable with
a command-line flag, while 'dwebp' supports incremental and one-shot
decoding. In some scenarios, going after multiple distinct modes and then
pooling test cases will improve coverage.
- Much less convincingly, running the synchronized fuzzers with different
starting test cases (e.g., progressive and standard JPEG) or dictionaries.
The synchronization mechanism ensures that the test sets will get fairly
homogeneous over time, but it introduces some initial variability.
## Tips for performance optimization
This file provides tips for troubleshooting slow or wasteful fuzzing jobs.
See README.md for the general instruction manual.
## 1. Keep your test cases small
This is probably the single most important step to take! Large test cases do
not merely take more time and memory to be parsed by the tested binary, but
also make the fuzzing process dramatically less efficient in several other
ways.
To illustrate, let's say that you're randomly flipping bits in a file, one bit
at a time. Let's assume that if you flip bit #47, you will hit a security bug;
flipping any other bit just results in an invalid document.
Now, if your starting test case is 100 bytes long, you will have a 71% chance of
triggering the bug within the first 1,000 execs - not bad! But if the test case
is 1 kB long, the probability that we will randomly hit the right pattern in
the same timeframe goes down to 11%. And if it has 10 kB of non-essential
cruft, the odds plunge to 1%.
On top of that, with larger inputs, the binary may now be running 5-10x
slower than before - so the overall drop in fuzzing efficiency may be easily
as high as 500x or so.
In practice, this means that you shouldn't fuzz image parsers with your
vacation photos. Generate a tiny 16x16 picture instead, and run it through
`jpegtran` or `pngcrush` for good measure. The same goes for most other types
of documents.
There's plenty of small starting test cases in ../testcases/ - try them out
or submit new ones!
If you want to start with a larger, third-party corpus, run `afl-cmin` with an
aggressive timeout on that data set first.
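For example, a minimal invocation could look like this (directory names are
placeholders; `-t` is the timeout in milliseconds):

```shell
afl-cmin -i third_party_corpus -o corpus_minimized -t 50 -- ./target @@
```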
## 2. Use a simpler target
Consider using a simpler target binary in your fuzzing work. For example, for
image formats, bundled utilities such as `djpeg`, `readpng`, or `gifhisto` are
considerably (10-20x) faster than the convert tool from ImageMagick - all while exercising roughly the same library-level image parsing code.
Even if you don't have a lightweight harness for a particular target, remember
that you can always use another, related library to generate a corpus that will
be then manually fed to a more resource-hungry program later on.
Also note that reading the fuzzing input via stdin is faster than reading from
a file.
## 3. Use LLVM persistent instrumentation
The LLVM mode offers a "persistent", in-process fuzzing mode that can
work well for certain types of self-contained libraries, and for fast targets,
can offer performance gains up to 5-10x; and a "deferred fork server" mode
that can offer huge benefits for programs with high startup overhead. Both
modes require you to edit the source code of the fuzzed program, but the
changes often amount to just strategically placing a single line or two.
If there are important data comparisons performed (e.g. `strcmp(ptr, MAGIC_HDR)`)
then using laf-intel (see instrumentation/README.laf-intel.md) will help `afl-fuzz` a lot
to get to the important parts in the code.
If you are only interested in specific parts of the code being fuzzed, you can
create an instrument list of the files that are actually relevant. This
improves the speed and accuracy of afl. See instrumentation/README.instrument_list.md
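A sketch of a build that combines both, assuming an AFL++ version that supports
`AFL_LLVM_ALLOWLIST` and a build system that honors `CC`/`CXX` (the file and
target names are examples):

```shell
echo "src/parser.c" > instrumentlist.txt
export AFL_LLVM_LAF_ALL=1
export AFL_LLVM_ALLOWLIST=$(pwd)/instrumentlist.txt
make CC=afl-clang-fast CXX=afl-clang-fast++
```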
## 4. Profile and optimize the binary
Check for any parameters or settings that obviously improve performance. For
example, the djpeg utility that comes with IJG jpeg and libjpeg-turbo can be
called with:
```bash
-dct fast -nosmooth -onepass -dither none -scale 1/4
```
...and that will speed things up. There is a corresponding drop in the quality
of decoded images, but it's probably not something you care about.
In some programs, it is possible to disable output altogether, or at least use
an output format that is computationally inexpensive. For example, with image
transcoding tools, converting to a BMP file will be a lot faster than to PNG.
With some laid-back parsers, enabling "strict" mode (i.e., bailing out after
first error) may result in smaller files and improved run time without
sacrificing coverage; for example, for sqlite, you may want to specify -bail.
If the program is still too slow, you can use `strace -tt` or an equivalent
profiling tool to see if the targeted binary is doing anything silly.
Sometimes, you can speed things up simply by specifying `/dev/null` as the
config file, or disabling some compile-time features that aren't really needed
for the job (try `./configure --help`). One of the notoriously resource-consuming
things would be calling other utilities via `exec*()`, `popen()`, `system()`, or
equivalent calls; for example, tar can invoke external decompression tools
when it decides that the input file is a compressed archive.
Some programs may also intentionally call `sleep()`, `usleep()`, or `nanosleep()`;
vim is a good example of that. Other programs may attempt `fsync()` and so on.
There are third-party libraries that make it easy to get rid of such code,
e.g.:
https://launchpad.net/libeatmydata
In programs that are slow due to unavoidable initialization overhead, you may
want to try the LLVM deferred forkserver mode (see README.llvm.md),
which can give you speed gains up to 10x, as mentioned above.
Last but not least, if you are using ASAN and the performance is unacceptable,
consider turning it off for now, and manually examining the generated corpus
with an ASAN-enabled binary later on.
## 5. Instrument just what you need
Instrument just the libraries you actually want to stress-test right now, one
at a time. Let the program use system-wide, non-instrumented libraries for
any functionality you don't actually want to fuzz. For example, in most
cases, it doesn't make sense to instrument `libgmp` just because you're testing a
crypto app that relies on it for bignum math.
Beware of programs that come with oddball third-party libraries bundled with
their source code (Spidermonkey is a good example of this). Check `./configure`
options to use non-instrumented system-wide copies instead.
## 6. Parallelize your fuzzers
The fuzzer is designed to need ~1 core per job. This means that on a, say,
4-core system, you can easily run four parallel fuzzing jobs with relatively
little performance hit. For tips on how to do that, see parallel_fuzzing.md.
The `afl-gotcpu` utility can help you understand if you still have idle CPU
capacity on your system. (It won't tell you about memory bandwidth, cache
misses, or similar factors, but they are less likely to be a concern.)
## 7. Keep memory use and timeouts in check
Consider setting low values for `-m` and `-t`.
For programs that are nominally very fast, but get sluggish for some inputs,
you can also try setting `-t` values that are more punishing than what `afl-fuzz`
dares to use on its own. On fast and idle machines, going down to `-t 5` may be
a viable plan.
The `-m` parameter is worth looking at, too. Some programs can end up spending
a fair amount of time allocating and initializing megabytes of memory when
presented with pathological inputs. Low `-m` values can make them give up sooner
and not waste CPU time.
## 8. Check OS configuration
There are several OS-level factors that may affect fuzzing speed:
- If you have no risk of power loss then run your fuzzing on a tmpfs
partition. This increases the performance noticeably.
Alternatively you can use `AFL_TMPDIR` to point to a tmpfs location to
just write the input file to a tmpfs.
- High system load. Use idle machines where possible. Kill any non-essential
CPU hogs (idle browser windows, media players, complex screensavers, etc).
- Network filesystems, either used for fuzzer input / output, or accessed by
the fuzzed binary to read configuration files (pay special attention to the
home directory - many programs search it for dot-files).
- Disable all the spectre, meltdown etc. security countermeasures in the
kernel if your machine is properly separated:
```
ibpb=off ibrs=off kpti=off l1tf=off mds=off mitigations=off
no_stf_barrier noibpb noibrs nopcid nopti nospec_store_bypass_disable
nospectre_v1 nospectre_v2 pcid=off pti=off spec_store_bypass_disable=off
spectre_v2=off stf_barrier=off
```
In most Linux distributions you can put this into a `/etc/default/grub`
variable.
You can use `sudo afl-persistent-config` to set these options for you.
The following list of changes is made when executing `afl-system-config`:
- On-demand CPU scaling. The Linux `ondemand` governor performs its analysis
on a particular schedule and is known to underestimate the needs of
short-lived processes spawned by `afl-fuzz` (or any other fuzzer). On Linux,
this can be fixed with:
``` bash
cd /sys/devices/system/cpu
echo performance | tee cpu*/cpufreq/scaling_governor
```
On other systems, the impact of CPU scaling will be different; when fuzzing,
use OS-specific tools to find out if all cores are running at full speed.
- Transparent huge pages. Some allocators, such as `jemalloc`, can incur a
heavy fuzzing penalty when transparent huge pages (THP) are enabled in the
kernel. You can disable this via:
```bash
echo never > /sys/kernel/mm/transparent_hugepage/enabled
```
- Suboptimal scheduling strategies. The significance of this will vary from
one target to another, but on Linux, you may want to make sure that the
following options are set:
```bash
echo 1 >/proc/sys/kernel/sched_child_runs_first
echo 1 >/proc/sys/kernel/sched_autogroup_enabled
```
Setting a different scheduling policy for the fuzzer process - say
`SCHED_RR` - can usually speed things up, too, but needs to be done with
care.
# Remote monitoring and metrics visualization
AFL++ can send out metrics as StatsD messages. For remote monitoring and
visualization of the metrics, you can set up a tool chain. For example, with
Prometheus and Grafana. All tools are free and open source.
This enables you to create nice and readable dashboards containing all the
information you need on your fuzzer instances. There is no need to write your
own statistics parsing system, deploy and maintain it to all your instances, and
sync with your graph rendering system.
Compared to the default integrated UI of AFL++, this can help you to visualize
trends and the fuzzing state over time. You might be able to see when the
fuzzing process has reached a state of no progress and visualize what are the
"best strategies" for your targets (according to your own criteria). You can do
so without logging into each instance individually.
![example visualization with Grafana](resources/statsd-grafana.png)
This is an example visualization with Grafana. The dashboard can be imported
with [this JSON template](resources/grafana-afl++.json).
## AFL++ metrics and StatsD
StatsD allows you to receive and aggregate metrics from a wide range of
applications and retransmit them to a backend of your choice.
From AFL++, StatsD can receive the following metrics:
- cur_path
- cycle_done
- cycles_wo_finds
- edges_found
- execs_done
- execs_per_sec
- havoc_expansion
- max_depth
- paths_favored
- paths_found
- paths_imported
- paths_total
- pending_favs
- pending_total
- slowest_exec_ms
- total_crashes
- unique_crashes
- unique_hangs
- var_byte_count
- variable_paths
Depending on your StatsD server, you will be able to monitor, trigger alerts, or
perform actions based on these metrics (for example: alert on slow exec/s for a
new build, threshold of crashes, time since last crash > X, and so on).
## Setting environment variables in AFL++
1. To enable the StatsD metrics collection on your fuzzer instances, set the
environment variable `AFL_STATSD=1`. By default, AFL++ will send the metrics
over UDP to 127.0.0.1:8125.
2. To enable tags for each metric based on their format (banner and
afl_version), set the environment variable `AFL_STATSD_TAGS_FLAVOR`. By
default, no tags will be added to the metrics.
The available values are the following:
- `dogstatsd`
- `influxdb`
- `librato`
- `signalfx`
For more information on environment variables, see
[env_variables.md](env_variables.md).
Note: When using multiple fuzzer instances with StatsD it is *strongly*
recommended to set up `AFL_STATSD_TAGS_FLAVOR` to match your StatsD server.
This will allow you to see individual fuzzer performance, detect bad ones,
and see the progress of each strategy.
3. Optional: To set the host and port of your StatsD daemon, set
`AFL_STATSD_HOST` and `AFL_STATSD_PORT`. The default values are `localhost`
and `8125`.
## Installing and setting up StatsD, Prometheus, and Grafana
The easiest way to install and set up the infrastructure is with Docker and
Docker Compose.
Depending on your fuzzing setup and infrastructure, you may not want to run
these applications on your fuzzer instances. This setup may be modified before
use in a production environment; for example, adding passwords, creating volumes
for storage, tweaking the metrics gathering to get host metrics (CPU, RAM, and
so on).
For all your fuzzing instances, only one instance of Prometheus and Grafana is
required. The
[statsd exporter](https://registry.hub.docker.com/r/prom/statsd-exporter)
converts the StatsD metrics to Prometheus. If you are using a provider that
supports StatsD directly, you can skip this part of the setup.
You can create and move the infrastructure files into a directory of your
choice. The directory will store all the required configuration files.
To install and set up Prometheus and Grafana:
1. Install Docker and Docker Compose:
```sh
curl -fsSL https://get.docker.com -o get-docker.sh
sh get-docker.sh
```
2. Create a `docker-compose.yml` containing the following:
```yml
version: '3'

networks:
  statsd-net:
    driver: bridge

services:
  prometheus:
    image: prom/prometheus
    container_name: prometheus
    volumes:
      - ./prometheus.yml:/prometheus.yml
    command:
      - '--config.file=/prometheus.yml'
    restart: unless-stopped
    ports:
      - "9090:9090"
    networks:
      - statsd-net

  statsd_exporter:
    image: prom/statsd-exporter
    container_name: statsd_exporter
    volumes:
      - ./statsd_mapping.yml:/statsd_mapping.yml
    command:
      - "--statsd.mapping-config=/statsd_mapping.yml"
    ports:
      - "9102:9102/tcp"
      - "8125:9125/udp"
    networks:
      - statsd-net

  grafana:
    image: grafana/grafana
    container_name: grafana
    restart: unless-stopped
    ports:
      - "3000:3000"
    networks:
      - statsd-net
```
3. Create a `prometheus.yml` containing the following:
```yml
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  - job_name: 'fuzzing_metrics'
    static_configs:
      - targets: ['statsd_exporter:9102']
```
4. Create a `statsd_mapping.yml` containing the following:
```yml
mappings:
  - match: "fuzzing.*"
    name: "fuzzing"
    labels:
      type: "$1"
```
5. Run `docker-compose up -d`.
## Running AFL++ with StatsD
To run your fuzzing instances:
```
AFL_STATSD_TAGS_FLAVOR=dogstatsd AFL_STATSD=1 afl-fuzz -M test-fuzzer-1 -i i -o o [./bin/my-application] @@
AFL_STATSD_TAGS_FLAVOR=dogstatsd AFL_STATSD=1 afl-fuzz -S test-fuzzer-2 -i i -o o [./bin/my-application] @@
...
```
# Sister projects
This doc lists some of the projects that are inspired by, derived from,
designed for, or meant to integrate with AFL. See README.md for the general
instruction manual.
!!!
!!! This list is outdated and needs an update, missing: e.g. Angora, FairFuzz
!!!
## Support for other languages / environments:
### Python AFL (Jakub Wilk)
Allows fuzz-testing of Python programs. Uses custom instrumentation and its
own forkserver.
http://jwilk.net/software/python-afl
### Go-fuzz (Dmitry Vyukov)
AFL-inspired guided fuzzing approach for Go targets:
https://github.com/dvyukov/go-fuzz
### afl.rs (Keegan McAllister)
Allows Rust features to be easily fuzzed with AFL (using the LLVM mode).
https://github.com/kmcallister/afl.rs
### OCaml support (KC Sivaramakrishnan)
Adds AFL-compatible instrumentation to OCaml programs.
https://github.com/ocamllabs/opam-repo-dev/pull/23
http://canopy.mirage.io/Posts/Fuzzing
### AFL for GCJ Java and other GCC frontends (-)
GCC Java programs are actually supported out of the box - simply rename
afl-gcc to afl-gcj. Unfortunately, by default, unhandled exceptions in GCJ do
not result in abort() being called, so you will need to manually add a
top-level exception handler that exits with SIGABRT or something equivalent.
Other GCC-supported languages should be fairly easy to get working, but may
face similar problems. See https://gcc.gnu.org/frontends.html for a list of
options.
## AFL-style in-process fuzzer for LLVM (Kostya Serebryany)
Provides an evolutionary instrumentation-guided fuzzing harness that allows
some programs to be fuzzed without the fork / execve overhead. (Similar
functionality is now available as the "persistent" feature described in
[the llvm_mode readme](../instrumentation/README.llvm.md))
http://llvm.org/docs/LibFuzzer.html
## TriforceAFL (Tim Newsham and Jesse Hertz)
Leverages QEMU full system emulation mode to allow AFL to target operating
systems and other alien worlds:
https://www.nccgroup.trust/us/about-us/newsroom-and-events/blog/2016/june/project-triforce-run-afl-on-everything/
## WinAFL (Ivan Fratric)
As the name implies, allows you to fuzz Windows binaries (using DynamoRio).
https://github.com/ivanfratric/winafl
Another Windows alternative may be:
https://github.com/carlosgprado/BrundleFuzz/
## Network fuzzing
### Preeny (Yan Shoshitaishvili)
Provides a fairly simple way to convince dynamically linked network-centric
programs to read from a file or not fork. Not AFL-specific, but described as
useful by many users. Some assembly required.
https://github.com/zardus/preeny
## Distributed fuzzing and related automation
### roving (Richo Healey)
A client-server architecture for effortlessly orchestrating AFL runs across
a fleet of machines. You don't want to use this on systems that face the
Internet or live in other untrusted environments.
https://github.com/richo/roving
### Distfuzz-AFL (Martijn Bogaard)
Simplifies the management of afl-fuzz instances on remote machines. The
author notes that the current implementation isn't secure and should not
be exposed on the Internet.
https://github.com/MartijnB/disfuzz-afl
### AFLDFF (quantumvm)
A nice GUI for managing AFL jobs.
https://github.com/quantumvm/AFLDFF
### afl-launch (Ben Nagy)
Batch AFL launcher utility with a simple CLI.
https://github.com/bnagy/afl-launch
### AFL Utils (rc0r)
Simplifies the triage of discovered crashes, start parallel instances, etc.
https://github.com/rc0r/afl-utils
### AFL crash analyzer (floyd)
Another crash triage tool:
https://github.com/floyd-fuh/afl-crash-analyzer
### afl-extras (fekir)
Collect data, parallel afl-tmin, startup scripts.
https://github.com/fekir/afl-extras
### afl-fuzzing-scripts (Tobias Ospelt)
Simplifies starting up multiple parallel AFL jobs.
https://github.com/floyd-fuh/afl-fuzzing-scripts/
### afl-sid (Jacek Wielemborek)
Allows users to more conveniently build and deploy AFL via Docker.
https://github.com/d33tah/afl-sid
Another Docker-related project:
https://github.com/ozzyjohnson/docker-afl
### afl-monitor (Paul S. Ziegler)
Provides more detailed and versatile statistics about your running AFL jobs.
https://github.com/reflare/afl-monitor
### FEXM (Security in Telecommunications)
Fully automated fuzzing framework, based on AFL
https://github.com/fgsect/fexm
## Crash triage, coverage analysis, and other companion tools:
### afl-crash-analyzer (Tobias Ospelt)
Makes it easier to navigate and annotate crashing test cases.
https://github.com/floyd-fuh/afl-crash-analyzer/
### Crashwalk (Ben Nagy)
AFL-aware tool to annotate and sort through crashing test cases.
https://github.com/bnagy/crashwalk
### afl-cov (Michael Rash)
Produces human-readable coverage data based on the output queue of afl-fuzz.
https://github.com/mrash/afl-cov
### afl-sancov (Bhargava Shastry)
Similar to afl-cov, but uses clang sanitizer instrumentation.
https://github.com/bshastry/afl-sancov
### RecidiVM (Jakub Wilk)
Makes it easy to estimate memory usage limits when fuzzing with ASAN or MSAN.
http://jwilk.net/software/recidivm
### aflize (Jacek Wielemborek)
Automatically build AFL-enabled versions of Debian packages.
https://github.com/d33tah/aflize
### afl-ddmin-mod (Markus Teufelberger)
A variant of afl-tmin that uses a more sophisticated (but slower)
minimization algorithm.
https://github.com/MarkusTeufelberger/afl-ddmin-mod
### afl-kit (Kuang-che Wu)
Replacements for afl-cmin and afl-tmin with additional features, such
as the ability to filter crashes based on stderr patterns.
https://github.com/kcwu/afl-kit
## Narrow-purpose or experimental:
### Cygwin support (Ali Rizvi-Santiago)
Pretty self-explanatory. As per the author, this "mostly" ports AFL to
Windows. Field reports welcome!
https://github.com/arizvisa/afl-cygwin
### Pause and resume scripts (Ben Nagy)
Simple automation to suspend and resume groups of fuzzing jobs.
https://github.com/bnagy/afl-trivia
### Static binary-only instrumentation (Aleksandar Nikolich)
Allows black-box binaries to be instrumented statically (i.e., by modifying
the binary ahead of time, rather than translating it on the fly). Author
reports better performance compared to QEMU, but occasional translation
errors with stripped binaries.
https://github.com/vanhauser-thc/afl-dyninst
### AFL PIN (Parker Thompson)
Early-stage Intel PIN instrumentation support (from before we settled on
faster-running QEMU).
https://github.com/mothran/aflpin
### AFL-style instrumentation in llvm (Kostya Serebryany)
Allows AFL-equivalent instrumentation to be injected at compiler level.
This is currently not supported by AFL as-is, but may be useful in other
projects.
https://code.google.com/p/address-sanitizer/wiki/AsanCoverage#Coverage_counters
### AFL JS (Han Choongwoo)
One-off optimizations to speed up the fuzzing of JavaScriptCore (now likely
superseded by LLVM deferred forkserver init - see README.llvm.md).
https://github.com/tunz/afl-fuzz-js
### AFL harness for fwknop (Michael Rash)
An example of a fairly involved integration with AFL.
https://github.com/mrash/fwknop/tree/master/test/afl
### Building harnesses for DNS servers (Jonathan Foote, Ron Bowes)
Two articles outlining the general principles and showing some example code.
https://www.fastly.com/blog/how-to-fuzz-server-american-fuzzy-lop
https://goo.gl/j9EgFf
### Fuzzer shell for SQLite (Richard Hipp)
A simple SQL shell designed specifically for fuzzing the underlying library.
http://www.sqlite.org/src/artifact/9e7e273da2030371
### Support for Python mutation modules (Christian Holler)
now integrated in AFL++, originally from here
https://github.com/choller/afl/blob/master/docs/mozilla/python_modules.txt
### Support for selective instrumentation (Christian Holler)
now integrated in AFL++, originally from here
https://github.com/choller/afl/blob/master/docs/mozilla/partial_instrumentation.txt
### Syzkaller (Dmitry Vyukov)
A similar guided approach as applied to fuzzing syscalls:
https://github.com/google/syzkaller/wiki/Found-Bugs
https://github.com/dvyukov/linux/commit/33787098ffaaa83b8a7ccf519913ac5fd6125931
http://events.linuxfoundation.org/sites/events/files/slides/AFL%20filesystem%20fuzzing%2C%20Vault%202016_0.pdf
### Kernel Snapshot Fuzzing using Unicornafl (Security in Telecommunications)
https://github.com/fgsect/unicorefuzz
### Android support (ele7enxxh)
Based on a somewhat dated version of AFL:
https://github.com/ele7enxxh/android-afl
### CGI wrapper (floyd)
Facilitates the testing of CGI scripts.
https://github.com/floyd-fuh/afl-cgi-wrapper
### Fuzzing difficulty estimation (Marcel Boehme)
A fork of AFL that tries to quantify the likelihood of finding additional
paths or crashes at any point in a fuzzing job.
https://github.com/mboehme/pythia
View File
@ -1,444 +0,0 @@
# Understanding the status screen
This document provides an overview of the status screen - plus tips for
troubleshooting any warnings and red text shown in the UI. See README.md for
the general instruction manual.
## A note about colors
The status screen and error messages use colors to keep things readable and
attract your attention to the most important details. For example, red almost
always means "consult this doc" :-)
Unfortunately, the UI will render correctly only if your terminal is using
traditional un*x palette (white text on black background) or something close
to that.
If you are using inverse video, you may want to change your settings, say:
- For GNOME Terminal, go to `Edit > Profile` preferences, select the "colors" tab, and from the list of built-in schemes, choose "white on black".
- For the MacOS X Terminal app, open a new window using the "Pro" scheme via the `Shell > New Window` menu (or make "Pro" your default).
Alternatively, if you really like your current colors, you can edit config.h
to comment out USE_COLORS, then do `make clean all`.
I'm not aware of any other simple way to make this work without causing
other side effects - sorry about that.
With that out of the way, let's talk about what's actually on the screen...
### The status bar
```
american fuzzy lop ++3.01a (default) [fast] {0}
```
The top line shows you which mode afl-fuzz is running in
(normal: "american fuzy lop", crash exploration mode: "peruvian rabbit mode")
and the version of AFL++.
Next to the version is the banner, which, if not set with -T by hand, will
either show the binary name being fuzzed, or the -M/-S main/secondary name for
parallel fuzzing.
Second to last is the power schedule mode being run (default: fast).
Finally, the last item is the CPU id.
### Process timing
```
+----------------------------------------------------+
| run time : 0 days, 8 hrs, 32 min, 43 sec |
| last new path : 0 days, 0 hrs, 6 min, 40 sec |
| last uniq crash : none seen yet |
| last uniq hang : 0 days, 1 hrs, 24 min, 32 sec |
+----------------------------------------------------+
```
This section is fairly self-explanatory: it tells you how long the fuzzer has
been running and how much time has elapsed since its most recent finds. This is
broken down into "paths" (a shorthand for test cases that trigger new execution
patterns), crashes, and hangs.
When it comes to timing: there is no hard rule, but most fuzzing jobs should be
expected to run for days or weeks; in fact, for a moderately complex project, the
first pass will probably take a day or so. Every now and then, some jobs
will be allowed to run for months.
There's one important thing to watch out for: if the tool is not finding new
paths within several minutes of starting, you're probably not invoking the
target binary correctly and it never gets to parse the input files we're
throwing at it; other possible explanations are that the default memory limit
(`-m`) is too restrictive, and the program exits after failing to allocate a
buffer very early on; or that the input files are patently invalid and always
fail a basic header check.
If there are no new paths showing up for a while, you will eventually see a big
red warning in this section, too :-)
### Overall results
```
+-----------------------+
| cycles done : 0 |
| total paths : 2095 |
| uniq crashes : 0 |
| uniq hangs : 19 |
+-----------------------+
```
The first field in this section gives you the count of queue passes done so far - that is, the number of times the fuzzer went over all the interesting test
cases discovered so far, fuzzed them, and looped back to the very beginning.
Every fuzzing session should be allowed to complete at least one cycle; and
ideally, should run much longer than that.
As noted earlier, the first pass can take a day or longer, so sit back and
relax.
To help make the call on when to hit `Ctrl-C`, the cycle counter is color-coded.
It is shown in magenta during the first pass, progresses to yellow if new finds
are still being made in subsequent rounds, then blue when that ends - and
finally, turns green after the fuzzer hasn't been seeing any action for a
longer while.
The remaining fields in this part of the screen should be pretty obvious:
there's the number of test cases ("paths") discovered so far, and the number of
unique faults. The test cases, crashes, and hangs can be explored in real-time
by browsing the output directory, as discussed in README.md.
### Cycle progress
```
+-------------------------------------+
| now processing : 1296 (61.86%) |
| paths timed out : 0 (0.00%) |
+-------------------------------------+
```
This box tells you how far along the fuzzer is with the current queue cycle: it
shows the ID of the test case it is currently working on, plus the number of
inputs it decided to ditch because they were persistently timing out.
The "*" suffix sometimes shown in the first line means that the currently
processed path is not "favored" (a property discussed later on).
### Map coverage
```
+--------------------------------------+
| map density : 10.15% / 29.07% |
| count coverage : 4.03 bits/tuple |
+--------------------------------------+
```
The section provides some trivia about the coverage observed by the
instrumentation embedded in the target binary.
The first line in the box tells you how many branch tuples we have already
hit, in proportion to how much the bitmap can hold. The number on the left
describes the current input; the one on the right is the value for the entire
input corpus.
Be wary of extremes:
- Absolute numbers below 200 or so suggest one of three things: that the
program is extremely simple; that it is not instrumented properly (e.g.,
due to being linked against a non-instrumented copy of the target
library); or that it is bailing out prematurely on your input test cases.
The fuzzer will try to mark this in pink, just to make you aware.
- Percentages over 70% may very rarely happen with very complex programs
that make heavy use of template-generated code.
Because high bitmap density makes it harder for the fuzzer to reliably
discern new program states, I recommend recompiling the binary with
`AFL_INST_RATIO=10` or so and trying again (see env_variables.md).
The fuzzer will flag high percentages in red. Chances are, you will never
see that unless you're fuzzing extremely hairy software (say, v8, perl,
ffmpeg).
The other line deals with the variability in tuple hit counts seen in the
binary. In essence, if every taken branch is always taken a fixed number of
times for all the inputs we have tried, this will read `1.00`. As we manage
to trigger other hit counts for every branch, the needle will start to move
toward `8.00` (every bit in the 8-bit map hit), but will probably never
reach that extreme.
Together, the values can be useful for comparing the coverage of several
different fuzzing jobs that rely on the same instrumented binary.
### Stage progress
```
+-------------------------------------+
| now trying : interest 32/8 |
| stage execs : 3996/34.4k (11.62%) |
| total execs : 27.4M |
| exec speed : 891.7/sec |
+-------------------------------------+
```
This part gives you an in-depth peek at what the fuzzer is actually doing right
now. It tells you about the current stage, which can be any of:
- calibration - a pre-fuzzing stage where the execution path is examined
to detect anomalies, establish baseline execution speed, and so on. Executed
very briefly whenever a new find is being made.
- trim L/S - another pre-fuzzing stage where the test case is trimmed to the
shortest form that still produces the same execution path. The length (L)
and stepover (S) are chosen in general relationship to file size.
- bitflip L/S - deterministic bit flips. There are L bits toggled at any given
time, walking the input file with S-bit increments. The current L/S variants
are: `1/1`, `2/1`, `4/1`, `8/8`, `16/8`, `32/8`.
- arith L/8 - deterministic arithmetics. The fuzzer tries to subtract or add
small integers to 8-, 16-, and 32-bit values. The stepover is always 8 bits.
- interest L/8 - deterministic value overwrite. The fuzzer has a list of known
"interesting" 8-, 16-, and 32-bit values to try. The stepover is 8 bits.
- extras - deterministic injection of dictionary terms. This can be shown as
"user" or "auto", depending on whether the fuzzer is using a user-supplied
dictionary (`-x`) or an auto-created one. You will also see "over" or "insert",
depending on whether the dictionary words overwrite existing data or are
inserted by offsetting the remaining data to accommodate their length.
- havoc - a sort-of-fixed-length cycle with stacked random tweaks. The
operations attempted during this stage include bit flips, overwrites with
random and "interesting" integers, block deletion, block duplication, plus
assorted dictionary-related operations (if a dictionary is supplied in the
first place).
- splice - a last-resort strategy that kicks in after the first full queue
cycle with no new paths. It is equivalent to 'havoc', except that it first
splices together two random inputs from the queue at some arbitrarily
selected midpoint.
- sync - a stage used only when `-M` or `-S` is set (see parallel_fuzzing.md).
No real fuzzing is involved, but the tool scans the output from other
fuzzers and imports test cases as necessary. The first time this is done,
it may take several minutes or so.
The remaining fields should be fairly self-evident: there's the exec count
progress indicator for the current stage, a global exec counter, and a
benchmark for the current program execution speed. This may fluctuate from
one test case to another, but ideally the benchmark should be over 500 execs/sec
most of the time - and if it stays below 100, the job will probably take very
long.
The fuzzer will explicitly warn you about slow targets, too. If this happens,
see the [perf_tips.md](perf_tips.md) file included with the fuzzer for ideas on how to speed
things up.
### Findings in depth
```
+--------------------------------------+
| favored paths : 879 (41.96%) |
| new edges on : 423 (20.19%) |
| total crashes : 0 (0 unique) |
| total tmouts : 24 (19 unique) |
+--------------------------------------+
```
This gives you several metrics that are of interest mostly to complete nerds.
The section includes the number of paths that the fuzzer likes the most based
on a minimization algorithm baked into the code (these will get considerably
more air time), and the number of test cases that actually resulted in better
edge coverage (versus just pushing the branch hit counters up). There are also
additional, more detailed counters for crashes and timeouts.
Note that the timeout counter is somewhat different from the hang counter; this
one includes all test cases that exceeded the timeout, even if they did not
exceed it by a margin sufficient to be classified as hangs.
### Fuzzing strategy yields
```
+-----------------------------------------------------+
| bit flips : 57/289k, 18/289k, 18/288k |
| byte flips : 0/36.2k, 4/35.7k, 7/34.6k |
| arithmetics : 53/2.54M, 0/537k, 0/55.2k |
| known ints : 8/322k, 12/1.32M, 10/1.70M |
| dictionary : 9/52k, 1/53k, 1/24k |
|havoc/splice : 1903/20.0M, 0/0 |
|py/custom/rq : unused, 53/2.54M, unused |
| trim/eff : 20.31%/9201, 17.05% |
+-----------------------------------------------------+
```
This is just another nerd-targeted section keeping track of how many paths we
have netted, in proportion to the number of execs attempted, for each of the
fuzzing strategies discussed earlier on. This serves to convincingly validate
assumptions about the usefulness of the various approaches taken by afl-fuzz.
The trim strategy stats in this section are a bit different than the rest.
The first number in this line shows the ratio of bytes removed from the input
files; the second one corresponds to the number of execs needed to achieve this
goal. Finally, the third number shows the proportion of bytes that, although
not possible to remove, were deemed to have no effect and were excluded from
some of the more expensive deterministic fuzzing steps.
Note that when deterministic mutation mode is off (which is the default
because it is not very efficient) the first five lines display
"disabled (default, enable with -D)".
Only the strategies that are activated will have their counters shown.
### Path geometry
```
+---------------------+
| levels : 5 |
| pending : 1570 |
| pend fav : 583 |
| own finds : 0 |
| imported : 0 |
| stability : 100.00% |
+---------------------+
```
The first field in this section tracks the path depth reached through the
guided fuzzing process. In essence: the initial test cases supplied by the
user are considered "level 1". The test cases that can be derived from that
through traditional fuzzing are considered "level 2"; the ones derived by
using these as inputs to subsequent fuzzing rounds are "level 3"; and so forth.
The maximum depth is therefore a rough proxy for how much value you're getting
out of the instrumentation-guided approach taken by afl-fuzz.
The next field shows you the number of inputs that have not gone through any
fuzzing yet. The same stat is also given for "favored" entries that the fuzzer
really wants to get to in this queue cycle (the non-favored entries may have to
wait a couple of cycles to get their chance).
Next, we have the number of new paths found during this fuzzing section and
imported from other fuzzer instances when doing parallelized fuzzing; and the
extent to which identical inputs appear to sometimes produce variable behavior
in the tested binary.
That last bit is actually fairly interesting: it measures the consistency of
observed traces. If a program always behaves the same for the same input data,
it will earn a score of 100%. When the value is lower but still shown in purple,
the fuzzing process is unlikely to be negatively affected. If it goes into red,
you may be in trouble, since AFL will have difficulty discerning between
meaningful and "phantom" effects of tweaking the input file.
Now, most targets will just get a 100% score, but when you see lower figures,
there are several things to look at:
- The use of uninitialized memory in conjunction with some intrinsic sources
of entropy in the tested binary. Harmless to AFL, but could be indicative
of a security bug.
- Attempts to manipulate persistent resources, such as left over temporary
files or shared memory objects. This is usually harmless, but you may want
to double-check to make sure the program isn't bailing out prematurely.
Running out of disk space, SHM handles, or other global resources can
trigger this, too.
- Hitting some functionality that is actually designed to behave randomly.
Generally harmless. For example, when fuzzing sqlite, an input like
`select random();` will trigger a variable execution path.
- Multiple threads executing at once in semi-random order. This is harmless
when the 'stability' metric stays over 90% or so, but can become an issue
if not. Here's what to try:
* Use afl-clang-fast from [instrumentation](../instrumentation/) - it uses a thread-local tracking
model that is less prone to concurrency issues,
* See if the target can be compiled or run without threads. Common
`./configure` options include `--without-threads`, `--disable-pthreads`, or
`--disable-openmp`.
* Replace pthreads with GNU Pth (https://www.gnu.org/software/pth/), which
allows you to use a deterministic scheduler.
- In persistent mode, minor drops in the "stability" metric can be normal,
because not all the code behaves identically when re-entered; but major
dips may signify that the code within `__AFL_LOOP()` is not behaving
correctly on subsequent iterations (e.g., due to incomplete clean-up or
reinitialization of the state) and that most of the fuzzing effort goes
to waste.
The paths where variable behavior is detected are marked with a matching entry
in the `<out_dir>/queue/.state/variable_behavior/` directory, so you can look
them up easily.
### CPU load
```
[cpu: 25%]
```
This tiny widget shows the apparent CPU utilization on the local system. It is
calculated by taking the number of processes in the "runnable" state, and then
comparing it to the number of logical cores on the system.
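As a rough illustration of the calculation described above (and only that - this is not the exact afl-fuzz code), the following sketch assumes Linux and compares the `procs_running` counter from `/proc/stat` against the number of logical cores:
```c
#include <stdio.h>
#include <unistd.h>

int main(void) {

  FILE *f = fopen("/proc/stat", "r");
  char  line[256];
  long  runnable = 0;

  if (!f) return 1;

  /* /proc/stat exposes the number of currently runnable processes. */
  while (fgets(line, sizeof(line), f))
    if (sscanf(line, "procs_running %ld", &runnable) == 1) break;

  fclose(f);

  long cores = sysconf(_SC_NPROCESSORS_ONLN);

  if (cores > 0) printf("[cpu: %ld%%]\n", runnable * 100 / cores);

  return 0;

}
```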
If the value is shown in green, you are using fewer CPU cores than available on
your system and can probably parallelize to improve performance; for tips on
how to do that, see parallel_fuzzing.md.
If the value is shown in red, your CPU is *possibly* oversubscribed, and
running additional fuzzers may not give you any benefits.
Of course, this benchmark is very simplistic; it tells you how many processes
are ready to run, but not how resource-hungry they may be. It also doesn't
distinguish between physical cores, logical cores, and virtualized CPUs; the
performance characteristics of each of these will differ quite a bit.
If you want a more accurate measurement, you can run the `afl-gotcpu` utility from the command line.
### Addendum: status and plot files
For unattended operation, some of the key status screen information can be also
found in a machine-readable format in the fuzzer_stats file in the output
directory. This includes:
- `start_time` - unix time indicating the start time of afl-fuzz
- `last_update` - unix time corresponding to the last update of this file
- `run_time` - run time in seconds to the last update of this file
- `fuzzer_pid` - PID of the fuzzer process
- `cycles_done` - queue cycles completed so far
- `cycles_wo_finds` - number of cycles without any new paths found
- `execs_done` - number of execve() calls attempted
- `execs_per_sec` - overall number of execs per second
- `paths_total` - total number of entries in the queue
- `paths_favored` - number of queue entries that are favored
- `paths_found` - number of entries discovered through local fuzzing
- `paths_imported` - number of entries imported from other instances
- `max_depth` - number of levels in the generated data set
- `cur_path` - currently processed entry number
- `pending_favs` - number of favored entries still waiting to be fuzzed
- `pending_total` - number of all entries waiting to be fuzzed
- `variable_paths` - number of test cases showing variable behavior
- `stability` - percentage of bitmap bytes that behave consistently
- `bitmap_cvg` - percentage of edge coverage found in the map so far
- `unique_crashes` - number of unique crashes recorded
- `unique_hangs` - number of unique hangs encountered
- `last_path` - seconds since the last path was found
- `last_crash` - seconds since the last crash was found
- `last_hang` - seconds since the last hang was found
- `execs_since_crash` - execs since the last crash was found
- `exec_timeout` - the -t command line value
- `slowest_exec_ms` - real time of the slowest execution in ms
- `peak_rss_mb` - max rss usage reached during fuzzing in MB
- `edges_found` - how many edges have been found
- `var_byte_count` - how many edges are non-deterministic
- `afl_banner` - banner text (e.g. the target name)
- `afl_version` - the version of AFL used
- `target_mode` - default, persistent, qemu, unicorn, non-instrumented
- `command_line` - full command line used for the fuzzing session
Most of these map directly to the UI elements discussed earlier on.
On top of that, you can also find an entry called `plot_data`, containing a
plottable history for most of these fields. If you have gnuplot installed, you
can turn this into a nice progress report with the included `afl-plot` tool.
### Addendum: Automatically send metrics with StatsD
In a CI environment or when running multiple fuzzers, it can be tedious to
log into each of them or deploy scripts to read the fuzzer statistics.
Using `AFL_STATSD` (and the other related environment variables `AFL_STATSD_HOST`,
`AFL_STATSD_PORT`, `AFL_STATSD_TAGS_FLAVOR`) you can automatically send metrics
to your favorite StatsD server. Depending on your StatsD server you will be able
to monitor, trigger alerts, or perform actions based on these metrics (e.g., alert on
slow exec/s for a new build, threshold of crashes, time since last crash > X, etc).
The selected metrics are a subset of all the metrics found in the status and in
the plot file. The list is the following: `cycle_done`, `cycles_wo_finds`,
`execs_done`,`execs_per_sec`, `paths_total`, `paths_favored`, `paths_found`,
`paths_imported`, `max_depth`, `cur_path`, `pending_favs`, `pending_total`,
`variable_paths`, `unique_crashes`, `unique_hangs`, `total_crashes`,
`slowest_exec_ms`, `edges_found`, `var_byte_count`, `havoc_expansion`.
Their definitions can be found in the addendum above.
When using multiple fuzzer instances with StatsD, it is *strongly* recommended to set up
the flavor (AFL_STATSD_TAGS_FLAVOR) to match your StatsD server. This will allow you
to see individual fuzzer performance, detect bad ones, see the progress of each
strategy...
View File
@ -1,550 +0,0 @@
# Technical "whitepaper" for afl-fuzz
NOTE: this document is rather outdated!
This document provides a quick overview of the guts of American Fuzzy Lop.
See README.md for the general instruction manual; and for a discussion of
motivations and design goals behind AFL, see historical_notes.md.
## 0. Design statement
American Fuzzy Lop does its best not to focus on any singular principle of
operation and not be a proof-of-concept for any specific theory. The tool can
be thought of as a collection of hacks that have been tested in practice,
found to be surprisingly effective, and have been implemented in the simplest,
most robust way I could think of at the time.
Many of the resulting features are made possible thanks to the availability of
lightweight instrumentation that served as a foundation for the tool, but this
mechanism should be thought of merely as a means to an end. The only true
governing principles are speed, reliability, and ease of use.
## 1. Coverage measurements
The instrumentation injected into compiled programs captures branch (edge)
coverage, along with coarse branch-taken hit counts. The code injected at
branch points is essentially equivalent to:
```c
cur_location = <COMPILE_TIME_RANDOM>;
shared_mem[cur_location ^ prev_location]++;
prev_location = cur_location >> 1;
```
The `cur_location` value is generated randomly to simplify the process of
linking complex projects and keep the XOR output distributed uniformly.
The `shared_mem[]` array is a 64 kB SHM region passed to the instrumented binary
by the caller. Every byte set in the output map can be thought of as a hit for
a particular (`branch_src`, `branch_dst`) tuple in the instrumented code.
The size of the map is chosen so that collisions are sporadic with almost all
of the intended targets, which usually sport between 2k and 10k discoverable
branch points:
```
Branch cnt | Colliding tuples | Example targets
------------+------------------+-----------------
1,000 | 0.75% | giflib, lzo
2,000 | 1.5% | zlib, tar, xz
5,000 | 3.5% | libpng, libwebp
10,000 | 7% | libxml
20,000 | 14% | sqlite
50,000 | 30% | -
```
At the same time, its size is small enough to allow the map to be analyzed
in a matter of microseconds on the receiving end, and to effortlessly fit
within L2 cache.
This form of coverage provides considerably more insight into the execution
path of the program than simple block coverage. In particular, it trivially
distinguishes between the following execution traces:
```
A -> B -> C -> D -> E (tuples: AB, BC, CD, DE)
A -> B -> D -> C -> E (tuples: AB, BD, DC, CE)
```
This aids the discovery of subtle fault conditions in the underlying code,
because security vulnerabilities are more often associated with unexpected
or incorrect state transitions than with merely reaching a new basic block.
The reason for the shift operation in the last line of the pseudocode shown
earlier in this section is to preserve the directionality of tuples (without
this, A ^ B would be indistinguishable from B ^ A) and to retain the identity
of tight loops (otherwise, A ^ A would be obviously equal to B ^ B).
The absence of simple saturating arithmetic opcodes on Intel CPUs means that
the hit counters can sometimes wrap around to zero. Since this is a fairly
unlikely and localized event, it's seen as an acceptable performance trade-off.
## 2. Detecting new behaviors
The fuzzer maintains a global map of tuples seen in previous executions; this
data can be rapidly compared with individual traces and updated in just a couple
of dword- or qword-wide instructions and a simple loop.
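As a minimal sketch of that comparison - with illustrative names such as `virgin_bits`, a fixed 64 kB map, and none of the real code's refinements - the word-wide loop could look like this:
```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAP_SIZE (1 << 16)

/* Tuples (and hit-count buckets) not yet observed in any execution. */
static uint64_t virgin_bits[MAP_SIZE / 8];

void init_virgin_map(void) {

  memset(virgin_bits, 255, sizeof(virgin_bits));  /* everything starts "unseen" */

}

/* Returns 1 if the trace touches anything still marked as virgin.
   trace_bits is the shared-memory map, assumed to be suitably aligned. */
int has_new_bits(const uint8_t *trace_bits) {

  const uint64_t *cur = (const uint64_t *)trace_bits;
  int             ret = 0;

  for (size_t i = 0; i < MAP_SIZE / 8; i++) {

    if (cur[i] & virgin_bits[i]) {

      virgin_bits[i] &= ~cur[i];   /* remember what we have just seen */
      ret = 1;

    }

  }

  return ret;

}
```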
When a mutated input produces an execution trace containing new tuples, the
corresponding input file is preserved and routed for additional processing
later on (see section #3). Inputs that do not trigger new local-scale state
transitions in the execution trace (i.e., produce no new tuples) are discarded,
even if their overall control flow sequence is unique.
This approach allows for a very fine-grained and long-term exploration of
program state while not having to perform any computationally intensive and
fragile global comparisons of complex execution traces, and while avoiding the
scourge of path explosion.
To illustrate the properties of the algorithm, consider that the second trace
shown below would be considered substantially new because of the presence of
new tuples (CA, AE):
```
#1: A -> B -> C -> D -> E
#2: A -> B -> C -> A -> E
```
At the same time, with #2 processed, the following pattern will not be seen
as unique, despite having a markedly different overall execution path:
```
#3: A -> B -> C -> A -> B -> C -> A -> B -> C -> D -> E
```
In addition to detecting new tuples, the fuzzer also considers coarse tuple
hit counts. These are divided into several buckets:
```
1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+
```
To some extent, the number of buckets is an implementation artifact: it allows
an in-place mapping of an 8-bit counter generated by the instrumentation to
an 8-position bitmap relied on by the fuzzer executable to keep track of the
already-seen execution counts for each tuple.
Changes within the range of a single bucket are ignored; transition from one
bucket to another is flagged as an interesting change in program control flow,
and is routed to the evolutionary process outlined in the section below.
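A hedged sketch of that in-place mapping, using the bucket boundaries listed above (the actual lookup table in afl-fuzz may differ in detail; the range designators are a GCC/Clang extension):
```c
#include <stddef.h>
#include <stdint.h>

static const uint8_t count_class_lookup8[256] = {

  [0]           = 0,
  [1]           = 1,
  [2]           = 2,
  [3]           = 4,
  [4 ... 7]     = 8,
  [8 ... 15]    = 16,
  [16 ... 31]   = 32,
  [32 ... 127]  = 64,
  [128 ... 255] = 128

};

/* Applied in place to the whole trace map after every execution, before
   the result is compared against the global "already seen" map. */
void classify_counts(uint8_t *map, size_t len) {

  for (size_t i = 0; i < len; i++)
    map[i] = count_class_lookup8[map[i]];

}
```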
The hit count behavior provides a way to distinguish between potentially
interesting control flow changes, such as a block of code being executed
twice when it was normally hit only once. At the same time, it is fairly
insensitive to empirically less notable changes, such as a loop going from
47 cycles to 48. The counters also provide some degree of "accidental"
immunity against tuple collisions in dense trace maps.
The execution is policed fairly heavily through memory and execution time
limits; by default, the timeout is set at 5x the initially-calibrated
execution speed, rounded up to 20 ms. The aggressive timeouts are meant to
prevent dramatic fuzzer performance degradation by descending into tarpits
that, say, improve coverage by 1% while being 100x slower; we pragmatically
reject them and hope that the fuzzer will find a less expensive way to reach
the same code. Empirical testing strongly suggests that more generous time
limits are not worth the cost.
## 3. Evolving the input queue
Mutated test cases that produced new state transitions within the program are
added to the input queue and used as a starting point for future rounds of
fuzzing. They supplement, but do not automatically replace, existing finds.
In contrast to more greedy genetic algorithms, this approach allows the tool
to progressively explore various disjoint and possibly mutually incompatible
features of the underlying data format, as shown in this image:
![gzip_coverage](./resources/afl_gzip.png)
Several practical examples of the results of this algorithm are discussed
here:
http://lcamtuf.blogspot.com/2014/11/pulling-jpegs-out-of-thin-air.html
http://lcamtuf.blogspot.com/2014/11/afl-fuzz-nobody-expects-cdata-sections.html
The synthetic corpus produced by this process is essentially a compact
collection of "hmm, this does something new!" input files, and can be used to
seed any other testing processes down the line (for example, to manually
stress-test resource-intensive desktop apps).
With this approach, the queue for most targets grows to somewhere between 1k
and 10k entries; approximately 10-30% of this is attributable to the discovery
of new tuples, and the remainder is associated with changes in hit counts.
The following table compares the relative ability to discover file syntax and
explore program states when using several different approaches to guided
fuzzing. The instrumented target was GNU patch 2.7.3 compiled with `-O3` and
seeded with a dummy text file; the session consisted of a single pass over the
input queue with afl-fuzz:
```
Fuzzer guidance | Blocks | Edges | Edge hit | Highest-coverage
strategy used | reached | reached | cnt var | test case generated
------------------+---------+---------+----------+---------------------------
(Initial file) | 156 | 163 | 1.00 | (none)
| | | |
Blind fuzzing S | 182 | 205 | 2.23 | First 2 B of RCS diff
Blind fuzzing L | 228 | 265 | 2.23 | First 4 B of -c mode diff
Block coverage | 855 | 1,130 | 1.57 | Almost-valid RCS diff
Edge coverage | 1,452 | 2,070 | 2.18 | One-chunk -c mode diff
AFL model | 1,765 | 2,597 | 4.99 | Four-chunk -c mode diff
```
The first entry for blind fuzzing ("S") corresponds to executing just a single
round of testing; the second set of figures ("L") shows the fuzzer running in a
loop for a number of execution cycles comparable with that of the instrumented
runs, which required more time to fully process the growing queue.
Roughly similar results have been obtained in a separate experiment where the
fuzzer was modified to compile out all the random fuzzing stages and leave just
a series of rudimentary, sequential operations such as walking bit flips.
Because this mode would be incapable of altering the size of the input file,
the sessions were seeded with a valid unified diff:
```
Queue extension | Blocks | Edges | Edge hit | Number of unique
strategy used | reached | reached | cnt var | crashes found
------------------+---------+---------+----------+------------------
(Initial file) | 624 | 717 | 1.00 | -
| | | |
Blind fuzzing | 1,101 | 1,409 | 1.60 | 0
Block coverage | 1,255 | 1,649 | 1.48 | 0
Edge coverage | 1,259 | 1,734 | 1.72 | 0
AFL model | 1,452 | 2,040 | 3.16 | 1
```
As noted earlier on, some of the prior work on genetic fuzzing relied on
maintaining a single test case and evolving it to maximize coverage. At least
in the tests described above, this "greedy" approach appears to confer no
substantial benefits over blind fuzzing strategies.
## 4. Culling the corpus
The progressive state exploration approach outlined above means that some of
the test cases synthesized later on in the game may have edge coverage that
is a strict superset of the coverage provided by their ancestors.
To optimize the fuzzing effort, AFL periodically re-evaluates the queue using a
fast algorithm that selects a smaller subset of test cases that still cover
every tuple seen so far, and whose characteristics make them particularly
favorable to the tool.
The algorithm works by assigning every queue entry a score proportional to its
execution latency and file size; and then selecting lowest-scoring candidates
for each tuple.
The tuples are then processed sequentially using a simple workflow:
1) Find next tuple not yet in the temporary working set,
2) Locate the winning queue entry for this tuple,
3) Register *all* tuples present in that entry's trace in the working set,
4) Go to #1 if there are any missing tuples in the set.
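A simplified sketch of this workflow - assuming a `top_rated[]` table that already records the lowest-scoring entry for each tuple, and using plain byte maps instead of the compressed traces the real code keeps - could look like this:
```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAP_SIZE (1 << 16)

struct queue_entry {

  struct queue_entry *next;
  uint8_t            *trace;     /* tuples exercised by this entry */
  int                 favored;   /* set by the culling pass        */

};

/* top_rated[i]: lowest-scoring (fast * small) entry that covers tuple i. */
extern struct queue_entry *top_rated[MAP_SIZE];

void cull_queue(struct queue_entry *queue) {

  uint8_t missing[MAP_SIZE];               /* tuples not yet in the working set */
  memset(missing, 1, MAP_SIZE);

  for (struct queue_entry *q = queue; q; q = q->next) q->favored = 0;

  for (size_t i = 0; i < MAP_SIZE; i++) {

    if (!missing[i] || !top_rated[i]) continue;   /* covered already / no winner */

    struct queue_entry *winner = top_rated[i];
    winner->favored = 1;

    /* Step 3: register *all* tuples present in the winner's trace. */
    for (size_t j = 0; j < MAP_SIZE; j++)
      if (winner->trace[j]) missing[j] = 0;

  }

}
```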
The generated corpus of "favored" entries is usually 5-10x smaller than the
starting data set. Non-favored entries are not discarded, but they are skipped
with varying probabilities when encountered in the queue:
- If there are new, yet-to-be-fuzzed favorites present in the queue, 99%
of non-favored entries will be skipped to get to the favored ones.
- If there are no new favorites:
* If the current non-favored entry was fuzzed before, it will be skipped
95% of the time.
* If it hasn't gone through any fuzzing rounds yet, the odds of skipping
drop down to 75%.
Based on empirical testing, this provides a reasonable balance between queue
cycling speed and test case diversity.
Slightly more sophisticated but much slower culling can be performed on input
or output corpora with `afl-cmin`. This tool permanently discards the redundant
entries and produces a smaller corpus suitable for use with `afl-fuzz` or
external tools.
## 5. Trimming input files
File size has a dramatic impact on fuzzing performance, both because large
files make the target binary slower, and because they reduce the likelihood
that a mutation would touch important format control structures, rather than
redundant data blocks. This is discussed in more detail in perf_tips.md.
The possibility that the user will provide a low-quality starting corpus aside,
some types of mutations can have the effect of iteratively increasing the size
of the generated files, so it is important to counter this trend.
Luckily, the instrumentation feedback provides a simple way to automatically
trim down input files while ensuring that the changes made to the files have no
impact on the execution path.
The built-in trimmer in afl-fuzz attempts to sequentially remove blocks of data
with variable length and stepover; any deletion that doesn't affect the checksum
of the trace map is committed to disk. The trimmer is not designed to be
particularly thorough; instead, it tries to strike a balance between precision
and the number of `execve()` calls spent on the process, selecting the block size
and stepover to match. The average per-file gains are around 5-20%.
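A rough sketch of such a trimming loop, assuming a helper `run_and_hash_trace()` that executes the target and checksums the resulting trace map (an illustrative name, not the real afl-fuzz API):
```c
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Assumed helper: runs the target on buf and hashes the trace map. */
uint32_t run_and_hash_trace(const uint8_t *buf, size_t len);

/* Remove blocks of decreasing size; keep any deletion that leaves the
   execution path (trace checksum) unchanged. Returns the new length. */
size_t trim_case(uint8_t *buf, size_t len) {

  uint32_t orig_cksum = run_and_hash_trace(buf, len);

  for (size_t block = len / 16; block >= 4; block /= 2) {

    size_t pos = 0;

    while (pos + block <= len) {

      /* Execute the target on a copy with [pos, pos + block) removed. */
      uint8_t *tmp = malloc(len - block);
      if (!tmp) break;

      memcpy(tmp, buf, pos);
      memcpy(tmp + pos, buf + pos + block, len - pos - block);

      if (run_and_hash_trace(tmp, len - block) == orig_cksum) {

        /* Same path: commit the deletion to the in-memory test case. */
        memmove(buf + pos, buf + pos + block, len - pos - block);
        len -= block;

      } else {

        pos += block;              /* removal changed behavior, keep the block */

      }

      free(tmp);

    }

  }

  return len;

}
```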
The standalone `afl-tmin` tool uses a more exhaustive, iterative algorithm, and
also attempts to perform alphabet normalization on the trimmed files. The
operation of `afl-tmin` is as follows.
First, the tool automatically selects the operating mode. If the initial input
crashes the target binary, afl-tmin will run in non-instrumented mode, simply
keeping any tweaks that produce a simpler file but still crash the target.
The same mode is used for hangs, if `-H` (hang mode) is specified.
If the target is non-crashing, the tool uses an instrumented mode and keeps only
the tweaks that produce exactly the same execution path.
The actual minimization algorithm is:
1) Attempt to zero large blocks of data with large stepovers. Empirically,
this is shown to reduce the number of execs by preempting finer-grained
efforts later on.
2) Perform a block deletion pass with decreasing block sizes and stepovers,
binary-search-style.
3) Perform alphabet normalization by counting unique characters and trying
to bulk-replace each with a zero value.
4) As a last resort, perform byte-by-byte normalization on non-zero bytes.
Instead of zeroing with a 0x00 byte, `afl-tmin` uses the ASCII digit '0'. This
is done because such a modification is much less likely to interfere with
text parsing, so it is more likely to result in successful minimization of
text files.
The algorithm used here is less involved than some other test case
minimization approaches proposed in academic work, but requires far fewer
executions and tends to produce comparable results in most real-world
applications.
## 6. Fuzzing strategies
The feedback provided by the instrumentation makes it easy to understand the
value of various fuzzing strategies and optimize their parameters so that they
work equally well across a wide range of file types. The strategies used by
afl-fuzz are generally format-agnostic and are discussed in more detail here:
http://lcamtuf.blogspot.com/2014/08/binary-fuzzing-strategies-what-works.html
It is somewhat notable that especially early on, most of the work done by
`afl-fuzz` is actually highly deterministic, and progresses to random stacked
modifications and test case splicing only at a later stage. The deterministic
strategies include:
- Sequential bit flips with varying lengths and stepovers,
- Sequential addition and subtraction of small integers,
- Sequential insertion of known interesting integers (`0`, `1`, `INT_MAX`, etc),
The purpose of opening with deterministic steps is related to their tendency to
produce compact test cases and small diffs between the non-crashing and crashing
inputs.
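For illustration, the value tables and the overwrite walk for one of these steps might look roughly like this (the exact value sets used by afl-fuzz may differ):
```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative "interesting" value sets. */
static const int8_t  interesting_8[]  = { INT8_MIN, -1, 0, 1, 16, 32, 64, 100,
                                          INT8_MAX };
static const int16_t interesting_16[] = { INT16_MIN, -129, 128, 255, 256, 512,
                                          1000, 1024, 4096, INT16_MAX };
static const int32_t interesting_32[] = { INT32_MIN, -100663046, -32769, 32768,
                                          65535, 65536, 100663045, INT32_MAX };

/* One deterministic pass: overwrite every 16-bit position with each known
   value, hand the result to the (assumed) execution callback, then restore. */
void overwrite_interesting_16(uint8_t *buf, size_t len,
                              void (*try_input)(const uint8_t *, size_t)) {

  for (size_t pos = 0; pos + 2 <= len; pos++) {

    uint16_t saved;
    memcpy(&saved, buf + pos, sizeof(saved));

    for (size_t k = 0; k < sizeof(interesting_16) / sizeof(int16_t); k++) {

      int16_t v = interesting_16[k];
      memcpy(buf + pos, &v, sizeof(v));   /* one endianness only, for brevity */
      try_input(buf, len);

    }

    memcpy(buf + pos, &saved, sizeof(saved));   /* restore original bytes */

  }

}
```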
With deterministic fuzzing out of the way, the non-deterministic steps include
stacked bit flips, insertions, deletions, arithmetics, and splicing of different
test cases.
The relative yields and `execve()` costs of all these strategies have been
investigated and are discussed in the aforementioned blog post.
For the reasons discussed in historical_notes.md (chiefly, performance,
simplicity, and reliability), AFL generally does not try to reason about the
relationship between specific mutations and program states; the fuzzing steps
are nominally blind, and are guided only by the evolutionary design of the
input queue.
That said, there is one (trivial) exception to this rule: when a new queue
entry goes through the initial set of deterministic fuzzing steps, and tweaks to
some regions in the file are observed to have no effect on the checksum of the
execution path, they may be excluded from the remaining phases of
deterministic fuzzing - and the fuzzer may proceed straight to random tweaks.
Especially for verbose, human-readable data formats, this can reduce the number
of execs by 10-40% or so without an appreciable drop in coverage. In extreme
cases, such as normally block-aligned tar archives, the gains can be as high as
90%.
Because the underlying "effector maps" are local to every queue entry and remain
in force only during deterministic stages that do not alter the size or the
general layout of the underlying file, this mechanism appears to work very
reliably and proved to be simple to implement.
## 7. Dictionaries
The feedback provided by the instrumentation makes it easy to automatically
identify syntax tokens in some types of input files, and to detect that certain
combinations of predefined or auto-detected dictionary terms constitute a
valid grammar for the tested parser.
A discussion of how these features are implemented within afl-fuzz can be found
here:
http://lcamtuf.blogspot.com/2015/01/afl-fuzz-making-up-grammar-with.html
In essence, when basic, typically easily-obtained syntax tokens are combined
together in a purely random manner, the instrumentation and the evolutionary
design of the queue together provide a feedback mechanism to differentiate
between meaningless mutations and ones that trigger new behaviors in the
instrumented code - and to incrementally build more complex syntax on top of
this discovery.
The dictionaries have been shown to enable the fuzzer to rapidly reconstruct
the grammar of highly verbose and complex languages such as JavaScript, SQL,
or XML; several examples of generated SQL statements are given in the blog
post mentioned above.
Interestingly, the AFL instrumentation also allows the fuzzer to automatically
isolate syntax tokens already present in an input file. It can do so by looking
for runs of bytes that, when flipped, produce a consistent change to the
program's execution path; this is suggestive of an underlying atomic comparison
to a predefined value baked into the code. The fuzzer relies on this signal
to build compact "auto dictionaries" that are then used in conjunction with
other fuzzing strategies.
## 8. De-duping crashes
De-duplication of crashes is one of the more important problems for any
competent fuzzing tool. Many of the naive approaches run into problems; in
particular, looking just at the faulting address may lead to completely
unrelated issues being clustered together if the fault happens in a common
library function (say, `strcmp`, `strcpy`); while checksumming call stack
backtraces can lead to extreme crash count inflation if the fault can be
reached through a number of different, possibly recursive code paths.
The solution implemented in `afl-fuzz` considers a crash unique if either of two
conditions is met:
- The crash trace includes a tuple not seen in any of the previous crashes,
- The crash trace is missing a tuple that was always present in earlier
faults.
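A simplified sketch of these two conditions, using a cumulative union map and an intersection map over all crash traces seen so far (the real bookkeeping in afl-fuzz differs in detail):
```c
#include <stddef.h>
#include <stdint.h>

#define MAP_SIZE (1 << 16)

static uint8_t seen_any[MAP_SIZE];   /* tuples seen in at least one crash */
static uint8_t seen_all[MAP_SIZE];   /* tuples seen in every crash so far */
static int     crash_count;

/* Returns 1 if this crash trace should be kept as "unique". */
int crash_is_unique(const uint8_t *trace) {

  int unique = 0;

  if (!crash_count) {

    for (size_t i = 0; i < MAP_SIZE; i++)
      seen_any[i] = seen_all[i] = trace[i] ? 1 : 0;

    crash_count = 1;
    return 1;                          /* the very first crash is always kept */

  }

  for (size_t i = 0; i < MAP_SIZE; i++) {

    uint8_t hit = trace[i] ? 1 : 0;

    if (hit && !seen_any[i]) unique = 1;   /* condition 1: brand-new tuple    */
    if (!hit && seen_all[i]) unique = 1;   /* condition 2: an always-present  */
                                           /* tuple is now missing            */
    seen_any[i] |= hit;
    seen_all[i] &= hit;

  }

  crash_count++;
  return unique;

}
```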
The approach is vulnerable to some path count inflation early on, but exhibits
a very strong self-limiting effect, similar to the execution path analysis
logic that is the cornerstone of `afl-fuzz`.
## 9. Investigating crashes
The exploitability of many types of crashes can be ambiguous; afl-fuzz tries
to address this by providing a crash exploration mode where a known-faulting
test case is fuzzed in a manner very similar to the normal operation of the
fuzzer, but with a constraint that causes any non-crashing mutations to be
thrown away.
A detailed discussion of the value of this approach can be found here:
http://lcamtuf.blogspot.com/2014/11/afl-fuzz-crash-exploration-mode.html
The method uses instrumentation feedback to explore the state of the crashing
program to get past the ambiguous faulting condition and then isolate the
newly-found inputs for human review.
On the subject of crashes, it is worth noting that in contrast to normal
queue entries, crashing inputs are *not* trimmed; they are kept exactly as
discovered to make it easier to compare them to the parent, non-crashing entry
in the queue. That said, `afl-tmin` can be used to shrink them at will.
## 10. The fork server
To improve performance, `afl-fuzz` uses a "fork server", where the fuzzed process
goes through `execve()`, linking, and libc initialization only once, and is then
cloned from a stopped process image by leveraging copy-on-write. The
implementation is described in more detail here:
http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
The fork server is an integral aspect of the injected instrumentation and
simply stops at the first instrumented function to await commands from
`afl-fuzz`.
With fast targets, the fork server can offer considerable performance gains,
usually between 1.5x and 2x. It is also possible to:
- Use the fork server in manual ("deferred") mode, skipping over larger,
user-selected chunks of initialization code. It requires very modest
code changes to the targeted program, and with some targets, can
produce 10x+ performance gains.
- Enable "persistent" mode, where a single process is used to try out
multiple inputs, greatly limiting the overhead of repetitive `fork()`
calls. This generally requires some code changes to the targeted program,
but can improve the performance of fast targets by a factor of 5 or more - approximating the benefits of in-process fuzzing jobs while still
maintaining very robust isolation between the fuzzer process and the
targeted binary.
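For reference, this is roughly what both variants look like in a target built with afl-clang-fast; `fuzz_one_input()` and the `"BOOM"` check are placeholders invented for this example:
```c
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Toy target logic - a placeholder for this example. */
static void fuzz_one_input(const unsigned char *buf, size_t len) {

  if (len >= 4 && memcmp(buf, "BOOM", 4) == 0) abort();

}

int main(void) {

  static unsigned char buf[4096];

#ifdef __AFL_HAVE_MANUAL_CONTROL
  /* Deferred mode: everything above this call (heavy initialization,
     config parsing, ...) runs only once, before the fork server starts. */
  __AFL_INIT();
#endif

  /* Persistent mode: the loop body handles many inputs in one process;
     after 1000 iterations the process is restarted to limit state bleed.
     __AFL_LOOP() is provided when compiling with afl-clang-fast. */
  while (__AFL_LOOP(1000)) {

    ssize_t len = read(0, buf, sizeof(buf));
    if (len > 0) fuzz_one_input(buf, (size_t)len);

  }

  return 0;

}
```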
## 11. Parallelization
The parallelization mechanism relies on periodically examining the queues
produced by independently-running instances on other CPU cores or on remote
machines, and then selectively pulling in the test cases that, when tried
out locally, produce behaviors not yet seen by the fuzzer at hand.
This allows for extreme flexibility in fuzzer setup, including running synced
instances against different parsers of a common data format, often with
synergistic effects.
For more information about this design, see parallel_fuzzing.md.
## 12. Binary-only instrumentation
Instrumentation of black-box, binary-only targets is accomplished with the
help of a separately-built version of QEMU in "user emulation" mode. This also
allows the execution of cross-architecture code - say, ARM binaries on x86.
QEMU uses basic blocks as translation units; the instrumentation is implemented
on top of this and uses a model roughly analogous to the compile-time hooks:
```c
if (block_address > elf_text_start && block_address < elf_text_end) {
cur_location = (block_address >> 4) ^ (block_address << 8);
shared_mem[cur_location ^ prev_location]++;
prev_location = cur_location >> 1;
}
```
The shift-and-XOR-based scrambling in the second line is used to mask the
effects of instruction alignment.
The start-up of binary translators such as QEMU, DynamoRIO, and PIN is fairly
slow; to counter this, the QEMU mode leverages a fork server similar to that
used for compiler-instrumented code, effectively spawning copies of an
already-initialized process paused at `_start`.
First-time translation of a new basic block also incurs substantial latency. To
eliminate this problem, the AFL fork server is extended by providing a channel
between the running emulator and the parent process. The channel is used
to notify the parent about the addresses of any newly-encountered blocks and to
add them to the translation cache that will be replicated for future child
processes.
As a result of these two optimizations, the overhead of the QEMU mode is
roughly 2-5x, compared to 100x+ for PIN.
## 13. The `afl-analyze` tool
The file format analyzer is a simple extension of the minimization algorithm
discussed earlier on; instead of attempting to remove no-op blocks, the tool
performs a series of walking byte flips and then annotates runs of bytes
in the input file.
It uses the following classification scheme:
- "No-op blocks" - segments where bit flips cause no apparent changes to
control flow. Common examples may be comment sections, pixel data within
a bitmap file, etc.
- "Superficial content" - segments where some, but not all, bitflips
produce some control flow changes. Examples may include strings in rich
documents (e.g., XML, RTF).
- "Critical stream" - a sequence of bytes where all bit flips alter control
flow in different but correlated ways. This may be compressed data,
non-atomically compared keywords or magic values, etc.
- "Suspected length field" - small, atomic integer that, when touched in
any way, causes a consistent change to program control flow, suggestive
of a failed length check.
- "Suspected cksum or magic int" - an integer that behaves similarly to a
length field, but has a numerical value that makes the length explanation
unlikely. This is suggestive of a checksum or other "magic" integer.
- "Suspected checksummed block" - a long block of data where any change
always triggers the same new execution path. Likely caused by failing
a checksum or a similar integrity check before any subsequent parsing
takes place.
- "Magic value section" - a generic token where changes cause the type
of binary behavior outlined earlier, but that doesn't meet any of the
other criteria. May be an atomically compared keyword or so.
57
docs/third_party_tools.md Normal file
View File
@ -0,0 +1,57 @@
# Tools that help fuzzing with AFL++
Speeding up fuzzing:
* [libfiowrapper](https://github.com/marekzmyslowski/libfiowrapper) - if the
function you want to fuzz requires loading a file, this allows using the
shared memory test case feature :-) - recommended.
Minimization of test cases:
* [afl-pytmin](https://github.com/ilsani/afl-pytmin) - a wrapper for afl-tmin
that tries to speed up the process of minimization of a single test case by
using many CPU cores.
* [afl-ddmin-mod](https://github.com/MarkusTeufelberger/afl-ddmin-mod) - a
variation of afl-tmin based on the ddmin algorithm.
* [halfempty](https://github.com/googleprojectzero/halfempty) - a fast utility
  for minimizing test cases by Tavis Ormandy, based on parallelization.
Distributed execution:
* [disfuzz-afl](https://github.com/MartijnB/disfuzz-afl) - distributed fuzzing
for AFL.
* [AFLDFF](https://github.com/quantumvm/AFLDFF) - AFL distributed fuzzing
framework.
* [afl-launch](https://github.com/bnagy/afl-launch) - a tool for the execution
of many AFL instances.
* [afl-mothership](https://github.com/afl-mothership/afl-mothership) -
management and execution of many synchronized AFL fuzzers on AWS cloud.
* [afl-in-the-cloud](https://github.com/abhisek/afl-in-the-cloud) - another
script for running AFL in AWS.
Deployment, management, monitoring, reporting:
* [afl-utils](https://gitlab.com/rc0r/afl-utils) - a set of utilities for
automatic processing/analysis of crashes and reducing the number of test
cases.
* [afl-other-arch](https://github.com/shellphish/afl-other-arch) - a set of
patches and scripts for easily adding support for various non-x86
architectures for AFL.
* [afl-trivia](https://github.com/bnagy/afl-trivia) - a few small scripts to
simplify the management of AFL.
* [afl-monitor](https://github.com/reflare/afl-monitor) - a script for
monitoring AFL.
* [afl-manager](https://github.com/zx1340/afl-manager) - a web server on Python
for managing multi-afl.
* [afl-remote](https://github.com/block8437/afl-remote) - a web server for the
remote management of AFL instances.
* [afl-extras](https://github.com/fekir/afl-extras) - shell scripts to
parallelize afl-tmin, startup, and data collection.
Crash processing:
* [afl-crash-analyzer](https://github.com/floyd-fuh/afl-crash-analyzer) -
another crash analyzer for AFL.
* [fuzzer-utils](https://github.com/ThePatrickStar/fuzzer-utils) - a set of
scripts for the analysis of results.
* [atriage](https://github.com/Ayrx/atriage) - a simple triage tool.
* [afl-kit](https://github.com/kcwu/afl-kit) - afl-cmin on Python.
* [AFLize](https://github.com/d33tah/aflize) - a tool that automatically
generates builds of Debian packages suitable for AFL.
* [afl-fid](https://github.com/FoRTE-Research/afl-fid) - a set of tools for
working with input data.

30
docs/tutorials.md Normal file
View File

@ -0,0 +1,30 @@
# Tutorials
Here are some good write-ups to show how to effectively use AFL++:
* [https://aflplus.plus/docs/tutorials/libxml2_tutorial/](https://aflplus.plus/docs/tutorials/libxml2_tutorial/)
* [https://bananamafia.dev/post/gb-fuzz/](https://bananamafia.dev/post/gb-fuzz/)
* [https://securitylab.github.com/research/fuzzing-challenges-solutions-1](https://securitylab.github.com/research/fuzzing-challenges-solutions-1)
* [https://securitylab.github.com/research/fuzzing-software-2](https://securitylab.github.com/research/fuzzing-software-2)
* [https://securitylab.github.com/research/fuzzing-sockets-FTP](https://securitylab.github.com/research/fuzzing-sockets-FTP)
* [https://securitylab.github.com/research/fuzzing-sockets-FreeRDP](https://securitylab.github.com/research/fuzzing-sockets-FreeRDP)
* [https://securitylab.github.com/research/fuzzing-apache-1](https://securitylab.github.com/research/fuzzing-apache-1)
If you do not want to follow a tutorial but rather try an exercise type of
training, then we can highly recommend the following:
* [https://github.com/antonio-morales/Fuzzing101](https://github.com/antonio-morales/Fuzzing101)
If you are interested in fuzzing structured data (where you define what the
structure is), these links have you covered:
* Superion for AFL++:
[https://github.com/adrian-rt/superion-mutator](https://github.com/adrian-rt/superion-mutator)
* libprotobuf for AFL++:
[https://github.com/P1umer/AFLplusplus-protobuf-mutator](https://github.com/P1umer/AFLplusplus-protobuf-mutator)
* libprotobuf raw:
[https://github.com/bruce30262/libprotobuf-mutator_fuzzing_learning/tree/master/4_libprotobuf_aflpp_custom_mutator](https://github.com/bruce30262/libprotobuf-mutator_fuzzing_learning/tree/master/4_libprotobuf_aflpp_custom_mutator)
* libprotobuf for old AFL++ API:
[https://github.com/thebabush/afl-libprotobuf-mutator](https://github.com/thebabush/afl-libprotobuf-mutator)
If you find other good ones, please send them to us :-)

View File

@ -1,6 +1,8 @@
# Debugging
If you are using FRIDA mode and have hit some problems, then this guide may help
you to diagnose any problems you are encountering. This assumes you have
followed the [osx-lib](#test/osx-lib) example to start fuzzing your target.
followed the [osx-lib](test/osx-lib) example to start fuzzing your target.
It should be noted that attempting to debug code using gdb which has been
instrumented in FRIDA is unlikely to be successful since the debugger will be
@ -10,69 +12,76 @@ you are very familiar with the implementation of Stalker, the instrumented code
generated by FRIDA is likely to be very difficult to follow. For this reason,
the following debugging strategies are outlined below.
By convention below all files should be provided with their path (they are
By convention, all files below should be provided with their path (they are
omitted for readability) and all items in `<braces>` are placeholders and should
be replaced accordingly.
# Select your version
## Select your version
Test with both the `dev` and `stable` branches of AFL++. The `dev` branch should
have the very latest version containing any fixes for identified issues. The
`stable` branch is updated less frequently, but equally might avoid a problem if
a regression has been introduced into the `dev` branch.
# Enable Diagnostic Information
- Run your target specifying the `AFL_DEBUG_CHILD=1` environment variable. This
will print a lot more diagnostic information to the screen when the target
starts up. If you have a simple configuration issue then you will likely see a
warning or error message in the output.
## Enable diagnostic information
Run your target specifying the `AFL_DEBUG_CHILD=1` environment variable. This
will print a lot more diagnostic information to the screen when the target
starts up. If you have a simple configuration issue, then you will likely see a
warning or error message in the output.
## Check your test harness
# Check your Test Harness
If any of the following steps fail, then there is a problem with your test
harness, or your target library. Since this is running without FRIDA mode or
harness or your target library. Since this is running without FRIDA mode or
`afl-fuzz`, that greatly reduces the search area for your defect. This is why it
is *VERY* important to carry out these basic steps first before taking on the
additional complexity of debugging with FRIDA mode or `afl-fuzz`.
- Run your harness outside of the fuzzer, passing it a representative seed as
its input: `./harness <input>`.
- Pass you harness multiple seeds to check that it is stable when running
- Pass your harness multiple seeds to check that it is stable when running
multiple tests as it will when running in fork server mode: `./harness <input1>
<input2>`.
- Build your test harness with `CFLAGS=-fsanitize=address` and
`LDFLAGS=-fsanitize=address`. Then run it again with multiple inputs to check
for errors (note that when fuzzing your harness should not be built with any
for errors (note that when fuzzing, your harness should not be built with any
sanitizer options).
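For example, a minimal sketch (assuming a Makefile-driven harness build; the
seed names are placeholders):

```bash
# Rebuild the harness stand-alone with ASAN enabled.
make clean
CFLAGS=-fsanitize=address LDFLAGS=-fsanitize=address make

# Run it against several representative seeds and watch for ASAN reports.
./harness seed1 seed2 seed3
```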
# Check the Samples
FRIDA mode contains a number of different sample targets in the `test` folder.
Have a look throught these and find one which is similar to your real target.
Check whether you have any issues running the sample target and make sure you
compare the command line used to launch the sample with that you are using to
launch your real target very carefully to check for any differences. If possible
start with one of these samples and gradually make changes one at a time
re-testing as you go until you have migrated it to run your own target.
## Check the samples
# FRIDA Mode
## Basic
First just try running your target with `LD_PRELOAD=afl-frida-trace.so ./harness
<input>`. An error here means that your defect occurs when running with just
FRIDA mode and isn't related to `afl-fuzz`.
FRIDA mode contains a number of different sample targets in the `test` folder.
Have a look through these and find one which is similar to your real target.
Check whether you have any issues running the sample target and make sure you
compare the command line used to launch the sample with the one you are using to
launch your real target very carefully to check for any differences. If
possible, start with one of these samples and gradually make changes one at a
time re-testing as you go until you have migrated it to run your own target.
## FRIDA mode
### Basic
First, just try running your target with `LD_PRELOAD=afl-frida-trace.so
./harness <input>`. An error here means that your defect occurs when running
with just FRIDA mode and isn't related to `afl-fuzz`.
Now you can try commenting out the implementation of `LLVMFuzzerTestOneInput` so
that the harness doesn't actually run your target library. This may also aid in
narrowing down the problem.
```c
int LLVMFuzzerTestOneInput(const unsigned char* data, size_t size){
// fpn_crashme(data, size);
return 0;
}
```
## Persistent Mode
### Persistent mode
If your target is ok running in basic mode, you can try running it in persistent
mode (if that is the configuration you are having issues with) as follows (again
outside of afl-fuzz). This time you will want to run it inside a debugger so
outside of `afl-fuzz`). This time, you will want to run it inside a debugger so
that you can use the debugger to send the `SIGCONT` signals (by continuing)
usually sent by `afl-fuzz` on each iteration.
@ -84,13 +93,15 @@ gdb \
--ex 'set environment AFL_FRIDA_PERSISTENT_ADDR=<entry_address>' \
--args ./harness <input>
```
Note we have to manually set the `__AFL_PERSISTENT` environment variable which
is usually passed by `afl-fuzz`.
Note that setting breakpoints etc is likely to interfere with FRIDA and cause
spurious errors.
Note:
- You have to manually set the `__AFL_PERSISTENT` environment variable which is
usually passed by `afl-fuzz`.
- Setting breakpoints etc. is likely to interfere with FRIDA and cause spurious
errors.
If this is successful, you can try additionally loading the hook library:
```bash
gdb \
--ex 'set environment __AFL_PERSISTENT=1' \
@ -100,6 +111,7 @@ gdb \
--ex 'set environment AFL_FRIDA_PERSISTENT_HOOK=frida_hook.so'
--args ./harness <input>
```
Note that the format of the hook used for FRIDA mode is subtly different to that
used when running in QEMU mode as shown below. Thus the DSO used for the hook is
not interchangeable.
@ -112,12 +124,14 @@ void afl_persistent_hook(struct x86_64_regs *regs, uint64_t guest_base,
uint8_t *input_buf, uint32_t input_buf_len);
```
## ASAN
### ASAN
It is also possible to enable ASAN (if that is the configuration you are having
issues with) without having to use `afl-fuzz`. This can be done as follows (note
that the name of the asan DSO may need to be changed depending on your
platform). Note that the asan DSO must appear first in the `LD_PRELOAD`
environment variable:
issues with) without having to use `afl-fuzz`. This can be done as follows:
Note:
- The name of the asan DSO may need to be changed depending on your platform.
- The asan DSO must appear first in the `LD_PRELOAD` environment variable.
```bash
LD_PRELOAD=libclang_rt.asan-x86_64.so:afl-frida-trace.so \
@ -132,29 +146,34 @@ DSO from coverage. Failure to do so will result in ASAN attempting to sanitize
itself and as a result detecting failures when it attempts to update the shadow
maps.
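One way to do this, assuming the exclusion is done via
`AFL_FRIDA_EXCLUDE_RANGES` (a sketch; the address range below is a placeholder
that should be taken from the `AFL_FRIDA_DEBUG_MAPS` output for your own
target):

```bash
# First run with AFL_FRIDA_DEBUG_MAPS=1 to find the range of the ASAN DSO,
# then exclude that (placeholder) range from instrumentation.
AFL_FRIDA_EXCLUDE_RANGES=0x7fff40000000-0x7fff41000000 \
LD_PRELOAD=libclang_rt.asan-x86_64.so:afl-frida-trace.so \
./harness <input>
```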
# Printf
## Printf
If you have an idea of where things are going wrong for you, then don't be
scared to add `printf` statements to either AFL++ or FRIDA mode itself to show
more diagnostic information. Just be sure to set `AFL_DEBUG=1` and
`AFL_DEBUG_CHILD=1` when you are testing it.
# Core Dumps
Lastly, if your defect only occurs when using `afl-fuzz` (e.g. when using
`CMPLOG` which cannot be tested outside of `afl-fuzz` due to it's need for a
## Core dumps
Lastly, if your defect only occurs when using `afl-fuzz` (e.g., when using
`CMPLOG` which cannot be tested outside of `afl-fuzz` due to its need for a
shared memory mapping being created for it to record its data), it is possible
to enable the creation of a core dump for post-mortem analysis.
Firstly check your `/proc/sys/kernel/core_pattern` configuration is simply set
to a filename (AFL++ encourages you to set it to the value 'core' in any case
since it doesn't want any handler applications getting in the way). Next set
`ulimit -c unlimited` to remove any size limitations for core files. Lastly,
when you `afl-fuzz` set the environment variable `AFL_DEBUG=1` to enable the
creation of the `core` file. The file should be created in the working directory
of the target application. If there is an existing `core` file aleady there,
then it may not be overwritten.
Firstly, check if your `/proc/sys/kernel/core_pattern` configuration is set to a
filename (AFL++ encourages you to set it to the value `core` in any case since
it doesn't want any handler applications getting in the way).
Next, set `ulimit -c unlimited` to remove any size limitations for core files.
Lastly, when you `afl-fuzz`, set the environment variable `AFL_DEBUG=1` to
enable the creation of the `core` file. The file should be created in the
working directory of the target application. If there is an existing `core` file
already there, then it may not be overwritten.
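Putting these steps together, a sketch (the input/output directories and the
harness name are placeholders):

```bash
# As root: write core files to a plain file named 'core'.
sudo sh -c 'echo core >/proc/sys/kernel/core_pattern'

# In the shell that will run the fuzzer: remove any core file size limit.
ulimit -c unlimited

# Run the fuzzer with AFL_DEBUG=1 so that a core file is produced on a crash.
AFL_DEBUG=1 afl-fuzz -O -i in -o out -- ./harness @@
```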
## Reach out
# Reach out
Get in touch on discord and ask for help. The groups are pretty active so
someone may well be able to offer some advice. Better still, if you are able to
create a minimal reproducer for your problem it will make it easier to diagnose
the issue.
create a minimal reproducer for your problem, it will make it easier to diagnose
the issue.

View File

@ -30,8 +30,7 @@ AFL_CFLAGS:=-Wno-unused-parameter \
LDFLAGS+=-shared \
-lpthread \
-lresolv \
-ldl
-lresolv
ifdef DEBUG
CFLAGS+=-Werror \
@ -46,6 +45,11 @@ FRIDA_BUILD_DIR:=$(BUILD_DIR)frida/
FRIDA_TRACE:=$(BUILD_DIR)afl-frida-trace.so
FRIDA_TRACE_EMBEDDED:=$(BUILD_DIR)afl-frida-trace-embedded
TARGET_CC?=$(CC)
TARGET_CXX?=$(CXX)
HOST_CC?=$(CC)
HOST_CXX?=$(CXX)
ifndef ARCH
ARCH=$(shell uname -m)
@ -71,19 +75,45 @@ ifdef DEBUG
endif
LDFLAGS+= -z noexecstack \
-Wl,--gc-sections \
-Wl,--exclude-libs,ALL
-Wl,--exclude-libs,ALL \
-ldl \
-lrt
LDSCRIPT:=-Wl,--version-script=$(PWD)frida.map
endif
ifeq "$(shell uname)" "Linux"
OS:=linux
ifneq "$(findstring musl, $(shell ldd --version 2>&1 | head -n 1))" ""
CFLAGS+= -D__MUSL__
endif
endif
ifneq "$(findstring android, $(shell $(CC) --version 2>/dev/null))" ""
OS:=android
ifneq "$(findstring aarch64, $(shell $(CC) --version 2>/dev/null))" ""
ARCH:=arm64
endif
ifneq "$(findstring arm, $(shell $(CC) --version 2>/dev/null))" ""
ARCH:=arm
endif
ifneq "$(findstring x86_64, $(shell $(CC) --version 2>/dev/null))" ""
ARCH:=x86_64
endif
ifneq "$(findstring i686, $(shell $(CC) --version 2>/dev/null))" ""
ARCH:=x86
endif
endif
ifeq "$(ARCH)" "armhf"
TARGET_CC:=arm-linux-gnueabihf-gcc
TARGET_CXX:=arm-linux-gnueabihf-g++
endif
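# Example usage (an assumption for illustration, not defined by this Makefile):
# selecting ARCH on the command line picks up the cross compilers above, e.g.:
#   make ARCH=armhf
# TARGET_CC/TARGET_CXX/HOST_CC/HOST_CXX can also be overridden explicitly.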
ifndef OS
$(error "Operating system unsupported")
endif
GUM_DEVKIT_VERSION=15.0.16
GUM_DEVKIT_VERSION=15.1.13
GUM_DEVKIT_FILENAME=frida-gumjs-devkit-$(GUM_DEVKIT_VERSION)-$(OS)-$(ARCH).tar.xz
GUM_DEVKIT_URL="https://github.com/frida/frida/releases/download/$(GUM_DEVKIT_VERSION)/$(GUM_DEVKIT_FILENAME)"
@ -168,7 +198,7 @@ $(GUM_DEVIT_HEADER): $(GUM_DEVKIT_TARBALL)
############################## AFL #############################################
$(AFL_COMPILER_RT_OBJ): $(AFL_COMPILER_RT_SRC)
$(CC) \
$(TARGET_CC) \
$(CFLAGS) \
$(AFL_CFLAGS) \
-I $(ROOT) \
@ -177,7 +207,7 @@ $(AFL_COMPILER_RT_OBJ): $(AFL_COMPILER_RT_SRC)
-c $<
$(AFL_PERFORMANCE_OBJ): $(AFL_PERFORMANCE_SRC)
$(CC) \
$(TARGET_CC) \
$(CFLAGS) \
$(AFL_CFLAGS) \
-I $(ROOT) \
@ -188,13 +218,13 @@ $(AFL_PERFORMANCE_OBJ): $(AFL_PERFORMANCE_SRC)
############################### JS #############################################
$(BIN2C): $(BIN2C_SRC)
$(CC) -D_GNU_SOURCE -o $@ $<
$(HOST_CC) -D_GNU_SOURCE -o $@ $<
$(JS_SRC): $(JS) $(BIN2C)| $(BUILD_DIR)
cd $(JS_DIR) && $(BIN2C) api_js $(JS) $@
$(JS_OBJ): $(JS_SRC) GNUmakefile
$(CC) \
$(TARGET_CC) \
$(CFLAGS) \
-I $(ROOT)include \
-I $(FRIDA_BUILD_DIR) \
@ -206,7 +236,7 @@ $(JS_OBJ): $(JS_SRC) GNUmakefile
define BUILD_SOURCE
$(2): $(1) $(INCLUDES) GNUmakefile | $(OBJ_DIR)
$(CC) \
$(TARGET_CC) \
$(CFLAGS) \
-I $(ROOT)include \
-I $(FRIDA_BUILD_DIR) \
@ -220,7 +250,7 @@ $(foreach src,$(SOURCES),$(eval $(call BUILD_SOURCE,$(src),$(OBJ_DIR)$(notdir $(
######################## AFL-FRIDA-TRACE #######################################
$(FRIDA_TRACE): $(GUM_DEVIT_LIBRARY) $(GUM_DEVIT_HEADER) $(OBJS) $(JS_OBJ) $(AFL_COMPILER_RT_OBJ) $(AFL_PERFORMANCE_OBJ) GNUmakefile | $(BUILD_DIR)
$(CXX) \
$(TARGET_CXX) \
$(OBJS) \
$(JS_OBJ) \
$(GUM_DEVIT_LIBRARY) \
@ -235,10 +265,10 @@ $(FRIDA_TRACE): $(GUM_DEVIT_LIBRARY) $(GUM_DEVIT_HEADER) $(OBJS) $(JS_OBJ) $(AFL
############################# HOOK #############################################
$(AFLPP_FRIDA_DRIVER_HOOK_OBJ): $(AFLPP_FRIDA_DRIVER_HOOK_SRC) $(GUM_DEVIT_HEADER) | $(BUILD_DIR)
$(CC) $(CFLAGS) $(LDFLAGS) -I $(FRIDA_BUILD_DIR) $< -o $@
$(TARGET_CC) $(CFLAGS) $(LDFLAGS) -I $(FRIDA_BUILD_DIR) $< -o $@
$(AFLPP_QEMU_DRIVER_HOOK_OBJ): $(AFLPP_QEMU_DRIVER_HOOK_SRC) | $(BUILD_DIR)
$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
$(TARGET_CC) $(CFLAGS) $(LDFLAGS) $< -o $@
hook: $(AFLPP_FRIDA_DRIVER_HOOK_OBJ) $(AFLPP_QEMU_DRIVER_HOOK_OBJ)

View File

@ -1,8 +1,9 @@
# Map Density
# Map density
## How coverage works
# How Coverage Works
The coverage in AFL++ works by assigning each basic block of code a unique ID
and during execution when transitioning between blocks (e.g. by calls or jumps)
and during execution when transitioning between blocks (e.g., by calls or jumps)
assigning each of these edges an ID based upon the source and destination block
ID.
@ -13,11 +14,12 @@ A single dimensional cumulative byte array is also constructed where each byte
again represents an individual edge ID, but this time, the value of the byte
represents a range of how many times that edge has been traversed.
```1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+```
`1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+`
The theory is that a new path isn't particularly interesting if an edge has been
traversed `23` instead of `24` times for example, but is interesting if an edge
has been traversed for the very first time, or the number of times fits within a different bucket.
has been traversed for the very first time or the number of times fits within a
different bucket.
After each run, the count of times each edge is hit is compared to the values in
the cumulative map and if it is different, then the input is kept as a new seed
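As a rough illustration (a sketch only, not the actual AFL++ implementation),
the bucketing could be expressed as:

```c
#include <stdint.h>

/* Sketch only: map a raw edge hit count onto the coarse buckets described
   above. The bucket indices are arbitrary; what matters is that counts within
   the same range compare as equal. */
static uint8_t hit_bucket(uint32_t hits) {

  if (hits <= 3) return hits;   /* 0, 1, 2 and 3 each keep their own bucket */
  if (hits <= 7) return 4;      /* 4-7    */
  if (hits <= 15) return 5;     /* 8-15   */
  if (hits <= 31) return 6;     /* 16-31  */
  if (hits <= 127) return 7;    /* 32-127 */
  return 8;                     /* 128+   */

}
```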
@ -27,19 +29,22 @@ This mechanism is described in greater detail in the seminal
[paper](https://lcamtuf.coredump.cx/afl/technical_details.txt) on AFL by
[lcamtuf](https://github.com/lcamtuf).
# Collisions
## Collisions
In black-box fuzzing, we must assume that control may flow from any block to any
other block, since we don't know any better. Thus for a target with `n` basic
other block, since we don't know any better. Thus, for a target with `n` basic
blocks of code, there are `n * n` potential edges. As we can see, even with a
small number of edges, a very large map will be required so that we have space
to fit them all. Even if our target only had `1024` blocks, this would require a
map containing `1048576` entries (or 1Mb in size).
Whilst this may not seem like a lot of memory, it causes problems for two reasons. Firstly, the processing step after each execution must now process much more
data, and secondly a map this size is unlikely to fit within the L2 cache of the processor. Since this is a very hot code path, we are likely to pay a very heavy
performance cost.
Whilst this may not seem like a lot of memory, it causes problems for two
reasons. Firstly, the processing step after each execution must now process much
more data, and secondly, a map this size is unlikely to fit within the L2 cache
of the processor. Since this is a very hot code path, we are likely to pay a
very heavy performance cost.
Therefore, we must accept that not all edges can have a unique and that
Therefore, we must accept that not all edges can have a unique ID and that
therefore there will be collisions. This means that if the fuzzer finds a new
path by uncovering an edge which was not previously found, but that the same
edge ID is used by another edge, then it may go completely unnoticed. This is
@ -47,15 +52,15 @@ obviously undesirable, but equally if our map is too large, then we will not be
able to process as many potential inputs in the same time and hence not uncover
edges for that reason. Thus a careful trade-off of map size must be made.
# Block & Edge Numbering
## Block & edge numbering
Since the original AFL, blocks and edges have always been numbered in the same
way as we can see from the following C snippet from the whitepaper.
way as we can see from the following C snippet from the whitepaper:
```c
cur_location = (block_address >> 4) ^ (block_address << 8);
shared_mem[cur_location ^ prev_location]++;
prev_location = cur_location >> 1;
cur_location = (block_address >> 4) ^ (block_address << 8);
shared_mem[cur_location ^ prev_location]++;
prev_location = cur_location >> 1;
```
Each block ID is generated by performing a shift and XOR on its address. Then
@ -63,36 +68,39 @@ the edge ID is calculated as `E = B ^ (B' >> 1)`. Here, we can make two
observations. In fact, the edge ID is also masked to ensure it is less than the
size of the map being used.
## Block IDs
### Block IDs
Firstly, the block ID doesn't have very good entropy. If we consider the address
of the block, then whilst each block has a unique ID, it isn't necessarily very
evenly distributed.
We start with a large address, and need to discard a large number of the bits to
We start with a large address and need to discard a large number of the bits to
generate a block ID which is within range. But how do we choose the unique bits
of the address verus those which are the same for every block? The high bits of
the address may simply be all `0s` or all `1s` to make the address cannonical,
the middle portion of the address may be the same for all blocks (since if they
are all within the same binary, then they will all be adjacent in memory), and
on some systems, even the low bits may have poor entropy as some use fixed
length aligned instructions. Then we need to consider that a portion of each
binary may contain the `.data` or `.bss` sections and so may not contain any
blocks of code at all.
of the address versus those which are the same for every block? The high bits of
the address may be all `0s` or all `1s` to make the address canonical, the
middle portion of the address may be the same for all blocks (since if they are
all within the same binary, then they will all be adjacent in memory), and on
some systems, even the low bits may have poor entropy as some use fixed length
aligned instructions. Then we need to consider that a portion of each binary may
contain the `.data` or `.bss` sections and so may not contain any blocks of code
at all.
### Edge IDs
## Edge IDs
Secondly, we can observe that when we generate an edge ID from the source and
destination block IDs, we perform a right shift on the source block ID. Whilst
there are good reasons as set out in the whitepaper why such a transform is
applied, in so doing, we dispose of `1` bit of precious entropy in our source
applied, in doing so, we dispose of `1` bit of precious entropy in our source
block ID.
All together, this means that some edge IDs may be more popular than others.
This means that some portions of the map may be very densly populated with large
numbers of edges, whilst others may be very sparsely populated, or not populated
at all.
This means that some portions of the map may be very densely populated with
large numbers of edges, whilst others may be very sparsely populated, or not
populated at all.
# Improvements
One of the main reaons why this algorithm selected, is performance. All of the
## Improvements
One of the main reasons why this algorithm was selected is performance. All of the
operations are very quick to perform and given we may be carrying this out for
every block of code we execute, performance is critical.
@ -106,23 +114,25 @@ only need to generate this ID once per block and so this ID generation no longer
needs to be as performant. We can therefore use a hash algorithm to generate
this ID and ensure that the block IDs are more evenly distributed.
Edge IDs however, can only be determined at run-time. Since we don't know which
Edge IDs, however, can only be determined at run-time, since we don't know which
blocks a given input will traverse until we run it. However, given our block IDs
are now evenly distributed, generating an evenly distributed edge ID becomes
simple. Here, the only change we make is to use a rotate operation rather than
a shift operation so we don't lose a bit of entropy from the source ID.
simple. Here, the only change we make is to use a rotate operation rather than a
shift operation so we don't lose a bit of entropy from the source ID.
So our new algorithm becomes:
```c
cur_location = hash(block_address)
shared_mem[cur_location ^ prev_location]++;
prev_location = rotate(cur_location, 1);
cur_location = hash(block_address)
shared_mem[cur_location ^ prev_location]++;
prev_location = rotate(cur_location, 1);
```
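Here, `hash()` and `rotate()` are placeholders. As a minimal sketch (not the
actual AFL++ code), the rotate step for a 32-bit ID could be written as:

```c
#include <stdint.h>

/* Sketch only: rotate left by one bit so that, unlike `>> 1`, no bit of the
   source block ID is discarded. */
static inline uint32_t rotl1(uint32_t x) {

  return (x << 1) | (x >> 31);

}
```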
Lastly, in the original design, the `cur_location` was always set to `0` at the
beginning of a run; we instead set the value of `cur_location` to `hash(0)`.
# Parallel Fuzzing
## Parallel fuzzing
Another sub-optimal aspect of the original design is that no matter how many
instances of the fuzzer you ran in parallel, each instance numbered each block
and so each edge with the same ID. Each instance would therefore find the same
@ -144,4 +154,4 @@ If only a single new edge is found, and the new path is shared with an instance
for which that edge collides, that instance may disregard it as irrelevant. In
practice, however, the discovery of a single new edge, likely leads to several
more edges beneath it also being found and therefore the likelihood of all of
these being collisions is very slim.
these being collisions is very slim.

View File

@ -1,38 +1,34 @@
# FRIDA MODE
# FRIDA mode
The purpose of FRIDA mode is to provide an alternative binary only fuzzer for
AFL just like that provided by QEMU mode. The intention is to provide a very
AFL++ just like that provided by QEMU mode. The intention is to provide a very
similar user experience, right down to the options provided through environment
variables.
Whilst AFL++ already has some support for running on FRIDA [here](https://github.com/AFLplusplus/AFLplusplus/tree/stable/utils/afl_frida),
this requires the code to be fuzzed to be provided as a shared library; it
cannot be used to fuzz executables. Additionally, it requires the user to write
a small harness around their target code of interest.
FRIDA mode instead takes a different approach to avoid these limitations.
In Frida mode binary programs are instrumented, similarly to QEMU mode.
In FRIDA mode, binary programs are instrumented, similarly to QEMU mode.
## Current Progress
## Current progress
As FRIDA mode is new, it is missing a lot of features. The design is such that it
should be possible to add these features in a similar manner to QEMU mode and
As FRIDA mode is new, it is missing a lot of features. The design is such that
it should be possible to add these features in a similar manner to QEMU mode and
perhaps leverage some of its design and implementation.
| Feature/Instrumentation | frida-mode | Notes |
| -------------------------|:----------:|:--------------------------------------------:|
| NeverZero | x | |
| Persistent Mode | x | (x86/x64/aarch64 only) |
| LAF-Intel / CompCov | - | (CMPLOG is better 90% of the time) |
| CMPLOG | x | (x86/x64/aarch64 only) |
| Selective Instrumentation| x | |
| Non-Colliding Coverage | - | (Not possible in binary-only instrumentation |
| Ngram prev_loc Coverage | - | |
| Context Coverage | - | |
| Auto Dictionary | - | |
| Snapshot LKM Support | - | |
| In-Memory Test Cases | x | (x86/x64/aarch64 only) |
| Feature/Instrumentation | FRIDA mode | Notes |
| -------------------------|:----------:|:---------------------------------------------:|
| NeverZero | x | |
| Persistent Mode | x | (x86/x64/aarch64 only) |
| LAF-Intel / CompCov | - | (CMPLOG is better 90% of the time) |
| CMPLOG | x | (x86/x64/aarch64 only) |
| Selective Instrumentation| x | |
| Non-Colliding Coverage | - | (not possible in binary-only instrumentation) |
| Ngram prev_loc Coverage | - | |
| Context Coverage | - | |
| Auto Dictionary | - | |
| Snapshot LKM Support | - | |
| In-Memory Test Cases | x | (x86/x64/aarch64 only) |
## Compatibility
Currently, FRIDA mode supports Linux and macOS targets on both x86/x64 and
aarch64 architectures. Later releases may add support for aarch32 and Windows
targets as well as embedded Linux environments.
@ -41,46 +37,58 @@ FRIDA has been used on various embedded targets using both uClibc and musl C
runtime libraries, so porting should be possible. However, the current build
system does not support cross compilation.
## Getting Started
## Getting started
To build everything run `make`. To build for x86 run `make 32`. Note that in
To build everything, run `make`. To build for x86, run `make 32`. Note that in
x86 (32-bit) mode, it is not necessary for afl-fuzz to be built for 32-bit. However,
the shared library for frida_mode must be since it is injected into the target
the shared library for FRIDA mode must be since it is injected into the target
process.
Various tests can be found in subfolders within the `test/` directory. To use
these, first run `make` to build any dependencies. Then run `make qemu` or
`make frida` to run on either QEMU of FRIDA mode respectively. To run frida
tests in 32-bit mode, run `make ARCH=x86 frida`. When switching between
architectures it may be necessary to run `make clean` first for a given build
target to remove previously generated binaries for a different architecture.
these, first run `make` to build any dependencies. Then run `make qemu` or `make
frida` to run on either QEMU or FRIDA mode respectively. To run FRIDA tests in
32-bit mode, run `make ARCH=x86 frida`. When switching between architectures, it
may be necessary to run `make clean` first for a given build target to remove
previously generated binaries for a different architecture.
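For example (a sketch; `<sample>` is a placeholder for one of the subfolders
under `test/`, and the layout of individual samples may differ):

```bash
# Build FRIDA mode itself (run from the frida_mode/ directory).
make

# Build a sample's dependencies, then run it under FRIDA mode.
cd test/<sample>
make
make frida
```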
### Android
In order to build, you need to download the Android NDK:
[https://developer.android.com/ndk/downloads](https://developer.android.com/ndk/downloads)
Then create a standalone toolchain locally as follows:
[https://developer.android.com/ndk/guides/standalone_toolchain](https://developer.android.com/ndk/guides/standalone_toolchain)
## Usage
FRIDA mode added some small modifications to `afl-fuzz` and similar tools
in AFLplusplus. The intention was that it behaves identically to QEMU, but it uses
the 'O' switch rather than 'Q'. Whilst the options 'f', 'F', 's' or 'S' may have
FRIDA mode added some small modifications to `afl-fuzz` and similar tools in
AFL++. The intention was that it behaves identically to QEMU, but it uses the
'O' switch rather than 'Q'. Whilst the options 'f', 'F', 's' or 'S' may have
made more sense for a mode powered by FRIDA Stalker, they were all taken, so
instead we use 'O' in hommage to the [author](https://github.com/oleavr) of
instead we use 'O' in homage to the [author](https://github.com/oleavr) of
FRIDA.
Similarly, the intention is to mimic the use of environment variables used by
QEMU where possible (by replacing `s/QEMU/FRIDA/g`). Accordingly, the
following options are currently supported:
QEMU where possible (by replacing `s/QEMU/FRIDA/g`). Accordingly, the following
options are currently supported:
* `AFL_FRIDA_DEBUG_MAPS` - See `AFL_QEMU_DEBUG_MAPS`
* `AFL_FRIDA_EXCLUDE_RANGES` - See `AFL_QEMU_EXCLUDE_RANGES`
* `AFL_FRIDA_INST_RANGES` - See `AFL_QEMU_INST_RANGES`
* `AFL_FRIDA_PERSISTENT_ADDR` - See `AFL_QEMU_PERSISTENT_ADDR`
* `AFL_FRIDA_PERSISTENT_CNT` - See `AFL_QEMU_PERSISTENT_CNT`
* `AFL_FRIDA_PERSISTENT_HOOK` - See `AFL_QEMU_PERSISTENT_HOOK`
* `AFL_FRIDA_PERSISTENT_RET` - See `AFL_QEMU_PERSISTENT_RET`
* `AFL_FRIDA_DEBUG_MAPS` - See `AFL_QEMU_DEBUG_MAPS`.
* `AFL_FRIDA_EXCLUDE_RANGES` - See `AFL_QEMU_EXCLUDE_RANGES`.
* `AFL_FRIDA_INST_RANGES` - See `AFL_QEMU_INST_RANGES`.
* `AFL_FRIDA_PERSISTENT_ADDR` - See `AFL_QEMU_PERSISTENT_ADDR`.
* `AFL_FRIDA_PERSISTENT_CNT` - See `AFL_QEMU_PERSISTENT_CNT`.
* `AFL_FRIDA_PERSISTENT_HOOK` - See `AFL_QEMU_PERSISTENT_HOOK`.
* `AFL_FRIDA_PERSISTENT_RET` - See `AFL_QEMU_PERSISTENT_RET`.
To enable the powerful CMPLOG mechanism, set `-c 0` for `afl-fuzz`.
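A typical invocation might therefore look like this (a sketch; `in/`, `out/`
and `./target` are placeholders):

```bash
# -O selects FRIDA mode; -c 0 additionally enables CMPLOG on the target itself.
afl-fuzz -O -c 0 -i in -o out -- ./target @@
```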
## Scripting
One of the more powerful features of FRIDA mode is it's support for configuration by JavaScript, rather than using environment variables. For details of how this works see [here](Scripting.md).
One of the more powerful features of FRIDA mode is its support for
configuration by JavaScript, rather than using environment variables. For
details of how this works, see [Scripting.md](Scripting.md).
## Performance
@ -104,16 +112,18 @@ FRIDA mode is supported by using `LD_PRELOAD` (`DYLD_INSERT_LIBRARIES` on macOS)
to inject a shared library (`afl-frida-trace.so`) into the target. This shared
library is built using the [frida-gum](https://github.com/frida/frida-gum)
devkit from the [FRIDA](https://github.com/frida/frida) project. One of the
components of frida-gum is [Stalker](https://medium.com/@oleavr/anatomy-of-a-code-tracer-b081aadb0df8),
components of frida-gum is
[Stalker](https://medium.com/@oleavr/anatomy-of-a-code-tracer-b081aadb0df8),
which allows the dynamic instrumentation of running code for AARCH32, AARCH64,
x86 and x64 architectures. Implementation details can be found
[here](https://frida.re/docs/stalker/).
Dynamic instrumentation is used to augment the target application with similar
coverage information to that inserted by `afl-gcc` or `afl-clang`. The shared
library is also linked to the `compiler-rt` component of AFLplusplus to feedback
this coverage information to AFL++ and also provide a fork server. It also makes
use of the FRIDA [prefetch](https://github.com/frida/frida-gum/blob/56dd9ba3ee9a5511b4b0c629394bf122775f1ab7/gum/gumstalker.h#L115)
library is also linked to the `compiler-rt` component of AFL++ to feedback this
coverage information to AFL++ and also provide a fork server. It also makes use
of the FRIDA
[prefetch](https://github.com/frida/frida-gum/blob/56dd9ba3ee9a5511b4b0c629394bf122775f1ab7/gum/gumstalker.h#L115)
support to feedback instrumented blocks from the child to the parent using a
shared memory region to avoid the need to regenerate instrumented blocks on each
fork.
@ -131,210 +141,239 @@ instances run CMPLOG mode and instrumentation of the binary is less frequent
(only on CMP, SUB and CALL instructions), performance is not quite so critical.
## Advanced configuration options
* `AFL_FRIDA_INST_COVERAGE_FILE` - File to write DynamoRio format coverage
information (e.g. to be loaded within IDA lighthouse).
* `AFL_FRIDA_DRIVER_NO_HOOK` - See `AFL_QEMU_DRIVER_NO_HOOK`. When using the
QEMU driver to provide a `main` loop for a user provided
`LLVMFuzzerTestOneInput`, this option configures the driver to read input from
`stdin` rather than using in-memory test cases.
* `AFL_FRIDA_INST_COVERAGE_FILE` - File to write DynamoRIO format coverage
information (e.g., to be loaded within IDA lighthouse).
* `AFL_FRIDA_INST_DEBUG_FILE` - File to write raw assembly of original blocks
and their instrumented counterparts during block compilation.
```
***
and their instrumented counterparts during block compilation.
Creating block for 0x7ffff7953313:
0x7ffff7953313 mov qword ptr [rax], 0
0x7ffff795331a add rsp, 8
0x7ffff795331e ret
```
***
Generated block 0x7ffff75e98e2
0x7ffff75e98e2 mov qword ptr [rax], 0
0x7ffff75e98e9 add rsp, 8
0x7ffff75e98ed lea rsp, [rsp - 0x80]
0x7ffff75e98f5 push rcx
0x7ffff75e98f6 movabs rcx, 0x7ffff795331e
0x7ffff75e9900 jmp 0x7ffff75e9384
Creating block for 0x7ffff7953313:
0x7ffff7953313 mov qword ptr [rax], 0
0x7ffff795331a add rsp, 8
0x7ffff795331e ret
Generated block 0x7ffff75e98e2
0x7ffff75e98e2 mov qword ptr [rax], 0
0x7ffff75e98e9 add rsp, 8
0x7ffff75e98ed lea rsp, [rsp - 0x80]
0x7ffff75e98f5 push rcx
0x7ffff75e98f6 movabs rcx, 0x7ffff795331e
0x7ffff75e9900 jmp 0x7ffff75e9384
***
```
***
```
* `AFL_FRIDA_INST_JIT` - Enable the instrumentation of Just-In-Time compiled
code. Code is considered to be JIT if the executable segment is not backed by a
file.
code. Code is considered to be JIT if the executable segment is not backed by
a file.
* `AFL_FRIDA_INST_NO_OPTIMIZE` - Don't use optimized inline assembly coverage
instrumentation (the default where available). Required to use
`AFL_FRIDA_INST_TRACE`.
* `AFL_FRIDA_INST_NO_PREFETCH` - Disable prefetching. By default the child will
report instrumented blocks back to the parent so that it can also instrument
them and they be inherited by the next child on fork, implies
`AFL_FRIDA_INST_NO_PREFETCH_BACKPATCH`.
instrumentation (the default where available). Required to use
`AFL_FRIDA_INST_TRACE`.
* `AFL_FRIDA_INST_NO_BACKPATCH` - Disable backpatching. At the end of executing
each block, control will return to FRIDA to identify the next block to
execute.
* `AFL_FRIDA_INST_NO_PREFETCH` - Disable prefetching. By default, the child will
report instrumented blocks back to the parent so that it can also instrument
them and they can be inherited by the next child on fork, implies
`AFL_FRIDA_INST_NO_PREFETCH_BACKPATCH`.
* `AFL_FRIDA_INST_NO_PREFETCH_BACKPATCH` - Disable prefetching of stalker
backpatching information. By default the child will report applied backpatches
to the parent so that they can be applied and then be inherited by the next
child on fork.
backpatching information. By default, the child will report applied
backpatches to the parent so that they can be applied and then be inherited by
the next child on fork.
* `AFL_FRIDA_INST_SEED` - Sets the initial seed for the hash function used to
generate block (and hence edge) IDs. Setting this to a constant value may be
useful for debugging purposes, e.g. investigating unstable edges.
* `AFL_FRIDA_INST_TRACE` - Log to stdout the address of executed blocks,
implies `AFL_FRIDA_INST_NO_OPTIMIZE`.
generate block (and hence edge) IDs. Setting this to a constant value may be
useful for debugging purposes, e.g., investigating unstable edges.
* `AFL_FRIDA_INST_TRACE` - Log to stdout the address of executed blocks, implies
`AFL_FRIDA_INST_NO_OPTIMIZE`.
* `AFL_FRIDA_INST_TRACE_UNIQUE` - As per `AFL_FRIDA_INST_TRACE`, but each edge
is logged only once, requires `AFL_FRIDA_INST_NO_OPTIMIZE`.
* `AFL_FRIDA_INST_UNSTABLE_COVERAGE_FILE` - File to write DynamoRio format
coverage information for unstable edges (e.g. to be loaded within IDA
lighthouse).
is logged only once, requires `AFL_FRIDA_INST_NO_OPTIMIZE`.
* `AFL_FRIDA_INST_UNSTABLE_COVERAGE_FILE` - File to write DynamoRIO format
coverage information for unstable edges (e.g., to be loaded within IDA
lighthouse).
* `AFL_FRIDA_JS_SCRIPT` - Set the script to be loaded by the FRIDA scripting
engine. See [Scripting.md](Scripting.md) for details.
* `AFL_FRIDA_OUTPUT_STDOUT` - Redirect the standard output of the target
application to the named file (supersedes the setting of `AFL_DEBUG_CHILD`)
application to the named file (supersedes the setting of `AFL_DEBUG_CHILD`).
* `AFL_FRIDA_OUTPUT_STDERR` - Redirect the standard error of the target
application to the named file (supersedes the setting of `AFL_DEBUG_CHILD`)
application to the named file (supersedes the setting of `AFL_DEBUG_CHILD`).
* `AFL_FRIDA_PERSISTENT_DEBUG` - Insert a Breakpoint into the instrumented code
at `AFL_FRIDA_PERSISTENT_HOOK` and `AFL_FRIDA_PERSISTENT_RET` to allow the user
to detect issues in the persistent loop using a debugger.
at `AFL_FRIDA_PERSISTENT_HOOK` and `AFL_FRIDA_PERSISTENT_RET` to allow the
user to detect issues in the persistent loop using a debugger.
```
```
gdb \
--ex 'set environment AFL_FRIDA_PERSISTENT_ADDR=XXXXXXXXXX' \
--ex 'set environment AFL_FRIDA_PERSISTENT_RET=XXXXXXXXXX' \
--ex 'set environment AFL_FRIDA_PERSISTENT_DEBUG=1' \
--ex 'set environment AFL_DEBUG_CHILD=1' \
--ex 'set environment LD_PRELOAD=afl-frida-trace.so' \
--args <my-executable> [my arguments]
```
gdb \
--ex 'set environment AFL_FRIDA_PERSISTENT_ADDR=XXXXXXXXXX' \
--ex 'set environment AFL_FRIDA_PERSISTENT_RET=XXXXXXXXXX' \
--ex 'set environment AFL_FRIDA_PERSISTENT_DEBUG=1' \
--ex 'set environment AFL_DEBUG_CHILD=1' \
--ex 'set environment LD_PRELOAD=afl-frida-trace.so' \
--args <my-executable> [my arguments]
```
* `AFL_FRIDA_SECCOMP_FILE` - Write a log of any syscalls made by the target to
the specified file.
the specified file.
* `AFL_FRIDA_STALKER_ADJACENT_BLOCKS` - Configure the number of adjacent blocks
to fetch when generating instrumented code. Fetching blocks in the same order
they appear in the original program, rather than in the order of execution,
should help improve locality and adjacency. This includes allowing us to vector
between adjacent blocks using a NOP slide rather than an immediate branch.
* `AFL_FRIDA_STALKER_IC_ENTRIES` - Configure the number of inline cache entries
stored along-side branch instructions which provide a cache to avoid having to
call back into FRIDA to find the next block. Default is 32.
stored alongside branch instructions which provide a cache to avoid having to
call back into FRIDA to find the next block. Default is 32.
* `AFL_FRIDA_STATS_FILE` - Write statistics information about the code being
instrumented to the given file name. The statistics are written only for the
child process when new block is instrumented (when the
`AFL_FRIDA_STATS_INTERVAL` has expired). Note that simply because a new path is
found does not mean a new block needs to be compiled. It could simply be that
the existing blocks instrumented have been executed in a different order.
```
stats
-----
Time 2021-07-21 11:45:49
Elapsed 1 seconds
instrumented to the given file name. The statistics are written only for the
child process when a new block is instrumented (when the
`AFL_FRIDA_STATS_INTERVAL` has expired). Note that just because a new path is
found does not mean a new block needs to be compiled. It could be that the
existing blocks instrumented have been executed in a different order.
```
stats
-----
Time 2021-07-21 11:45:49
Elapsed 1 seconds
Transitions cumulative delta
----------- ---------- -----
total 753619 17645
call_imm 9193 ( 1.22%) 344 ( 1.95%) [ 344/s]
call_reg 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
call_mem 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
ret_slow_path 67974 ( 9.02%) 2988 (16.93%) [ 2988/s]
post_call_invoke 7996 ( 1.06%) 299 ( 1.69%) [ 299/s]
excluded_call_imm 3804 ( 0.50%) 200 ( 1.13%) [ 200/s]
jmp_imm 5445 ( 0.72%) 255 ( 1.45%) [ 255/s]
jmp_reg 42081 ( 5.58%) 1021 ( 5.79%) [ 1021/s]
jmp_mem 578092 (76.71%) 10956 (62.09%) [ 10956/s]
jmp_cond_imm 38951 ( 5.17%) 1579 ( 8.95%) [ 1579/s]
jmp_cond_mem 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
jmp_cond_reg 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
jmp_cond_jcxz 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
jmp_continuation 84 ( 0.01%) 3 ( 0.02%) [ 3/s]
Transitions cumulative delta
----------- ---------- -----
total 753619 17645
call_imm 9193 ( 1.22%) 344 ( 1.95%) [ 344/s]
call_reg 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
call_mem 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
ret_slow_path 67974 ( 9.02%) 2988 (16.93%) [ 2988/s]
post_call_invoke 7996 ( 1.06%) 299 ( 1.69%) [ 299/s]
excluded_call_imm 3804 ( 0.50%) 200 ( 1.13%) [ 200/s]
jmp_imm 5445 ( 0.72%) 255 ( 1.45%) [ 255/s]
jmp_reg 42081 ( 5.58%) 1021 ( 5.79%) [ 1021/s]
jmp_mem 578092 (76.71%) 10956 (62.09%) [ 10956/s]
jmp_cond_imm 38951 ( 5.17%) 1579 ( 8.95%) [ 1579/s]
jmp_cond_mem 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
jmp_cond_reg 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
jmp_cond_jcxz 0 ( 0.00%) 0 ( 0.00%) [ 0/s]
jmp_continuation 84 ( 0.01%) 3 ( 0.02%) [ 3/s]
Instrumentation
---------------
Instructions 7907
Blocks 1764
Avg Instructions / Block 4
Instrumentation
---------------
Instructions 7907
Blocks 1764
Avg Instructions / Block 4
EOB Instructions
----------------
Total 1763 (22.30%)
Call Immediates 358 ( 4.53%)
Call Immediates Excluded 74 ( 0.94%)
Call Register 0 ( 0.00%)
Call Memory 0 ( 0.00%)
Jump Immediates 176 ( 2.23%)
Jump Register 8 ( 0.10%)
Jump Memory 10 ( 0.13%)
Conditional Jump Immediates 1051 (13.29%)
Conditional Jump CX Immediate 0 ( 0.00%)
Conditional Jump Register 0 ( 0.00%)
Conditional Jump Memory 0 ( 0.00%)
Returns 160 ( 2.02%)
EOB Instructions
----------------
Total 1763 (22.30%)
Call Immediates 358 ( 4.53%)
Call Immediates Excluded 74 ( 0.94%)
Call Register 0 ( 0.00%)
Call Memory 0 ( 0.00%)
Jump Immediates 176 ( 2.23%)
Jump Register 8 ( 0.10%)
Jump Memory 10 ( 0.13%)
Conditional Jump Immediates 1051 (13.29%)
Conditional Jump CX Immediate 0 ( 0.00%)
Conditional Jump Register 0 ( 0.00%)
Conditional Jump Memory 0 ( 0.00%)
Returns 160 ( 2.02%)
Relocated Instructions
----------------------
Total 232 ( 2.93%)
addsd 2 ( 0.86%)
cmp 46 (19.83%)
comisd 2 ( 0.86%)
divsd 2 ( 0.86%)
divss 2 ( 0.86%)
lea 142 (61.21%)
mov 32 (13.79%)
movsd 2 ( 0.86%)
ucomisd 2 ( 0.86%)
```
Relocated Instructions
----------------------
Total 232 ( 2.93%)
addsd 2 ( 0.86%)
cmp 46 (19.83%)
comisd 2 ( 0.86%)
divsd 2 ( 0.86%)
divss 2 ( 0.86%)
lea 142 (61.21%)
mov 32 (13.79%)
movsd 2 ( 0.86%)
ucomisd 2 ( 0.86%)
```
* `AFL_FRIDA_STATS_INTERVAL` - The maximum frequency to output statistics
information. Stats will be written whenever they are updated if the given
interval has elapsed since last time they were written.
information. Stats will be written whenever they are updated if the given
interval has elapsed since the last time they were written.
* `AFL_FRIDA_TRACEABLE` - Set the child process to be traceable by any process
to aid debugging and overcome the restrictions imposed by YAMA. Supported on
Linux only. Permits a non-root user to use `gcore` or similar to collect a
core dump of the instrumented target. Note that in order to capture the core
dump you must set a sufficient timeout (using `-t`) to avoid `afl-fuzz`
killing the process whilst it is being dumped.
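Several of the options above can be combined in a single run; for example (a
sketch; the file names are placeholders):

```bash
# Collect DynamoRIO-format coverage and instrumentation statistics while fuzzing.
AFL_FRIDA_INST_COVERAGE_FILE=cov.drcov \
AFL_FRIDA_STATS_FILE=stats.txt \
afl-fuzz -O -i in -o out -- ./target @@
```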
## FASAN - Frida Address Sanitizer Mode
Frida mode also supports FASAN. The design of this is actually quite simple and
## FASAN - FRIDA Address Sanitizer mode
FRIDA mode also supports FASAN. The design of this is actually quite simple and
very similar to that used when instrumenting applications compiled from source.
### Address Sanitizer Basics
### Address Sanitizer basics
When Address Sanitizer is used to instrument programs built from source, the
compiler first adds a dependency (`DT_NEEDED` entry) for the Address Sanitizer
dynamic shared object (DSO). This shared object contains the main logic for Address
Sanitizer, including setting and managing up the shadow memory. It also provides
replacement implementations for a number of functions in standard libraries.
dynamic shared object (DSO). This shared object contains the main logic for
Address Sanitizer, including setting up and managing the shadow memory. It also
provides replacement implementations for a number of functions in standard
libraries.
These replacements include things like `malloc` and `free` which allows for those
allocations to be marked in the shadow memory, but also a number of other fuctions.
Consider `memcpy` for example, this is instrumented to validate the paramters
(test the source and destination buffers against the shadow memory. This is much
easier than instrumenting those standard libraries since, first it would require
you to re-compile them and secondly it would mean that the instrumentation would
be applied at a more expensive granular level. Lastly, load-widening (typically
found in highy optimized code) can also make this instrumentation more difficult.
These replacements include things like `malloc` and `free` which allow for
those allocations to be marked in the shadow memory, but also a number of other
functions. Consider `memcpy`, for example. This is instrumented to validate the
parameters (test the source and destination buffers against the shadow memory).
This is much easier than instrumenting those standard libraries, since first, it
would require you to re-compile them and secondly it would mean that the
instrumentation would be applied at a more expensive granular level. Lastly,
load-widening (typically found in highly optimized code) can also make this
instrumentation more difficult.
Since the DSO is loaded before all of the standard libraries (in fact it insists
on being first), the dynamic loader will use it to resolve imports from other
modules which depend on it.
### FASAN Implementation
### FASAN implementation
FASAN takes a similar approach. It requires the user to add the Address Sanitizer
DSO to the `AFL_PRELOAD` environment variable such that it is loaded into the target.
Again, it must be first in the list. This means that it is not necessary to
instrument the standard libraries to detect when an application has provided an
incorrect argument to `memcpy` for example. This avoids issues with load-widening
and should also mean a huge improvement in performance.
FASAN takes a similar approach. It requires the user to add the Address
Sanitizer DSO to the `AFL_PRELOAD` environment variable such that it is loaded
into the target. Again, it must be first in the list. This means that it is not
necessary to instrument the standard libraries to detect when an application has
provided an incorrect argument to `memcpy`, for example. This avoids issues with
load-widening and should also mean a huge improvement in performance.
FASAN then adds instrumentation for any instrucutions which use memory operands and
then calls into the `__asan_loadN` and `__asan_storeN` functions provided by the DSO
to validate memory accesses against the shadow memory.
FASAN then adds instrumentation for any instructions which use memory operands
and then calls into the `__asan_loadN` and `__asan_storeN` functions provided by
the DSO to validate memory accesses against the shadow memory.
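As a sketch, based only on the `AFL_PRELOAD` requirement described above (the
DSO name is platform-dependent and further FASAN-specific settings may be
required):

```bash
AFL_PRELOAD=libclang_rt.asan-x86_64.so \
afl-fuzz -O -i in -o out -- ./harness @@
```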
# Collisions
FRIDA mode has also introduced some improvements to reduce collisions in the map.
See [here](MapDensity.md) for details.
## Collisions
# OSX Library Fuzzing
An example of how to fuzz a dynamic library on OSX is included [here](test/osx-lib).
This requires the use of a simple test harness executable which will load the
library and call a target function within it. The dependent library can either
be loaded in using `dlopen` and `dlsym` in a function marked
`__attribute__((constructor()))` or the test harness can simply be linked
against it. It is important that the target library is loaded before execution
of `main`, since this is the point where FRIDA mode is initialized. Otherwise, it
will not be possible to configure coverage for the test library using
`AFL_FRIDA_INST_RANGES` or similar.
FRIDA mode has also introduced some improvements to reduce collisions in the
map. For details, see [MapDensity.md](MapDensity.md).
# Debugging
Please refer to the [debugging](#debugging) guide for assistant should you
encounter problems with FRIDA mode.
## OSX library fuzzing
## TODO
An example of how to fuzz a dynamic library on OSX is included, see
[test/osx-lib](test/osx-lib). This requires the use of a simple test harness
executable which will load the library and call a target function within it. The
dependent library can either be loaded in using `dlopen` and `dlsym` in a
function marked `__attribute__((constructor()))` or the test harness can be
linked against it. It is important that the target library is loaded before
execution of `main`, since this is the point where FRIDA mode is initialized.
Otherwise, it will not be possible to configure coverage for the test library
using `AFL_FRIDA_INST_RANGES` or similar.
## Debugging
Should you encounter problems with FRIDA mode, refer to
[DEBUGGING.md](DEBUGGING.md) for assistance.
## To do
The next features to be added are Aarch32 support as well as looking at
potential performance improvements. The intention is to achieve feature parity with
QEMU mode in due course. Contributions are welcome, but please get in touch to
ensure that efforts are deconflicted.
potential performance improvements. The intention is to achieve feature parity
with QEMU mode in due course. Contributions are welcome, but please get in touch
to ensure that efforts are deconflicted.

View File

@ -1,25 +1,32 @@
# Scripting
FRIDA now supports the ability to configure itself using JavaScript. This allows
the user to make use of the convenience of FRIDA's scripting engine (along with
its support for debug symbols and exports) to configure all of the things which
were traditionally configured using environment variables.
By default FRIDA mode will look for the file `afl.js` in the current working
By default, FRIDA mode will look for the file `afl.js` in the current working
directory of the target. Alternatively, a script file can be configured using
the environment variable `AFL_FRIDA_JS_SCRIPT`.
This script can make use of all of the standard [frida api functions](https://frida.re/docs/javascript-api/), but FRIDA mode adds some additional functions to allow
you to interact with FRIDA mode itself. These can all be accessed via the global
`Afl` parameter. e.g. `Afl.print("HELLO WORLD");`,
This script can make use of all of the standard [frida api
functions](https://frida.re/docs/javascript-api/), but FRIDA mode adds some
additional functions to allow you to interact with FRIDA mode itself. These can
all be accessed via the global `Afl` parameter, e.g., `Afl.print("HELLO
WORLD");`.
If you encounter a problem with your script, then you should set the environment
variable `AFL_DEBUG_CHILD=1` to view any diagnostic information.
## Example
# Example
Most of the time, users will likely be wanting to call the functions which configure an address (e.g. for the entry point, or the persistent address).
Most of the time, users will likely want to call the functions which
configure an address (e.g., for the entry point or the persistent address).
The example below uses the API [`DebugSymbol.fromName()`](https://frida.re/docs/javascript-api/#debugsymbol). Another use API is [`Module.getExportByName()`](https://frida.re/docs/javascript-api/#module).
The example below uses the API
[`DebugSymbol.fromName()`](https://frida.re/docs/javascript-api/#debugsymbol).
Another useful API is
[`Module.getExportByName()`](https://frida.re/docs/javascript-api/#module).
```js
/* Use Afl.print instead of console.log */
@ -86,9 +93,9 @@ Afl.done();
Afl.print("done");
```
# Stripped Binaries
## Stripped binaries
Lastly, if the binary you attempting to fuzz has no symbol information, and no
Lastly, if the binary you are attempting to fuzz has no symbol information and no
exports, then the following approach can be used.
```js
@ -98,11 +105,12 @@ const address = module.base.add(0xdeadface);
Afl.setPersistentAddress(address);
```
# Persisent Hook
## Persistent hook
A persistent hook can be implemented using a conventional shared object; sample
source code for a hook suitable for the prototype of `LLVMFuzzerTestOneInput`
can be found [here](hook/hook.c). This can be configured using code similar to
the following.
can be found in [hook/](hook/). This can be configured using code similar to the
following.
```js
const path = Afl.module.path;
@ -112,7 +120,8 @@ const hook = mod.getExportByName('afl_persistent_hook');
Afl.setPersistentHook(hook);
```
Alternatively, the hook can be provided by using FRIDAs built in support for `CModule`, powered by TinyCC.
Alternatively, the hook can be provided by using FRIDA's built-in support for
`CModule`, powered by TinyCC.
```js
const cm = new CModule(`
@ -134,8 +143,10 @@ const cm = new CModule(`
Afl.setPersistentHook(cm.afl_persistent_hook);
```
# Advanced Persistence
## Advanced persistence
Consider the following target code...
```c
#include <fcntl.h>
@ -246,7 +257,7 @@ FRIDA mode supports the replacement of any function, with an implementation
generated by CModule. This allows for a bespoke harness to be written as
follows:
```
```js
const slow = DebugSymbol.fromName('slow').address;
Afl.print(`slow: ${slow}`);
@ -281,15 +292,96 @@ Afl.done();
Here, we replace the function `slow` with our own code. This code is then
selected as the entry point as well as the persistent loop address.
**WARNING** There are two key limitations in replacing a function in this way:
- The function which is to be replaced must not be `main`. This is because this
  is the point at which FRIDA mode is initialized, and by the time the JS has
  been run, the start of the `main` function has already been instrumented and
  cached.
- The replacement function must not call itself, e.g., in this example, we
  couldn't replace `LLVMFuzzerTestOneInput` and have it call itself.
### Replacing LLVMFuzzerTestOneInput
The function `LLVMFuzzerTestOneInput` can be replaced just like any other. Also,
any replaced function can also call itself. In the example below, we replace
`LLVMFuzzerTestOneInput` with `My_LLVMFuzzerTestOneInput` which ignores the
parameters `buf` and `len` and then calls the original `LLVMFuzzerTestOneInput`
with the parameters `__afl_fuzz_ptr` and `__afl_fuzz_len`. This allows us to
carry out in-memory fuzzing without the need for any hook function. It should be
noted that the replacement function and the original *CANNOT* share the same
name, since otherwise the `C` code in the `CModule` will not compile due to a
symbol name collision.
```js
const LLVMFuzzerTestOneInput = DebugSymbol.fromName('LLVMFuzzerTestOneInput').address;
Afl.print(`LLVMFuzzerTestOneInput: ${LLVMFuzzerTestOneInput}`);
const cm = new CModule(`
extern unsigned char * __afl_fuzz_ptr;
extern unsigned int * __afl_fuzz_len;
extern void LLVMFuzzerTestOneInput(char *buf, int len);
void My_LLVMFuzzerTestOneInput(char *buf, int len) {
LLVMFuzzerTestOneInput(__afl_fuzz_ptr, *__afl_fuzz_len);
}
`,
{
LLVMFuzzerTestOneInput: LLVMFuzzerTestOneInput,
__afl_fuzz_ptr: Afl.getAflFuzzPtr(),
__afl_fuzz_len: Afl.getAflFuzzLen()
});
Afl.setEntryPoint(cm.My_LLVMFuzzerTestOneInput);
Afl.setPersistentAddress(cm.My_LLVMFuzzerTestOneInput);
Afl.setInMemoryFuzzing();
Interceptor.replace(LLVMFuzzerTestOneInput, cm.My_LLVMFuzzerTestOneInput);
```
### Hooking `main`
Lastly, it should be noted that using FRIDA mode's scripting support to hook the
`main` function is a special case. This is because the `main` function is
already hooked by the FRIDA mode engine itself, and hence the function `main`
(or at least its first basic block) has already been compiled by Stalker ready
for execution. Hence, any attempt to use `Interceptor.replace` as in the example
above will not work. Instead, the JS bindings provide the function
`setJsMainHook` for just this scenario, as demonstrated in the example below.
```js
const main = DebugSymbol.fromName('main').address;
Afl.print(`main: ${main}`);
const LLVMFuzzerTestOneInput = DebugSymbol.fromName('LLVMFuzzerTestOneInput').address;
Afl.print(`LLVMFuzzerTestOneInput: ${LLVMFuzzerTestOneInput}`);
const cm = new CModule(`
extern unsigned char * __afl_fuzz_ptr;
extern unsigned int * __afl_fuzz_len;
extern void LLVMFuzzerTestOneInput(char *buf, int len);
int main(int argc, char **argv) {
LLVMFuzzerTestOneInput(__afl_fuzz_ptr, *__afl_fuzz_len);
}
`,
{
LLVMFuzzerTestOneInput: LLVMFuzzerTestOneInput,
__afl_fuzz_ptr: Afl.getAflFuzzPtr(),
__afl_fuzz_len: Afl.getAflFuzzLen()
});
Afl.setEntryPoint(cm.main);
Afl.setPersistentAddress(cm.main);
Afl.setInMemoryFuzzing();
Afl.setJsMainHook(cm.main);
```
### Library Fuzzing
It doesn't take too much imagination to see that the above example can be
extended to use FRIDA's `Module.load` API so that the replaced `main` function
can then call an arbitrary function. In this way, if we have a library which we
wish to fuzz rather than an executable, then a surrogate executable can be used.
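As a minimal sketch of such a surrogate (the library name `libtarget.so` and its
export `target_parse` below are hypothetical placeholders, not part of FRIDA
mode), the replaced `main` might look something like this:
```js
/* Hypothetical example: load the library we wish to fuzz and locate the
   function to be exercised. Both names are placeholders. */
const lib = Module.load('libtarget.so');
const targetParse = lib.getExportByName('target_parse');
const cm = new CModule(`
  extern unsigned char * __afl_fuzz_ptr;
  extern unsigned int * __afl_fuzz_len;
  extern void target_parse(unsigned char *buf, unsigned int len);
  /* Surrogate main: feed the in-memory test case straight to the library. */
  int main(int argc, char **argv) {
    target_parse(__afl_fuzz_ptr, *__afl_fuzz_len);
    return 0;
  }
  `,
  {
    target_parse: targetParse,
    __afl_fuzz_ptr: Afl.getAflFuzzPtr(),
    __afl_fuzz_len: Afl.getAflFuzzLen()
  });
Afl.setEntryPoint(cm.main);
Afl.setPersistentAddress(cm.main);
Afl.setInMemoryFuzzing();
Afl.setJsMainHook(cm.main);
```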
## Patching
# Patching
Consider the [following](test/js/test2.c) test code...
```c
@ -302,7 +394,7 @@ Consider the [following](test/js/test2.c) test code...
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
https://www.apache.org/licenses/LICENSE-2.0
*/
#include <fcntl.h>
@ -313,22 +405,22 @@ Consider the [following](test/js/test2.c) test code...
#include <unistd.h>
const uint32_t crc32_tab[] = {
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
...
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
uint32_t
crc32(const void *buf, size_t size)
{
const uint8_t *p = buf;
uint32_t crc;
crc = ~0U;
while (size--)
crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);
return crc ^ ~0U;
const uint8_t *p = buf;
uint32_t crc;
crc = ~0U;
while (size--)
crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8);
return crc ^ ~0U;
}
/*
@ -419,9 +511,9 @@ int main(int argc, char **argv) {
```
There are a couple of obstacles with our target application. Unlike when fuzzing
source code, though, we can't simply edit it and recompile it. The following
source code, though, we can't just edit it and recompile it. The following
script shows how we can use the normal functionality of FRIDA to modify any
troublesome behaviour.
troublesome behavior.
```js
Afl.print('******************');
@ -460,8 +552,10 @@ Afl.done();
Afl.print("done");
```
# Advanced Patching
## Advanced patching
Consider the following code fragment...
```c
extern void some_boring_bug2(char c);
@ -488,7 +582,7 @@ void LLVMFuzzerTestOneInput(char *buf, int len) {
}
```
Rather than using FRIDAs `Interceptor.replace` or `Interceptor.attach` APIs, it
Rather than using FRIDA's `Interceptor.replace` or `Interceptor.attach` APIs, it
is possible to apply much more fine grained modification to the target
application by means of using the Stalker APIs.
@ -572,39 +666,43 @@ Afl.setStalkerCallback(cm.js_stalker_callback)
Afl.setStdErr("/tmp/stderr.txt");
```
Note that you will more likely want to find the
patch address by using:
Note that you will more likely want to find the patch address by using:
```js
const module = Process.getModuleByName('target.exe');
/* Hardcoded offset within the target image */
const address = module.base.add(0xdeadface);
```
OR
```
const address = DebugSymbol.fromName("my_function").address.add(0xdeadface);
```
OR
```
const address = Module.getExportByName(null, "my_function").add(0xdeadface);
```
The function `js_stalker_callback` should return `TRUE` if the original
instruction should be emitted in the instrumented code, or `FALSE` otherwise.
In the example above, we can see it is replaced with a `NOP`.
instruction should be emitted in the instrumented code or `FALSE` otherwise. In
the example above, we can see it is replaced with a `NOP`.
Lastly, note that the same callback will be called when compiling instrumented
code both in the child of the forkserver (as it is executed) and also in the
parent of the forserver (when prefetching is enabled) so that it can be
parent of the forkserver (when prefetching is enabled) so that it can be
inherited by the next forked child. It is **VERY** important that the same
instructions be generated in both the parent and the child, or if prefetching is
instructions be generated in both the parent and the child or if prefetching is
disabled that the same instructions are generated every time the block is
compiled. Failure to do so will likely lead to bugs which are incredibly
difficult to diagnose. The code above only prints the instructions when running
in the parent process (the one provided by `Process.id` when the JS script is
executed).
# OSX
## OSX
Note that the JavaScript debug symbol API for OSX makes use of the
`CoreSymbolication` APIs and as such the `CoreFoundation` module must be loaded
into the target to make use of it. This can be done by setting:
@ -614,47 +712,38 @@ AFL_PRELOAD=/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation
```
It should be noted that `CoreSymbolication` API may take a while to initialize
and build its caches. For this reason, it may be nescessary to also increase the
and build its caches. For this reason, it may be necessary to also increase the
value of the `-t` flag passed to `afl-fuzz`.
# API
## API
```js
class Afl {
/**
* Field containing the `Module` object for `afl-frida-trace.so` (the FRIDA mode
* implementation).
*/
public static module: Module = Process.getModuleByName("afl-frida-trace.so");
/**
* This is equivalent to setting a value in `AFL_FRIDA_EXCLUDE_RANGES`,
* it takes as arguments a `NativePointer` and a `number`. It can be
* called multiple times to exclude several ranges.
*/
public static addExcludedRange(addressess: NativePointer, size: number): void {
Afl.jsApiAddExcludeRange(addressess, size);
static addExcludedRange(addressess, size) {
Afl.jsApiAddExcludeRange(addressess, size);
}
/**
* This is equivalent to setting a value in `AFL_FRIDA_INST_RANGES`,
* it takes as arguments a `NativePointer` and a `number`. It can be
* called multiple times to include several ranges.
*/
public static addIncludedRange(addressess: NativePointer, size: number): void {
Afl.jsApiAddIncludeRange(addressess, size);
static addIncludedRange(addressess, size) {
Afl.jsApiAddIncludeRange(addressess, size);
}
/**
* This must always be called at the end of your script. This lets
* FRIDA mode know that your configuration is finished and that
* execution has reached the end of your script. Failure to call
* this will result in a fatal error.
*/
public static done(): void {
Afl.jsApiDone();
static done() {
Afl.jsApiDone();
}
/**
* This function can be called within your script to cause FRIDA
* mode to trigger a fatal error. This is useful if for example you
@ -662,49 +751,48 @@ class Afl {
* stop. The user will need to enable `AFL_DEBUG_CHILD=1` to view
* this error message.
*/
public static error(msg: string): void {
const buf = Memory.allocUtf8String(msg);
Afl.jsApiError(buf);
static error(msg) {
const buf = Memory.allocUtf8String(msg);
Afl.jsApiError(buf);
}
/**
* Function used to provide access to `__afl_fuzz_len`, which contains the length of
* the fuzzing data when using in-memory test case fuzzing.
*/
public static getAflFuzzLen(): NativePointer {
return Afl.jsApiGetSymbol("__afl_fuzz_len");
static getAflFuzzLen() {
return Afl.jsApiGetSymbol("__afl_fuzz_len");
}
/**
* Function used to provide access to `__afl_fuzz_ptr`, which contains the fuzzing
* data when using in-memory test case fuzzing.
*/
public static getAflFuzzPtr(): NativePointer {
return Afl.jsApiGetSymbol("__afl_fuzz_ptr");
static getAflFuzzPtr() {
return Afl.jsApiGetSymbol("__afl_fuzz_ptr");
}
/**
* Print a message to STDOUT. This should be preferred to
* FRIDA's `console.log` since FRIDA will queue its log messages.
* If `console.log` is used in a callback in particular, then there
* may no longer be a thread running to service this queue.
*/
public static print(msg: string): void {
const STDOUT_FILENO = 2;
const log = `${msg}\n`;
const buf = Memory.allocUtf8String(log);
Afl.jsApiWrite(STDOUT_FILENO, buf, log.length);
static print(msg) {
const STDOUT_FILENO = 2;
const log = `${msg}\n`;
const buf = Memory.allocUtf8String(log);
Afl.jsApiWrite(STDOUT_FILENO, buf, log.length);
}
/**
* See `AFL_FRIDA_INST_NO_BACKPATCH`.
*/
static setBackpatchDisable() {
Afl.jsApiSetBackpatchDisable();
}
/**
* See `AFL_FRIDA_DEBUG_MAPS`.
*/
public static setDebugMaps(): void {
Afl.jsApiSetDebugMaps();
static setDebugMaps() {
Afl.jsApiSetDebugMaps();
}
/**
* This has the same effect as setting `AFL_ENTRYPOINT`, but has the
* convenience of allowing you to use FRIDAs APIs to determine the
@ -713,143 +801,198 @@ class Afl {
* function should be called with a `NativePointer` as its
* argument.
*/
public static setEntryPoint(address: NativePointer): void {
Afl.jsApiSetEntryPoint(address);
static setEntryPoint(address) {
Afl.jsApiSetEntryPoint(address);
}
/**
* Function used to enable in-memory test cases for fuzzing.
*/
public static setInMemoryFuzzing(): void {
Afl.jsApiAflSharedMemFuzzing.writeInt(1);
static setInMemoryFuzzing() {
Afl.jsApiAflSharedMemFuzzing.writeInt(1);
}
/**
* See `AFL_FRIDA_INST_COVERAGE_FILE`. This function takes a single `string`
* as an argument.
*/
static setInstrumentCoverageFile(file) {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetInstrumentCoverageFile(buf);
}
/**
* See `AFL_FRIDA_INST_DEBUG_FILE`. This function takes a single `string` as
* an argument.
*/
public static setInstrumentDebugFile(file: string): void {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetInstrumentDebugFile(buf);
static setInstrumentDebugFile(file) {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetInstrumentDebugFile(buf);
}
/**
* See `AFL_FRIDA_INST_TRACE`.
*/
public static setInstrumentEnableTracing(): void {
Afl.jsApiSetInstrumentTrace();
static setInstrumentEnableTracing() {
Afl.jsApiSetInstrumentTrace();
}
/**
* See `AFL_FRIDA_INST_JIT`.
*/
static setInstrumentJit() {
Afl.jsApiSetInstrumentJit();
}
/**
* See `AFL_INST_LIBS`.
*/
public static setInstrumentLibraries(): void {
Afl.jsApiSetInstrumentLibraries();
static setInstrumentLibraries() {
Afl.jsApiSetInstrumentLibraries();
}
/**
* See `AFL_FRIDA_INST_NO_OPTIMIZE`
*/
public static setInstrumentNoOptimize(): void {
Afl.jsApiSetInstrumentNoOptimize();
static setInstrumentNoOptimize() {
Afl.jsApiSetInstrumentNoOptimize();
}
/*
* See `AFL_FRIDA_INST_SEED`
*/
static setInstrumentSeed(seed) {
Afl.jsApiSetInstrumentSeed(seed);
}
/**
* See `AFL_FRIDA_INST_TRACE_UNIQUE`.
*/
public static setInstrumentTracingUnique(): void {
Afl.jsApiSetInstrumentTraceUnique();
static setInstrumentTracingUnique() {
Afl.jsApiSetInstrumentTraceUnique();
}
/**
* See `AFL_FRIDA_INST_UNSTABLE_COVERAGE_FILE`. This function takes a single
* `string` as an argument.
*/
static setInstrumentUnstableCoverageFile(file) {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetInstrumentUnstableCoverageFile(buf);
}
/*
* Set a callback to be called in place of the usual `main` function. See
* `Scripting.md` for details.
*/
static setJsMainHook(address) {
Afl.jsApiSetJsMainHook(address);
}
/**
* This is equivalent to setting `AFL_FRIDA_PERSISTENT_ADDR`, again a
* `NativePointer` should be provided as its argument.
*/
public static setPersistentAddress(address: NativePointer): void {
Afl.jsApiSetPersistentAddress(address);
static setPersistentAddress(address) {
Afl.jsApiSetPersistentAddress(address);
}
/**
* This is equivalent to setting `AFL_FRIDA_PERSISTENT_CNT`, a
* `number` should be provided as its argument.
*/
public static setPersistentCount(count: number): void {
Afl.jsApiSetPersistentCount(count);
static setPersistentCount(count) {
Afl.jsApiSetPersistentCount(count);
}
/**
* See `AFL_FRIDA_PERSISTENT_DEBUG`.
*/
public static setPersistentDebug(): void {
Afl.jsApiSetPersistentDebug();
static setPersistentDebug() {
Afl.jsApiSetPersistentDebug();
}
/**
* See `AFL_FRIDA_PERSISTENT_HOOK`. This function takes a `NativePointer` as an
* argument. See above for examples of use.
*/
public static setPersistentHook(address: NativePointer): void {
Afl.jsApiSetPersistentHook(address);
static setPersistentHook(address) {
Afl.jsApiSetPersistentHook(address);
}
/**
* This is equivalent to setting `AFL_FRIDA_PERSISTENT_RET`, again a
* `NativePointer` should be provided as its argument.
*/
public static setPersistentReturn(address: NativePointer): void {
Afl.jsApiSetPersistentReturn(address);
static setPersistentReturn(address) {
Afl.jsApiSetPersistentReturn(address);
}
/**
* See `AFL_FRIDA_INST_NO_PREFETCH_BACKPATCH`.
*/
static setPrefetchBackpatchDisable() {
Afl.jsApiSetPrefetchBackpatchDisable();
}
/**
* See `AFL_FRIDA_INST_NO_PREFETCH`.
*/
public static setPrefetchDisable(): void {
Afl.jsApiSetPrefetchDisable();
static setPrefetchDisable() {
Afl.jsApiSetPrefetchDisable();
}
/*
* Set a function to be called for each instruction which is instrumented
* by AFL FRIDA mode.
/**
* See `AFL_FRIDA_SECCOMP_FILE`. This function takes a single `string` as
* an argument.
*/
public static setStalkerCallback(callback: NativePointer): void {
Afl.jsApiSetStalkerCallback(callback);
static setSeccompFile(file) {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetSeccompFile(buf);
}
/**
* See `AFL_FRIDA_STALKER_ADJACENT_BLOCKS`.
*/
static setStalkerAdjacentBlocks(val) {
Afl.jsApiSetStalkerAdjacentBlocks(val);
}
/*
* Set a function to be called for each instruction which is instrumented
* by AFL FRIDA mode.
*/
static setStalkerCallback(callback) {
Afl.jsApiSetStalkerCallback(callback);
}
/**
* See `AFL_FRIDA_STALKER_IC_ENTRIES`.
*/
static setStalkerIcEntries(val) {
Afl.jsApiSetStalkerIcEntries(val);
}
/**
* See `AFL_FRIDA_STATS_FILE`. This function takes a single `string` as
* an argument.
*/
public static setStatsFile(file: string): void {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetStatsFile(buf);
static setStatsFile(file) {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetStatsFile(buf);
}
/**
* See `AFL_FRIDA_STATS_INTERVAL`. This function takes a `number` as an
* argument.
*/
public static setStatsInterval(interval: number): void {
Afl.jsApiSetStatsInterval(interval);
static setStatsInterval(interval) {
Afl.jsApiSetStatsInterval(interval);
}
/**
* See `AFL_FRIDA_OUTPUT_STDERR`. This function takes a single `string` as
* an argument.
*/
public static setStdErr(file: string): void {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetStdErr(buf);
static setStdErr(file) {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetStdErr(buf);
}
/**
* See `AFL_FRIDA_OUTPUT_STDOUT`. This function takes a single `string` as
* an argument.
*/
public static setStdOut(file: string): void {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetStdOut(buf);
static setStdOut(file) {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetStdOut(buf);
}
/**
* See `AFL_FRIDA_TRACEABLE`.
*/
static setTraceable() {
Afl.jsApiSetTraceable();
}
static jsApiGetFunction(name, retType, argTypes) {
const addr = Afl.module.getExportByName(name);
return new NativeFunction(addr, retType, argTypes);
}
static jsApiGetSymbol(name) {
return Afl.module.getExportByName(name);
}
}
```
```

View File

@ -8,6 +8,7 @@
js_api_add_include_range;
js_api_done;
js_api_error;
js_api_set_backpatch_disable;
js_api_set_debug_maps;
js_api_set_entrypoint;
js_api_set_instrument_coverage_file;
@ -19,6 +20,7 @@
js_api_set_instrument_trace;
js_api_set_instrument_trace_unique;
js_api_set_instrument_unstable_coverage_file;
js_api_set_js_main_hook;
js_api_set_persistent_address;
js_api_set_persistent_count;
js_api_set_persistent_debug;
@ -28,11 +30,13 @@
js_api_set_prefetch_disable;
js_api_set_seccomp_file;
js_api_set_stalker_callback;
js_api_set_stalker_adjacent_blocks;
js_api_set_stalker_ic_entries;
js_api_set_stats_file;
js_api_set_stats_interval;
js_api_set_stderr;
js_api_set_stdout;
js_api_set_traceable;
local:
*;

View File

@ -9,6 +9,7 @@ void asan_config(void);
void asan_init(void);
void asan_arch_init(void);
void asan_instrument(const cs_insn *instr, GumStalkerIterator *iterator);
void asan_exclude_module_by_symbol(gchar *symbol_name);
#endif

View File

@ -4,6 +4,7 @@
#include "frida-gumjs.h"
extern guint64 entry_point;
extern gboolean traceable;
extern gboolean entry_compiled;
extern gboolean entry_run;
@ -15,5 +16,7 @@ void entry_start(void);
void entry_prologue(GumStalkerIterator *iterator, GumStalkerOutput *output);
void entry_on_fork(void);
#endif

View File

@ -29,13 +29,15 @@ GumStalkerTransformer *instrument_get_transformer(void);
/* Functions to be implemented by the different architectures */
gboolean instrument_is_coverage_optimize_supported(void);
void instrument_coverage_optimize_init(void);
void instrument_coverage_optimize(const cs_insn * instr,
GumStalkerOutput *output);
void instrument_debug_config(void);
void instrument_debug_init(void);
void instrument_debug_start(uint64_t address, GumStalkerOutput *output);
void instrument_debug_instruction(uint64_t address, uint16_t size);
void instrument_debug_instruction(uint64_t address, uint16_t size,
GumStalkerOutput *output);
void instrument_debug_end(GumStalkerOutput *output);
void instrument_flush(GumStalkerOutput *output);
gpointer instrument_cur(GumStalkerOutput *output);

View File

@ -7,11 +7,14 @@ typedef gboolean (*js_api_stalker_callback_t)(const cs_insn *insn,
gboolean begin, gboolean excluded,
GumStalkerOutput *output);
typedef int (*js_main_hook_t)(int argc, char **argv, char **envp);
extern unsigned char api_js[];
extern unsigned int api_js_len;
extern gboolean js_done;
extern js_api_stalker_callback_t js_user_callback;
extern js_main_hook_t js_main_hook;
/* Frida Mode */

View File

@ -10,6 +10,8 @@ extern gboolean ranges_inst_jit;
void ranges_config(void);
void ranges_init(void);
void ranges_print_debug_maps(void);
gboolean range_is_excluded(GumAddress address);
void ranges_exclude();

View File

@ -1,15 +1,95 @@
#ifndef _SECCOMP_H
#define _SECCOMP_H
#include <linux/seccomp.h>
#ifndef __APPLE__
#include "frida-gumjs.h"
#include <stdint.h>
#include <linux/filter.h>
#define SECCOMP_SOCKET_SEND_FD 0x1D3
#define SECCOMP_SOCKET_RECV_FD 0x1D4
#include "frida-gumjs.h"
#define SECCOMP_OUTPUT_FILE_FD 0x1D5
#define SECCOMP_PARENT_EVENT_FD 0x1D6
/******************************************************************************/
#define PR_SET_NO_NEW_PRIVS 38
#define SECCOMP_SET_MODE_STRICT 0
#define SECCOMP_SET_MODE_FILTER 1
#define SECCOMP_GET_ACTION_AVAIL 2
#define SECCOMP_GET_NOTIF_SIZES 3
#define SECCOMP_IOC_MAGIC '!'
#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type)
/* Flags for seccomp notification fd ioctl. */
#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, struct seccomp_notif_resp)
#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
#define SECCOMP_RET_ALLOW 0x7fff0000U
#define SECCOMP_RET_USER_NOTIF 0x7fc00000U
#define SYS_seccomp __NR_seccomp
#ifndef __NR_seccomp
#if defined(__arm__)
#define __NR_seccomp 383
#elif defined(__aarch64__)
#define __NR_seccomp 277
#elif defined(__x86_64__)
#define __NR_seccomp 317
#elif defined(__i386__)
#define __NR_seccomp 354
#else
#pragma error "Unsupported architecture"
#endif
#endif
#define SECCOMP_USER_NOTIF_FLAG_CONTINUE (1UL << 0)
struct seccomp_notif_resp {
__u64 id;
__s64 val;
__s32 error;
__u32 flags;
};
struct seccomp_data {
int nr;
__u32 arch;
__u64 instruction_pointer;
__u64 args[6];
};
struct seccomp_notif {
__u64 id;
__u32 pid;
__u32 flags;
struct seccomp_data data;
};
struct seccomp_notif_sizes {
__u16 seccomp_notif;
__u16 seccomp_notif_resp;
__u16 seccomp_data;
};
/******************************************************************************/
#define SECCOMP_SOCKET_SEND_FD 0x1D3
#define SECCOMP_SOCKET_RECV_FD 0x1D4
#define SECCOMP_OUTPUT_FILE_FD 0x1D5
#define SECCOMP_PARENT_EVENT_FD 0x1D6
enum {
@ -319,23 +399,19 @@ enum {
};
extern char *seccomp_filename;
typedef void (*seccomp_child_func_t)(int event_fd, void *ctx);
typedef void (*seccomp_filter_callback_t)(struct seccomp_notif * req,
struct seccomp_notif_resp *resp,
GumReturnAddressArray * frames);
void seccomp_config(void);
void seccomp_init(void);
void seccomp_on_fork(void);
void seccomp_print(char *format, ...);
void seccomp_atomic_set(volatile bool *ptr, bool val);
bool seccomp_atomic_try_set(volatile bool *ptr, bool val);
void seccomp_atomic_wait(volatile bool *ptr, bool val);
void seccomp_callback_parent(void);
void seccomp_callback_initialize(void);
void seccomp_child_run(seccomp_child_func_t child_func, void *ctx, pid_t *child,
int *event_fd);
void seccomp_child_wait(int event_fd);
@ -349,6 +425,8 @@ int seccomp_filter_install(pid_t child);
void seccomp_filter_child_install(void);
void seccomp_filter_run(int fd, seccomp_filter_callback_t callback);
void seccomp_print(char *format, ...);
void seccomp_socket_create(int *sock);
void seccomp_socket_send(int sockfd, int fd);
int seccomp_socket_recv(int sockfd);
@ -356,4 +434,11 @@ int seccomp_socket_recv(int sockfd);
char *seccomp_syscall_lookup(int id);
#endif
extern char *seccomp_filename;
void seccomp_config(void);
void seccomp_init(void);
void seccomp_on_fork(void);
#endif

View File

@ -3,7 +3,9 @@
#include "frida-gumjs.h"
extern guint stalker_ic_entries;
extern guint stalker_ic_entries;
extern gboolean backpatch_enable;
extern guint stalker_adjacent_blocks;
void stalker_config(void);
void stalker_init(void);

View File

@ -3,12 +3,39 @@
#include "frida-gumjs.h"
#include "debug.h"
#define UNUSED_PARAMETER(x) (void)(x)
#define IGNORED_RETURN(x) (void)!(x)
guint64 util_read_address(char *key);
guint64 util_read_address(char *key, guint64 default_value);
guint64 util_read_num(char *key, guint64 default_value);
gboolean util_output_enabled(void);
gsize util_rotate(gsize val, gsize shift, gsize size);
gsize util_log2(gsize val);
guint64 util_read_num(char *key);
#define FOKF(x...) \
do { \
\
if (!util_output_enabled()) { break; } \
\
OKF(x); \
\
} while (0)
#define FWARNF(x...) \
do { \
\
WARNF(x); \
\
} while (0)
#define FFATAL(x...) \
do { \
\
FATAL(x); \
\
} while (0)
#endif

View File

@ -1,10 +1,6 @@
FROM fridadotre/manylinux-x86_64
COPY realpath /bin/realpath
RUN chmod +x /bin/realpath
RUN yum -y install xz
RUN yum -y install vim-common
WORKDIR /AFLplusplus
ENV CFLAGS="\

View File

@ -2,17 +2,22 @@ PWD:=$(shell pwd)/
ROOT:=$(PWD)../../
BUILD_DIR:=$(PWD)build/
.PHONY: all clean shell
.PHONY: all build docker clean shell
all:
docker build --tag many-afl-frida .
all: docker
docker run --rm \
-v $(ROOT):/AFLplusplus \
many-afl-frida \
make -C /AFLplusplus/frida_mode clean all
$(BUILD_DIR):
mkdir -p $@
build:
docker run --rm \
-v $(ROOT):/AFLplusplus \
many-afl-frida \
make -C /AFLplusplus/frida_mode
docker:
docker build --tag many-afl-frida .
clean:
docker images --filter 'dangling=true' -q --no-trunc | xargs -L1 docker rmi --force

View File

@ -1,2 +0,0 @@
#!/bin/sh
readlink -f -- "$@"

View File

@ -1,8 +1,8 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "asan.h"
#include "ranges.h"
#include "util.h"
static gboolean asan_enabled = FALSE;
gboolean asan_initialized = FALSE;
@ -11,12 +11,12 @@ void asan_config(void) {
if (getenv("AFL_USE_FASAN") != NULL) {
OKF("Frida ASAN mode enabled");
FOKF("Frida ASAN mode enabled");
asan_enabled = TRUE;
} else {
OKF("Frida ASAN mode disabled");
FOKF("Frida ASAN mode disabled");
}
@ -33,3 +33,23 @@ void asan_init(void) {
}
static gboolean asan_exclude_module(const GumModuleDetails *details,
gpointer user_data) {
gchar * symbol_name = (gchar *)user_data;
GumAddress address;
address = gum_module_find_export_by_name(details->name, symbol_name);
if (address == 0) { return TRUE; }
ranges_add_exclude((GumMemoryRange *)details->range);
return FALSE;
}
void asan_exclude_module_by_symbol(gchar *symbol_name) {
gum_process_enumerate_modules(asan_exclude_module, symbol_name);
}

View File

@ -1,7 +1,5 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "asan.h"
#include "util.h"
@ -12,7 +10,7 @@ void asan_instrument(const cs_insn *instr, GumStalkerIterator *iterator) {
UNUSED_PARAMETER(iterator);
if (asan_initialized) {
FATAL("ASAN mode not supported on this architecture");
FFATAL("ASAN mode not supported on this architecture");
}
@ -20,7 +18,7 @@ void asan_instrument(const cs_insn *instr, GumStalkerIterator *iterator) {
void asan_arch_init(void) {
FATAL("ASAN mode not supported on this architecture");
FFATAL("ASAN mode not supported on this architecture");
}

View File

@ -1,8 +1,6 @@
#include <dlfcn.h>
#include "frida-gumjs.h"
#include "debug.h"
#include "asan.h"
#include "ctx.h"
#include "util.h"
@ -86,10 +84,12 @@ void asan_arch_init(void) {
asan_storeN = (asan_loadN_t)dlsym(RTLD_DEFAULT, "__asan_storeN");
if (asan_loadN == NULL || asan_storeN == NULL) {
FATAL("Frida ASAN failed to find '__asan_loadN' or '__asan_storeN'");
FFATAL("Frida ASAN failed to find '__asan_loadN' or '__asan_storeN'");
}
asan_exclude_module_by_symbol("__asan_loadN");
}
#endif

View File

@ -1,8 +1,6 @@
#include <dlfcn.h>
#include "frida-gumjs.h"
#include "debug.h"
#include "asan.h"
#include "ctx.h"
#include "util.h"
@ -83,10 +81,12 @@ void asan_arch_init(void) {
asan_storeN = (asan_loadN_t)dlsym(RTLD_DEFAULT, "__asan_storeN");
if (asan_loadN == NULL || asan_storeN == NULL) {
FATAL("Frida ASAN failed to find '__asan_loadN' or '__asan_storeN'");
FFATAL("Frida ASAN failed to find '__asan_loadN' or '__asan_storeN'");
}
asan_exclude_module_by_symbol("__asan_loadN");
}
#endif

View File

@ -1,8 +1,6 @@
#include <dlfcn.h>
#include "frida-gumjs.h"
#include "debug.h"
#include "asan.h"
#include "ctx.h"
#include "util.h"
@ -83,10 +81,12 @@ void asan_arch_init(void) {
asan_storeN = (asan_loadN_t)dlsym(RTLD_DEFAULT, "__asan_storeN");
if (asan_loadN == NULL || asan_storeN == NULL) {
FATAL("Frida ASAN failed to find '__asan_loadN' or '__asan_storeN'");
FFATAL("Frida ASAN failed to find '__asan_loadN' or '__asan_storeN'");
}
asan_exclude_module_by_symbol("__asan_loadN");
}
#endif

View File

@ -7,8 +7,6 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "util.h"
#define DEFAULT_MMAP_MIN_ADDR (32UL << 10)
@ -35,14 +33,28 @@ static gboolean cmplog_range(const GumRangeDetails *details,
static gint cmplog_sort(gconstpointer a, gconstpointer b) {
return ((GumMemoryRange *)b)->base_address -
((GumMemoryRange *)a)->base_address;
GumMemoryRange *ra = (GumMemoryRange *)a;
GumMemoryRange *rb = (GumMemoryRange *)b;
if (ra->base_address < rb->base_address) {
return -1;
} else if (ra->base_address > rb->base_address) {
return 1;
} else {
return 0;
}
}
static void cmplog_get_ranges(void) {
OKF("CMPLOG - Collecting ranges");
FOKF("CMPLOG - Collecting ranges");
cmplog_ranges = g_array_sized_new(false, false, sizeof(GumMemoryRange), 100);
gum_process_enumerate_ranges(GUM_PAGE_READ, cmplog_range, cmplog_ranges);
@ -56,7 +68,7 @@ void cmplog_config(void) {
void cmplog_init(void) {
OKF("CMPLOG - Enabled [%c]", __afl_cmp_map == NULL ? ' ' : 'X');
FOKF("CMPLOG - Enabled [%c]", __afl_cmp_map == NULL ? ' ' : 'X');
if (__afl_cmp_map == NULL) { return; }
@ -65,9 +77,9 @@ void cmplog_init(void) {
for (guint i = 0; i < cmplog_ranges->len; i++) {
GumMemoryRange *range = &g_array_index(cmplog_ranges, GumMemoryRange, i);
OKF("CMPLOG Range - %3u: 0x%016" G_GINT64_MODIFIER
"X - 0x%016" G_GINT64_MODIFIER "X",
i, range->base_address, range->base_address + range->size);
FOKF("CMPLOG Range - %3u: 0x%016" G_GINT64_MODIFIER
"X - 0x%016" G_GINT64_MODIFIER "X",
i, range->base_address, range->base_address + range->size);
}
@ -78,14 +90,14 @@ void cmplog_init(void) {
hash_yes = g_hash_table_new(g_direct_hash, g_direct_equal);
if (hash_yes == NULL) {
FATAL("Failed to g_hash_table_new, errno: %d", errno);
FFATAL("Failed to g_hash_table_new, errno: %d", errno);
}
hash_no = g_hash_table_new(g_direct_hash, g_direct_equal);
if (hash_no == NULL) {
FATAL("Failed to g_hash_table_new, errno: %d", errno);
FFATAL("Failed to g_hash_table_new, errno: %d", errno);
}
@ -117,7 +129,7 @@ gboolean cmplog_test_addr(guint64 addr, size_t size) {
if (!g_hash_table_add(hash_no, GSIZE_TO_POINTER(addr))) {
FATAL("Failed - g_hash_table_add");
FFATAL("Failed - g_hash_table_add");
}
@ -127,7 +139,7 @@ gboolean cmplog_test_addr(guint64 addr, size_t size) {
if (!g_hash_table_add(hash_yes, GSIZE_TO_POINTER(addr))) {
FATAL("Failed - g_hash_table_add");
FFATAL("Failed - g_hash_table_add");
}
@ -139,7 +151,7 @@ gboolean cmplog_test_addr(guint64 addr, size_t size) {
gboolean cmplog_is_readable(guint64 addr, size_t size) {
if (cmplog_ranges == NULL) FATAL("CMPLOG not initialized");
if (cmplog_ranges == NULL) FFATAL("CMPLOG not initialized");
/*
* The Linux kernel prevents mmap from allocating from the very bottom of the

View File

@ -1,7 +1,5 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "frida_cmplog.h"
#include "util.h"
@ -11,7 +9,7 @@ void cmplog_instrument(const cs_insn *instr, GumStalkerIterator *iterator) {
UNUSED_PARAMETER(instr);
UNUSED_PARAMETER(iterator);
if (__afl_cmp_map == NULL) { return; }
FATAL("CMPLOG mode not supported on this architecture");
FFATAL("CMPLOG mode not supported on this architecture");
}

View File

@ -5,6 +5,7 @@
#include "ctx.h"
#include "frida_cmplog.h"
#include "instrument.h"
#include "util.h"
#if defined(__aarch64__)
@ -66,7 +67,7 @@ static gboolean cmplog_read_mem(GumCpuContext *ctx, uint8_t size,
*val = *((guint64 *)GSIZE_TO_POINTER(address));
return TRUE;
default:
FATAL("Invalid operand size: %d\n", size);
FFATAL("Invalid operand size: %d\n", size);
}
@ -88,7 +89,7 @@ static gboolean cmplog_get_operand_value(GumCpuContext *context,
case ARM64_OP_MEM:
return cmplog_read_mem(context, ctx->size, &ctx->mem, val);
default:
FATAL("Invalid operand type: %d\n", ctx->type);
FFATAL("Invalid operand type: %d\n", ctx->type);
}
@ -104,30 +105,45 @@ static void cmplog_call_callout(GumCpuContext *context, gpointer user_data) {
gsize x0 = ctx_read_reg(context, ARM64_REG_X0);
gsize x1 = ctx_read_reg(context, ARM64_REG_X1);
if (((G_MAXULONG - x0) < 32) || ((G_MAXULONG - x1) < 32)) return;
if (((G_MAXULONG - x0) < 31) || ((G_MAXULONG - x1) < 31)) return;
if (!cmplog_is_readable(x0, 32) || !cmplog_is_readable(x1, 32)) return;
if (!cmplog_is_readable(x0, 31) || !cmplog_is_readable(x1, 31)) return;
void *ptr1 = GSIZE_TO_POINTER(x0);
void *ptr2 = GSIZE_TO_POINTER(x1);
uintptr_t k = address;
guint64 k = instrument_get_offset_hash(GUM_ADDRESS(address));
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {
__afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
__afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
__afl_cmp_map->headers[k].hits = 0;
}
u32 hits = 0;
if (__afl_cmp_map->headers[k].hits == 0) {
__afl_cmp_map->headers[k].shape = 30;
} else {
hits = __afl_cmp_map->headers[k].hits;
}
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = 31;
__afl_cmp_map->headers[k].shape = 30;
hits &= CMP_MAP_RTN_H - 1;
((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0_len = 31;
((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1_len = 31;
gum_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0, ptr1,
32);
31);
gum_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1, ptr2,
32);
31);
}
@ -147,7 +163,7 @@ static void cmplog_instrument_put_operand(cmplog_ctx_t *ctx,
gum_memcpy(&ctx->mem, &operand->mem, sizeof(arm64_op_mem));
break;
default:
FATAL("Invalid operand type: %d\n", operand->type);
FFATAL("Invalid operand type: %d\n", operand->type);
}
@ -193,13 +209,24 @@ static void cmplog_handle_cmp_sub(GumCpuContext *context, gsize operand1,
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS)
__afl_cmp_map->headers[k].hits = 0;
u32 hits = 0;
if (__afl_cmp_map->headers[k].hits == 0) {
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
__afl_cmp_map->headers[k].shape = (size - 1);
} else {
hits = __afl_cmp_map->headers[k].hits;
}
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = (size - 1);
hits &= CMP_MAP_H - 1;
__afl_cmp_map->log[k][hits].v0 = operand1;
__afl_cmp_map->log[k][hits].v1 = operand2;

View File

@ -5,6 +5,7 @@
#include "ctx.h"
#include "frida_cmplog.h"
#include "instrument.h"
#include "util.h"
#if defined(__x86_64__)
@ -61,7 +62,7 @@ static gboolean cmplog_read_mem(GumCpuContext *ctx, uint8_t size,
*val = *((guint64 *)GSIZE_TO_POINTER(address));
return TRUE;
default:
FATAL("Invalid operand size: %d\n", size);
FFATAL("Invalid operand size: %d\n", size);
}
@ -83,7 +84,7 @@ static gboolean cmplog_get_operand_value(GumCpuContext *context,
case X86_OP_MEM:
return cmplog_read_mem(context, ctx->size, &ctx->mem, val);
default:
FATAL("Invalid operand type: %d\n", ctx->type);
FFATAL("Invalid operand type: %d\n", ctx->type);
}
@ -99,30 +100,43 @@ static void cmplog_call_callout(GumCpuContext *context, gpointer user_data) {
gsize rdi = ctx_read_reg(context, X86_REG_RDI);
gsize rsi = ctx_read_reg(context, X86_REG_RSI);
if (((G_MAXULONG - rdi) < 32) || ((G_MAXULONG - rsi) < 32)) return;
if (((G_MAXULONG - rdi) < 31) || ((G_MAXULONG - rsi) < 31)) return;
if (!cmplog_is_readable(rdi, 32) || !cmplog_is_readable(rsi, 32)) return;
if (!cmplog_is_readable(rdi, 31) || !cmplog_is_readable(rsi, 31)) return;
void *ptr1 = GSIZE_TO_POINTER(rdi);
void *ptr2 = GSIZE_TO_POINTER(rsi);
uintptr_t k = address;
guint64 k = instrument_get_offset_hash(GUM_ADDRESS(address));
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {
__afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
__afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
__afl_cmp_map->headers[k].hits = 0;
}
u32 hits = 0;
if (__afl_cmp_map->headers[k].hits == 0) {
__afl_cmp_map->headers[k].shape = 30;
} else {
hits = __afl_cmp_map->headers[k].hits;
}
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = 31;
hits &= CMP_MAP_RTN_H - 1;
((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0_len = 31;
((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1_len = 31;
gum_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0, ptr1,
32);
31);
gum_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1, ptr2,
32);
31);
}
@ -143,7 +157,7 @@ static void cmplog_instrument_put_operand(cmplog_ctx_t *ctx,
gum_memcpy(&ctx->mem, &operand->mem, sizeof(x86_op_mem));
break;
default:
FATAL("Invalid operand type: %d\n", operand->type);
FFATAL("Invalid operand type: %d\n", operand->type);
}
@ -179,13 +193,23 @@ static void cmplog_handle_cmp_sub(GumCpuContext *context, gsize operand1,
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 7;
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS)
__afl_cmp_map->headers[k].hits = 0;
u32 hits = 0;
if (__afl_cmp_map->headers[k].hits == 0) {
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
__afl_cmp_map->headers[k].shape = (size - 1);
} else {
hits = __afl_cmp_map->headers[k].hits;
}
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = (size - 1);
hits &= CMP_MAP_H - 1;
__afl_cmp_map->log[k][hits].v0 = operand1;
__afl_cmp_map->log[k][hits].v1 = operand2;

View File

@ -5,6 +5,7 @@
#include "ctx.h"
#include "frida_cmplog.h"
#include "instrument.h"
#include "util.h"
#if defined(__i386__)
@ -58,7 +59,7 @@ static gboolean cmplog_read_mem(GumCpuContext *ctx, uint8_t size,
*val = *((guint32 *)GSIZE_TO_POINTER(address));
return TRUE;
default:
FATAL("Invalid operand size: %d\n", size);
FFATAL("Invalid operand size: %d\n", size);
}
@ -80,7 +81,7 @@ static gboolean cmplog_get_operand_value(GumCpuContext *context,
case X86_OP_MEM:
return cmplog_read_mem(context, ctx->size, &ctx->mem, val);
default:
FATAL("Invalid operand type: %d\n", ctx->type);
FFATAL("Invalid operand type: %d\n", ctx->type);
}
@ -104,30 +105,43 @@ static void cmplog_call_callout(GumCpuContext *context, gpointer user_data) {
gsize arg1 = esp[0];
gsize arg2 = esp[1];
if (((G_MAXULONG - arg1) < 32) || ((G_MAXULONG - arg2) < 32)) return;
if (((G_MAXULONG - arg1) < 31) || ((G_MAXULONG - arg2) < 31)) return;
if (!cmplog_is_readable(arg1, 32) || !cmplog_is_readable(arg2, 32)) return;
if (!cmplog_is_readable(arg1, 31) || !cmplog_is_readable(arg2, 31)) return;
void *ptr1 = GSIZE_TO_POINTER(arg1);
void *ptr2 = GSIZE_TO_POINTER(arg2);
uintptr_t k = address;
guint64 k = instrument_get_offset_hash(GUM_ADDRESS(address));
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {
__afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
__afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
__afl_cmp_map->headers[k].hits = 0;
}
u32 hits = 0;
if (__afl_cmp_map->headers[k].hits == 0) {
__afl_cmp_map->headers[k].shape = 30;
} else {
hits = __afl_cmp_map->headers[k].hits;
}
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = 31;
hits &= CMP_MAP_RTN_H - 1;
((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0_len = 31;
((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1_len = 31;
gum_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0, ptr1,
32);
31);
gum_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1, ptr2,
32);
31);
}
@ -148,7 +162,7 @@ static void cmplog_instrument_put_operand(cmplog_ctx_t *ctx,
gum_memcpy(&ctx->mem, &operand->mem, sizeof(x86_op_mem));
break;
default:
FATAL("Invalid operand type: %d\n", operand->type);
FFATAL("Invalid operand type: %d\n", operand->type);
}
@ -184,13 +198,24 @@ static void cmplog_handle_cmp_sub(GumCpuContext *context, gsize operand1,
k = (k >> 4) ^ (k << 8);
k &= CMP_MAP_W - 1;
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS)
__afl_cmp_map->headers[k].hits = 0;
u32 hits = 0;
if (__afl_cmp_map->headers[k].hits == 0) {
__afl_cmp_map->headers[k].type = CMP_TYPE_INS;
__afl_cmp_map->headers[k].shape = (size - 1);
} else {
hits = __afl_cmp_map->headers[k].hits;
}
u32 hits = __afl_cmp_map->headers[k].hits;
__afl_cmp_map->headers[k].hits = hits + 1;
__afl_cmp_map->headers[k].shape = (size - 1);
hits &= CMP_MAP_H - 1;
__afl_cmp_map->log[k][hits].v0 = operand1;
__afl_cmp_map->log[k][hits].v1 = operand2;
@ -203,7 +228,7 @@ static void cmplog_cmp_sub_callout(GumCpuContext *context, gpointer user_data) {
gsize operand1;
gsize operand2;
if (ctx->operand1.size != ctx->operand2.size) FATAL("Operand size mismatch");
if (ctx->operand1.size != ctx->operand2.size) FFATAL("Operand size mismatch");
if (!cmplog_get_operand_value(context, &ctx->operand1, &operand1)) { return; }
if (!cmplog_get_operand_value(context, &ctx->operand2, &operand2)) { return; }

View File

@ -1,14 +1,13 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "ctx.h"
#include "util.h"
#if defined(__arm__)
gsize ctx_read_reg(GumArmCpuContext *ctx, arm_reg reg) {
FATAL("ctx_read_reg unimplemented for this architecture");
FFATAL("ctx_read_reg unimplemented for this architecture");
}

View File

@ -1,8 +1,7 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "ctx.h"
#include "util.h"
#if defined(__aarch64__)
@ -174,7 +173,7 @@ gsize ctx_read_reg(GumArm64CpuContext *ctx, arm64_reg reg) {
ARM64_REG_64(ARM64_REG_SP, ctx->sp)
default:
FATAL("Failed to read register: %d", reg);
FFATAL("Failed to read register: %d", reg);
return 0;
}
@ -206,7 +205,7 @@ size_t ctx_get_size(const cs_insn *instr, cs_arm64_op *operand) {
}
mnemonic_len = strlen(instr->mnemonic);
if (mnemonic_len == 0) { FATAL("No mnemonic found"); };
if (mnemonic_len == 0) { FFATAL("No mnemonic found"); };
char last = instr->mnemonic[mnemonic_len - 1];
switch (last) {
@ -252,14 +251,14 @@ size_t ctx_get_size(const cs_insn *instr, cs_arm64_op *operand) {
if (mnemonic_len < 3) {
FATAL("VAS Mnemonic too short: %s\n", instr->mnemonic);
FFATAL("VAS Mnemonic too short: %s\n", instr->mnemonic);
}
vas_digit = instr->mnemonic[2];
if (vas_digit < '0' || vas_digit > '9') {
FATAL("VAS Mnemonic digit out of range: %s\n", instr->mnemonic);
FFATAL("VAS Mnemonic digit out of range: %s\n", instr->mnemonic);
}
@ -293,7 +292,7 @@ size_t ctx_get_size(const cs_insn *instr, cs_arm64_op *operand) {
case ARM64_VAS_16B:
return 16 * count_byte;
default:
FATAL("Unexpected VAS type: %s %d", instr->mnemonic, operand->vas);
FFATAL("Unexpected VAS type: %s %d", instr->mnemonic, operand->vas);
}

View File

@ -1,8 +1,7 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "ctx.h"
#include "util.h"
#if defined(__x86_64__)
@ -121,7 +120,7 @@ gsize ctx_read_reg(GumX64CpuContext *ctx, x86_reg reg) {
X86_REG_64(X86_REG_RIP, ctx->rip)
default:
FATAL("Failed to read register: %d", reg);
FFATAL("Failed to read register: %d", reg);
return 0;
}

View File

@ -1,8 +1,7 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "ctx.h"
#include "util.h"
#if defined(__i386__)
@ -72,7 +71,7 @@ gsize ctx_read_reg(GumIA32CpuContext *ctx, x86_reg reg) {
X86_REG_32(X86_REG_EIP, ctx->eip)
default:
FATAL("Failed to read register: %d", reg);
FFATAL("Failed to read register: %d", reg);
return 0;
}

View File

@ -1,8 +1,10 @@
#include <dlfcn.h>
#include "frida-gumjs.h"
#if defined(__linux__) && !defined(__ANDROID__)
#include <sys/prctl.h>
#endif
#include "debug.h"
#include "frida-gumjs.h"
#include "entry.h"
#include "instrument.h"
@ -16,33 +18,61 @@
extern void __afl_manual_init();
guint64 entry_point = 0;
gboolean traceable = FALSE;
gboolean entry_compiled = FALSE;
gboolean entry_run = FALSE;
static void entry_launch(void) {
OKF("Entry point reached");
FOKF("Entry point reached");
__afl_manual_init();
/* Child here */
entry_run = TRUE;
entry_on_fork();
instrument_on_fork();
seccomp_on_fork();
stats_on_fork();
}
#if defined(__linux__) && defined(PR_SET_PTRACER) && !defined(__ANDROID__)
void entry_on_fork(void) {
if (traceable) {
if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) < 0) {
FFATAL("Failed to PR_SET_PTRACER");
}
}
}
#else
void entry_on_fork(void) {
if (traceable) { FWARNF("AFL_FRIDA_TRACEABLE unsupported"); }
}
#endif
void entry_config(void) {
entry_point = util_read_address("AFL_ENTRYPOINT");
entry_point = util_read_address("AFL_ENTRYPOINT", 0);
if (getenv("AFL_FRIDA_TRACEABLE") != NULL) { traceable = TRUE; }
}
void entry_init(void) {
OKF("entry_point: 0x%016" G_GINT64_MODIFIER "X", entry_point);
FOKF("entry_point: 0x%016" G_GINT64_MODIFIER "X", entry_point);
FOKF("dumpable: [%c]", traceable ? 'X' : ' ');
if (dlopen(NULL, RTLD_NOW) == NULL) { FATAL("Failed to dlopen: %d", errno); }
if (dlopen(NULL, RTLD_NOW) == NULL) { FFATAL("Failed to dlopen: %d", errno); }
}
@ -64,7 +94,7 @@ static void entry_callout(GumCpuContext *cpu_context, gpointer user_data) {
void entry_prologue(GumStalkerIterator *iterator, GumStalkerOutput *output) {
UNUSED_PARAMETER(output);
OKF("AFL_ENTRYPOINT reached");
FOKF("AFL_ENTRYPOINT reached");
if (persistent_start == 0) {

View File

@ -6,7 +6,6 @@
#include "frida-gumjs.h"
#include "config.h"
#include "debug.h"
#include "hash.h"
#include "asan.h"
@ -69,7 +68,8 @@ guint64 instrument_get_offset_hash(GumAddress current_rip) {
guint64 area_offset = hash64((unsigned char *)&current_rip,
sizeof(GumAddress), instrument_hash_seed);
return area_offset &= MAP_SIZE - 1;
gsize map_size_pow2 = util_log2(__afl_map_size);
return area_offset &= ((1 << map_size_pow2) - 1);
}
@ -135,8 +135,8 @@ __attribute__((hot)) static void on_basic_block(GumCpuContext *context,
previous_rip = current_rip;
previous_end = current_end;
instrument_previous_pc = ((current_pc & (MAP_SIZE - 1) >> 1)) |
((current_pc & 0x1) << (MAP_SIZE_POW2 - 1));
gsize map_size_pow2 = util_log2(__afl_map_size);
instrument_previous_pc = util_rotate(current_pc, 1, map_size_pow2);
}
@ -193,7 +193,20 @@ static void instrument_basic_block(GumStalkerIterator *iterator,
instrument_debug_start(instr->address, output);
instrument_coverage_start(instr->address);
#if defined(__arm__)
if (output->encoding == GUM_INSTRUCTION_SPECIAL) {
prefetch_write(GSIZE_TO_POINTER(instr->address + 1));
} else {
prefetch_write(GSIZE_TO_POINTER(instr->address));
}
#else
prefetch_write(GSIZE_TO_POINTER(instr->address));
#endif
if (likely(!excluded)) {
@ -213,7 +226,7 @@ static void instrument_basic_block(GumStalkerIterator *iterator,
}
instrument_debug_instruction(instr->address, instr->size);
instrument_debug_instruction(instr->address, instr->size, output);
if (likely(!excluded)) {
@ -246,7 +259,7 @@ void instrument_config(void) {
instrument_tracing = (getenv("AFL_FRIDA_INST_TRACE") != NULL);
instrument_unique = (getenv("AFL_FRIDA_INST_TRACE_UNIQUE") != NULL);
instrument_use_fixed_seed = (getenv("AFL_FRIDA_INST_SEED") != NULL);
instrument_fixed_seed = util_read_num("AFL_FRIDA_INST_SEED");
instrument_fixed_seed = util_read_num("AFL_FRIDA_INST_SEED", 0);
instrument_coverage_unstable_filename =
(getenv("AFL_FRIDA_INST_UNSTABLE_COVERAGE_FILE"));
@ -261,14 +274,14 @@ void instrument_init(void) {
if (!instrument_is_coverage_optimize_supported()) instrument_optimize = false;
OKF("Instrumentation - optimize [%c]", instrument_optimize ? 'X' : ' ');
OKF("Instrumentation - tracing [%c]", instrument_tracing ? 'X' : ' ');
OKF("Instrumentation - unique [%c]", instrument_unique ? 'X' : ' ');
OKF("Instrumentation - fixed seed [%c] [0x%016" G_GINT64_MODIFIER "x]",
instrument_use_fixed_seed ? 'X' : ' ', instrument_fixed_seed);
OKF("Instrumentation - unstable coverage [%c] [%s]",
instrument_coverage_unstable_filename == NULL ? ' ' : 'X',
instrument_coverage_unstable_filename);
FOKF("Instrumentation - optimize [%c]", instrument_optimize ? 'X' : ' ');
FOKF("Instrumentation - tracing [%c]", instrument_tracing ? 'X' : ' ');
FOKF("Instrumentation - unique [%c]", instrument_unique ? 'X' : ' ');
FOKF("Instrumentation - fixed seed [%c] [0x%016" G_GINT64_MODIFIER "x]",
instrument_use_fixed_seed ? 'X' : ' ', instrument_fixed_seed);
FOKF("Instrumentation - unstable coverage [%c] [%s]",
instrument_coverage_unstable_filename == NULL ? ' ' : 'X',
instrument_coverage_unstable_filename);
if (instrument_tracing && instrument_optimize) {
@ -304,7 +317,8 @@ void instrument_init(void) {
if (instrument_unique) {
int shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600);
int shm_id =
shmget(IPC_PRIVATE, __afl_map_size, IPC_CREAT | IPC_EXCL | 0600);
if (shm_id < 0) { FATAL("shm_id < 0 - errno: %d\n", errno); }
edges_notified = shmat(shm_id, NULL, 0);
@ -321,7 +335,7 @@ void instrument_init(void) {
}
/* Clear it, not sure it's necessary, just seems like good practice */
memset(edges_notified, '\0', MAP_SIZE);
memset(edges_notified, '\0', __afl_map_size);
}
@ -341,15 +355,22 @@ void instrument_init(void) {
* parallel fuzzing. The seed itself, doesn't have to be random, it
* just needs to be different for each instance.
*/
instrument_hash_seed = g_get_monotonic_time() ^
(((guint64)getpid()) << 32) ^ syscall(SYS_gettid);
guint64 tid;
#if defined(__APPLE__)
pthread_threadid_np(NULL, &tid);
#else
tid = syscall(SYS_gettid);
#endif
instrument_hash_seed =
g_get_monotonic_time() ^ (((guint64)getpid()) << 32) ^ tid;
}
OKF("Instrumentation - seed [0x%016" G_GINT64_MODIFIER "x]",
instrument_hash_seed);
FOKF("Instrumentation - seed [0x%016" G_GINT64_MODIFIER "x]",
instrument_hash_seed);
instrument_hash_zero = instrument_get_offset_hash(0);
instrument_coverage_optimize_init();
instrument_debug_init();
instrument_coverage_init();
asan_init();

View File

@ -1,7 +1,5 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "instrument.h"
#include "util.h"
@ -18,13 +16,27 @@ void instrument_coverage_optimize(const cs_insn * instr,
UNUSED_PARAMETER(instr);
UNUSED_PARAMETER(output);
FATAL("Optimized coverage not supported on this architecture");
FFATAL("Optimized coverage not supported on this architecture");
}
void instrument_coverage_optimize_init(void) {
FWARNF("Optimized coverage not supported on this architecture");
}
void instrument_flush(GumStalkerOutput *output) {
gum_arm_writer_flush(output->writer.arm);
if (output->encoding == GUM_INSTRUCTION_SPECIAL) {
gum_thumb_writer_flush(output->writer.thumb);
} else {
gum_arm_writer_flush(output->writer.arm);
}
}

View File

@ -1,7 +1,6 @@
#include "frida-gumjs.h"
#include "config.h"
#include "debug.h"
#include "instrument.h"
@ -95,6 +94,10 @@ void instrument_coverage_optimize(const cs_insn * instr,
}
void instrument_coverage_optimize_init(void) {
}
void instrument_flush(GumStalkerOutput *output) {
gum_arm64_writer_flush(output->writer.arm64);

View File

@ -5,8 +5,6 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "instrument.h"
#include "util.h"
@ -239,7 +237,7 @@ static void instrument_coverage_mark(void *key, void *value, void *user_data) {
}
static void coverage_write(void *data, size_t size) {
static void coverage_write(int fd, void *data, size_t size) {
ssize_t written;
size_t remain = size;
@ -247,11 +245,11 @@ static void coverage_write(void *data, size_t size) {
for (char *cursor = (char *)data; remain > 0;
remain -= written, cursor += written) {
written = write(normal_coverage_fd, cursor, remain);
written = write(fd, cursor, remain);
if (written < 0) {
FATAL("Coverage - Failed to write: %s (%d)\n", (char *)data, errno);
FFATAL("Coverage - Failed to write: %s (%d)\n", (char *)data, errno);
}
@ -259,7 +257,7 @@ static void coverage_write(void *data, size_t size) {
}
static void coverage_format(char *format, ...) {
static void coverage_format(int fd, char *format, ...) {
va_list ap;
char buffer[4096] = {0};
@ -274,11 +272,11 @@ static void coverage_format(char *format, ...) {
len = strnlen(buffer, sizeof(buffer));
coverage_write(buffer, len);
coverage_write(fd, buffer, len);
}
static void coverage_write_modules(GArray *coverage_modules) {
static void coverage_write_modules(int fd, GArray *coverage_modules) {
guint emitted = 0;
for (guint i = 0; i < coverage_modules->len; i++) {
@ -287,16 +285,16 @@ static void coverage_write_modules(GArray *coverage_modules) {
&g_array_index(coverage_modules, coverage_range_t, i);
if (module->count == 0) continue;
coverage_format("%3u, ", emitted);
coverage_format("%016" G_GINT64_MODIFIER "X, ", module->base_address);
coverage_format("%016" G_GINT64_MODIFIER "X, ", module->limit);
coverage_format(fd, "%3u, ", emitted);
coverage_format(fd, "%016" G_GINT64_MODIFIER "X, ", module->base_address);
coverage_format(fd, "%016" G_GINT64_MODIFIER "X, ", module->limit);
/* entry */
coverage_format("%016" G_GINT64_MODIFIER "X, ", 0);
coverage_format(fd, "%016" G_GINT64_MODIFIER "X, ", 0);
/* checksum */
coverage_format("%016" G_GINT64_MODIFIER "X, ", 0);
coverage_format(fd, "%016" G_GINT64_MODIFIER "X, ", 0);
/* timestamp */
coverage_format("%08" G_GINT32_MODIFIER "X, ", 0);
coverage_format("%s\n", module->path);
coverage_format(fd, "%08" G_GINT32_MODIFIER "X, ", 0);
coverage_format(fd, "%s\n", module->path);
emitted++;
}
@ -306,7 +304,7 @@ static void coverage_write_modules(GArray *coverage_modules) {
static void coverage_write_events(void *key, void *value, void *user_data) {
UNUSED_PARAMETER(key);
UNUSED_PARAMETER(user_data);
int fd = *((int *)user_data);
normal_coverage_data_t *val = (normal_coverage_data_t *)value;
if (val->module == NULL) { return; }
@ -319,20 +317,20 @@ static void coverage_write_events(void *key, void *value, void *user_data) {
};
coverage_write(&evt, sizeof(coverage_event_t));
coverage_write(fd, &evt, sizeof(coverage_event_t));
}
static void coverage_write_header(guint coverage_marked_modules) {
static void coverage_write_header(int fd, guint coverage_marked_modules) {
char version[] = "DRCOV VERSION: 2\n";
char flavour[] = "DRCOV FLAVOR: frida\n";
char columns[] = "Columns: id, base, end, entry, checksum, timestamp, path\n";
coverage_write(version, sizeof(version) - 1);
coverage_write(flavour, sizeof(flavour) - 1);
coverage_format("Module Table: version 2, count %u\n",
coverage_write(fd, version, sizeof(version) - 1);
coverage_write(fd, flavour, sizeof(flavour) - 1);
coverage_format(fd, "Module Table: version 2, count %u\n",
coverage_marked_modules);
coverage_write(columns, sizeof(columns) - 1);
coverage_write(fd, columns, sizeof(columns) - 1);
}
@ -371,7 +369,7 @@ static void instrument_coverage_normal_run() {
if (close(normal_coverage_pipes[STDOUT_FILENO]) != 0) {
FATAL("Failed to close parent read pipe");
FFATAL("Failed to close parent read pipe");
}
@ -379,7 +377,7 @@ static void instrument_coverage_normal_run() {
g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, g_free);
if (coverage_hash == NULL) {
FATAL("Failed to g_hash_table_new, errno: %d", errno);
FFATAL("Failed to g_hash_table_new, errno: %d", errno);
}
@ -396,7 +394,7 @@ static void instrument_coverage_normal_run() {
}
if (bytes != 0) { FATAL("Coverage data truncated"); }
if (bytes != 0) { FFATAL("Coverage data truncated"); }
instrument_coverage_print("Coverage - Preparing\n");
@ -414,10 +412,11 @@ static void instrument_coverage_normal_run() {
instrument_coverage_print("Coverage - Marked Modules: %u\n",
coverage_marked_modules);
coverage_write_header(coverage_marked_modules);
coverage_write_modules(coverage_modules);
coverage_format("BB Table: %u bbs\n", ctx.count);
g_hash_table_foreach(coverage_hash, coverage_write_events, NULL);
coverage_write_header(normal_coverage_fd, coverage_marked_modules);
coverage_write_modules(normal_coverage_fd, coverage_modules);
coverage_format(normal_coverage_fd, "BB Table: %u bbs\n", ctx.count);
g_hash_table_foreach(coverage_hash, coverage_write_events,
&normal_coverage_fd);
g_hash_table_unref(coverage_hash);
@ -435,7 +434,7 @@ static GArray *instrument_coverage_unstable_read_unstable_ids(void) {
if (!g_file_get_contents(unstable_coverage_fuzzer_stats, &contents, &length,
NULL)) {
FATAL("Failed to read fuzzer_stats");
FFATAL("Failed to read fuzzer_stats");
}
@ -526,7 +525,7 @@ static GHashTable *instrument_collect_unstable_blocks(
GHashTable *child =
(GHashTable *)g_hash_table_lookup(unstable_coverage_hash, *id);
if (child == NULL) { FATAL("Failed to find edge ID"); }
if (child == NULL) { FFATAL("Failed to find edge ID"); }
GHashTableIter iter = {0};
gpointer value;
@ -565,7 +564,7 @@ static void instrument_coverage_unstable_run(void) {
if (close(unstable_coverage_pipes[STDOUT_FILENO]) != 0) {
FATAL("Failed to close parent read pipe");
FFATAL("Failed to close parent read pipe");
}
@ -573,7 +572,7 @@ static void instrument_coverage_unstable_run(void) {
g_direct_hash, g_direct_equal, NULL, (GDestroyNotify)g_hash_table_unref);
if (unstable_coverage_hash == NULL) {
FATAL("Failed to g_hash_table_new, errno: %d", errno);
FFATAL("Failed to g_hash_table_new, errno: %d", errno);
}
@ -599,7 +598,7 @@ static void instrument_coverage_unstable_run(void) {
if (!g_hash_table_insert(unstable_coverage_hash,
GSIZE_TO_POINTER(value->edge), hash_value)) {
FATAL("Entry already in hashtable");
FFATAL("Entry already in hashtable");
}
@ -613,7 +612,7 @@ static void instrument_coverage_unstable_run(void) {
}
if (bytes != 0) { FATAL("Unstable coverage data truncated"); }
if (bytes != 0) { FFATAL("Unstable coverage data truncated"); }
instrument_coverage_print("Coverage - Preparing\n");
@ -638,10 +637,11 @@ static void instrument_coverage_unstable_run(void) {
instrument_coverage_print("Coverage - Marked Modules: %u\n",
coverage_marked_modules);
coverage_write_header(coverage_marked_modules);
coverage_write_modules(coverage_modules);
coverage_format("BB Table: %u bbs\n", ctx.count);
g_hash_table_foreach(unstable_blocks, coverage_write_events, NULL);
coverage_write_header(unstable_coverage_fd, coverage_marked_modules);
coverage_write_modules(unstable_coverage_fd, coverage_modules);
coverage_format(unstable_coverage_fd, "BB Table: %u bbs\n", ctx.count);
g_hash_table_foreach(unstable_blocks, coverage_write_events,
&unstable_coverage_fd);
g_hash_table_unref(unstable_blocks);
g_array_free(unstable_edge_ids, TRUE);
@ -659,33 +659,33 @@ void instrument_coverage_config(void) {
void instrument_coverage_normal_init(void) {
OKF("Coverage - enabled [%c]",
instrument_coverage_filename == NULL ? ' ' : 'X');
FOKF("Coverage - enabled [%c]",
instrument_coverage_filename == NULL ? ' ' : 'X');
if (instrument_coverage_filename == NULL) { return; }
OKF("Coverage - file [%s]", instrument_coverage_filename);
FOKF("Coverage - file [%s]", instrument_coverage_filename);
char *path = g_canonicalize_filename(instrument_coverage_filename,
g_get_current_dir());
OKF("Coverage - path [%s]", path);
FOKF("Coverage - path [%s]", path);
normal_coverage_fd = open(path, O_RDWR | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
if (normal_coverage_fd < 0) {
FATAL("Failed to open coverage file '%s'", path);
FFATAL("Failed to open coverage file '%s'", path);
}
g_free(path);
if (pipe(normal_coverage_pipes) != 0) { FATAL("Failed to create pipes"); }
if (pipe(normal_coverage_pipes) != 0) { FFATAL("Failed to create pipes"); }
pid_t pid = fork();
if (pid == -1) { FATAL("Failed to start coverage process"); }
if (pid == -1) { FFATAL("Failed to start coverage process"); }
if (pid == 0) {
@ -697,13 +697,13 @@ void instrument_coverage_normal_init(void) {
if (close(normal_coverage_fd) < 0) {
FATAL("Failed to close coverage output file");
FFATAL("Failed to close coverage output file");
}
if (close(normal_coverage_pipes[STDIN_FILENO]) != 0) {
FATAL("Failed to close parent read pipe");
FFATAL("Failed to close parent read pipe");
}
@ -711,15 +711,14 @@ void instrument_coverage_normal_init(void) {
void instrument_coverage_unstable_find_output(void) {
pid_t parent = getpid();
gchar *fds_name = g_strdup_printf("/proc/%d/fd/", getppid());
gchar *root = g_file_read_link("/proc/self/root", NULL);
if (root == NULL) { FATAL("Failed to read link"); }
if (root == NULL) { FFATAL("Failed to read link"); }
GDir *dir = g_dir_open(fds_name, 0, NULL);
OKF("Coverage Unstable - fds: %s", fds_name);
FOKF("Coverage Unstable - fds: %s", fds_name);
for (const gchar *filename = g_dir_read_name(dir); filename != NULL;
filename = g_dir_read_name(dir)) {
@ -727,7 +726,7 @@ void instrument_coverage_unstable_find_output(void) {
gchar *fullname = g_build_path("/", fds_name, filename, NULL);
gchar *link = g_file_read_link(fullname, NULL);
if (link == NULL) { FATAL("Failed to read link: %s", fullname); }
if (link == NULL) { FFATAL("Failed to read link: %s", fullname); }
gchar *basename = g_path_get_basename(link);
if (g_strcmp0(basename, "default") != 0) {
@ -779,11 +778,11 @@ void instrument_coverage_unstable_find_output(void) {
if (unstable_coverage_fuzzer_stats == NULL) {
FATAL("Failed to find fuzzer stats");
FFATAL("Failed to find fuzzer stats");
}
OKF("Fuzzer stats: %s", unstable_coverage_fuzzer_stats);
FOKF("Fuzzer stats: %s", unstable_coverage_fuzzer_stats);
}
@ -794,14 +793,14 @@ void instrument_coverage_unstable_init(void) {
char *path = g_canonicalize_filename(instrument_coverage_unstable_filename,
g_get_current_dir());
OKF("Coverage - unstable path [%s]", instrument_coverage_unstable_filename);
FOKF("Coverage - unstable path [%s]", instrument_coverage_unstable_filename);
unstable_coverage_fd = open(path, O_RDWR | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
if (unstable_coverage_fd < 0) {
FATAL("Failed to open unstable coverage file '%s'", path);
FFATAL("Failed to open unstable coverage file '%s'", path);
}
@ -811,12 +810,12 @@ void instrument_coverage_unstable_init(void) {
if (pipe(unstable_coverage_pipes) != 0) {
FATAL("Failed to create unstable pipes");
FFATAL("Failed to create unstable pipes");
}
pid_t pid = fork();
if (pid == -1) { FATAL("Failed to start coverage process"); }
if (pid == -1) { FFATAL("Failed to start coverage process"); }
if (pid == 0) {
@ -828,13 +827,13 @@ void instrument_coverage_unstable_init(void) {
if (close(unstable_coverage_fd) < 0) {
FATAL("Failed to close unstable coverage output file");
FFATAL("Failed to close unstable coverage output file");
}
if (close(unstable_coverage_pipes[STDIN_FILENO]) != 0) {
FATAL("Failed to close parent read pipe");
FFATAL("Failed to close parent read pipe");
}
@ -866,7 +865,7 @@ void instrument_coverage_end(uint64_t address) {
if (write(normal_coverage_pipes[STDOUT_FILENO], &data,
sizeof(normal_coverage_data_t)) != sizeof(normal_coverage_data_t)) {
FATAL("Coverage I/O error");
FFATAL("Coverage I/O error");
}
@ -889,7 +888,7 @@ void instrument_coverage_unstable(guint64 edge, guint64 previous_rip,
sizeof(unstable_coverage_data_t)) !=
sizeof(unstable_coverage_data_t)) {
FATAL("Unstable coverage I/O error");
FFATAL("Unstable coverage I/O error");
}
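The hunks above drop the implicit global output descriptor: coverage_write_header(), coverage_write_modules() and coverage_format() now take the target fd explicitly, and the per-event writer receives it through g_hash_table_foreach()'s user_data pointer. A minimal sketch of that pattern, with hypothetical names, assuming the writer only needs the fd:

```
#include <glib.h>
#include <stdio.h>
#include <unistd.h>

/* GHFunc callback: the fd arrives via user_data, just as &normal_coverage_fd
   and &unstable_coverage_fd are passed above. */
static void write_event(gpointer key, gpointer value, gpointer user_data) {

  int  fd = *(int *)user_data;
  char line[64];
  int  len = snprintf(line, sizeof(line), "%p -> %p\n", key, value);
  if (write(fd, line, len) != len) { /* a real writer would retry or abort */ }

}

static void dump_events(int fd, GHashTable *events) {

  g_hash_table_foreach(events, write_event, &fd);

}
```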

View File

@ -5,8 +5,6 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "instrument.h"
#include "util.h"
@ -34,18 +32,27 @@ static void instrument_debug(char *format, ...) {
}
static void instrument_disasm(guint8 *start, guint8 *end) {
static void instrument_disasm(guint8 *start, guint8 *end,
GumStalkerOutput *output) {
csh capstone;
cs_err err;
cs_mode mode;
uint16_t size;
cs_insn *insn;
size_t count = 0;
size_t i;
uint16_t len;
mode = GUM_DEFAULT_CS_MODE | GUM_DEFAULT_CS_ENDIAN;
#if defined(__arm__)
if (output->encoding == GUM_INSTRUCTION_SPECIAL) { mode |= CS_MODE_THUMB; }
#endif
err = cs_open(GUM_DEFAULT_CS_ARCH,
GUM_DEFAULT_CS_MODE | GUM_DEFAULT_CS_ENDIAN, &capstone);
CS_MODE_THUMB | GUM_DEFAULT_CS_MODE | GUM_DEFAULT_CS_ENDIAN,
&capstone);
g_assert(err == CS_ERR_OK);
size = GPOINTER_TO_SIZE(end) - GPOINTER_TO_SIZE(start);
@ -89,24 +96,24 @@ void instrument_debug_config(void) {
void instrument_debug_init(void) {
OKF("Instrumentation debugging - enabled [%c]",
instrument_debug_filename == NULL ? ' ' : 'X');
FOKF("Instrumentation debugging - enabled [%c]",
instrument_debug_filename == NULL ? ' ' : 'X');
if (instrument_debug_filename == NULL) { return; }
OKF("Instrumentation debugging - file [%s]", instrument_debug_filename);
FOKF("Instrumentation debugging - file [%s]", instrument_debug_filename);
if (instrument_debug_filename == NULL) { return; }
char *path =
g_canonicalize_filename(instrument_debug_filename, g_get_current_dir());
OKF("Instrumentation debugging - path [%s]", path);
FOKF("Instrumentation debugging - path [%s]", path);
debugging_fd = open(path, O_RDWR | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
if (debugging_fd < 0) { FATAL("Failed to open stats file '%s'", path); }
if (debugging_fd < 0) { FFATAL("Failed to open stats file '%s'", path); }
g_free(path);
@ -123,11 +130,12 @@ void instrument_debug_start(uint64_t address, GumStalkerOutput *output) {
}
void instrument_debug_instruction(uint64_t address, uint16_t size) {
void instrument_debug_instruction(uint64_t address, uint16_t size,
GumStalkerOutput *output) {
if (likely(debugging_fd < 0)) { return; }
uint8_t *start = (uint8_t *)GSIZE_TO_POINTER(address);
instrument_disasm(start, start + size);
instrument_disasm(start, start + size, output);
}
@ -138,7 +146,7 @@ void instrument_debug_end(GumStalkerOutput *output) {
instrument_debug("\nGenerated block %p-%p\n", instrument_gen_start,
instrument_gen_end);
instrument_disasm(instrument_gen_start, instrument_gen_end);
instrument_disasm(instrument_gen_start, instrument_gen_end, output);
}
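The change above makes the debug disassembler honour the encoding reported by Stalker, switching capstone into Thumb mode on 32-bit ARM when the block carries GUM_INSTRUCTION_SPECIAL. The same mode selection in a standalone capstone sketch (the buffer, address and thumb flag are illustrative):

```
#include <capstone/capstone.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

static void disasm_block(const uint8_t *code, size_t size, uint64_t address,
                         bool thumb) {

  csh      handle;
  cs_insn *insn;
  size_t   count, i;
  cs_mode  mode = thumb ? CS_MODE_THUMB : CS_MODE_ARM;

  if (cs_open(CS_ARCH_ARM, mode, &handle) != CS_ERR_OK) { return; }

  count = cs_disasm(handle, code, size, address, 0, &insn);
  if (count > 0) {

    for (i = 0; i < count; i++) {

      printf("0x%" PRIx64 ":\t%s\t%s\n", insn[i].address, insn[i].mnemonic,
             insn[i].op_str);

    }

    cs_free(insn, count);

  }

  cs_close(&handle);

}
```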

View File

@ -1,45 +1,37 @@
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/shm.h>
#if defined(__linux__)
#if !defined(__ANDROID__)
#include <sys/prctl.h>
#include <sys/syscall.h>
#else
#include <linux/ashmem.h>
#endif
#endif
#include "frida-gumjs.h"
#include "config.h"
#include "instrument.h"
#include "ranges.h"
#include "stalker.h"
#include "util.h"
#if defined(__x86_64__)
static GumAddress current_log_impl = GUM_ADDRESS(0);
#ifndef MAP_FIXED_NOREPLACE
#ifdef MAP_EXCL
#define MAP_FIXED_NOREPLACE MAP_EXCL | MAP_FIXED
#else
#define MAP_FIXED_NOREPLACE MAP_FIXED
#endif
#endif
static const guint8 afl_log_code[] = {
0x9c, /* pushfq */
0x51, /* push rcx */
0x52, /* push rdx */
0x48, 0x8b, 0x0d, 0x26,
0x00, 0x00, 0x00, /* mov rcx, sym.&previous_pc */
0x48, 0x8b, 0x11, /* mov rdx, qword [rcx] */
0x48, 0x31, 0xfa, /* xor rdx, rdi */
0x48, 0x03, 0x15, 0x11,
0x00, 0x00, 0x00, /* add rdx, sym._afl_area_ptr_ptr */
0x80, 0x02, 0x01, /* add byte ptr [rdx], 1 */
0x80, 0x12, 0x00, /* adc byte ptr [rdx], 0 */
0x66, 0xd1, 0xcf, /* ror di, 1 */
0x48, 0x89, 0x39, /* mov qword [rcx], rdi */
0x5a, /* pop rdx */
0x59, /* pop rcx */
0x9d, /* popfq */
0xc3, /* ret */
0x90
/* Read-only data goes here: */
/* uint8_t* __afl_area_ptr */
/* uint64_t* &previous_pc */
};
static GHashTable *coverage_blocks = NULL;
gboolean instrument_is_coverage_optimize_supported(void) {
@ -47,39 +39,305 @@ gboolean instrument_is_coverage_optimize_supported(void) {
}
static guint8 align_pad[] = {0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90};
static gboolean instrument_coverage_in_range(gssize offset) {
static void instrument_coverate_write_function(GumStalkerOutput *output) {
return (offset >= G_MININT32 && offset <= G_MAXINT32);
guint64 misalign = 0;
GumX86Writer *cw = output->writer.x86;
}
if (current_log_impl == 0 ||
!gum_x86_writer_can_branch_directly_between(cw->pc, current_log_impl) ||
!gum_x86_writer_can_branch_directly_between(cw->pc + 128,
current_log_impl)) {
#pragma pack(push, 1)
typedef struct {
gconstpointer after_log_impl = cw->code + 1;
// cur_location = (block_address >> 4) ^ (block_address << 8);
// shared_mem[cur_location ^ prev_location]++;
// prev_location = cur_location >> 1;
gum_x86_writer_put_jmp_near_label(cw, after_log_impl);
// mov QWORD PTR [rsp-0x80],rax
// lahf
// mov QWORD PTR [rsp-0x88],rax
// mov QWORD PTR [rsp-0x90],rbx
// mov eax,DWORD PTR [rip+0x333d5a] # 0x7ffff6ff2740
// mov DWORD PTR [rip+0x333d3c],0x9fbb # 0x7ffff6ff2740
// xor eax,0x103f77
// mov bl,BYTE PTR [rax]
// add bl,0x1
// adc bl,0x0
// mov BYTE PTR [rax],bl
// mov rbx,QWORD PTR [rsp-0x90]
// mov rax,QWORD PTR [rsp-0x88]
// sahf
// mov rax,QWORD PTR [rsp-0x80]
misalign = (cw->pc & 0x7);
if (misalign != 0) {
uint8_t mov_rax_rsp_88[8];
uint8_t lahf;
uint8_t mov_rax_rsp_90[8];
uint8_t mov_rbx_rsp_98[8];
gum_x86_writer_put_bytes(cw, align_pad, 8 - misalign);
uint8_t mov_eax_prev_loc[6];
uint8_t mov_prev_loc_curr_loc_shr1[10];
uint8_t xor_eax_curr_loc[5];
uint8_t mov_rbx_ptr_rax[2];
uint8_t add_bl_1[3];
uint8_t adc_bl_0[3];
uint8_t mov_ptr_rax_rbx[2];
uint8_t mov_rsp_98_rbx[8];
uint8_t mov_rsp_90_rax[8];
uint8_t sahf;
uint8_t mov_rsp_88_rax[8];
} afl_log_code_asm_t;
#pragma pack(pop)
typedef union {
afl_log_code_asm_t code;
uint8_t bytes[0];
} afl_log_code;
static const afl_log_code_asm_t template =
{
.mov_rax_rsp_88 = {0x48, 0x89, 0x84, 0x24, 0x78, 0xFF, 0xFF, 0xFF},
.lahf = 0x9f,
.mov_rax_rsp_90 = {0x48, 0x89, 0x84, 0x24, 0x70, 0xFF, 0xFF, 0xFF},
.mov_rbx_rsp_98 = {0x48, 0x89, 0x9C, 0x24, 0x68, 0xFF, 0xFF, 0xFF},
.mov_eax_prev_loc = {0x8b, 0x05},
.mov_prev_loc_curr_loc_shr1 = {0xc7, 0x05},
.xor_eax_curr_loc = {0x35},
.mov_rbx_ptr_rax = {0x8a, 0x18},
.add_bl_1 = {0x80, 0xc3, 0x01},
.adc_bl_0 = {0x80, 0xd3, 0x00},
.mov_ptr_rax_rbx = {0x88, 0x18},
.mov_rsp_98_rbx = {0x48, 0x8B, 0x9C, 0x24, 0x68, 0xFF, 0xFF, 0xFF},
.mov_rsp_90_rax = {0x48, 0x8B, 0x84, 0x24, 0x70, 0xFF, 0xFF, 0xFF},
.sahf = 0x9e,
.mov_rsp_88_rax = {0x48, 0x8B, 0x84, 0x24, 0x78, 0xFF, 0xFF, 0xFF},
}
;
static gboolean instrument_coverage_find_low(const GumRangeDetails *details,
gpointer user_data) {
static GumAddress last_limit = (64ULL << 10);
gpointer * address = (gpointer *)user_data;
if ((details->range->base_address - last_limit) > __afl_map_size) {
*address = GSIZE_TO_POINTER(last_limit);
return FALSE;
}
if (details->range->base_address > ((2ULL << 30) - __afl_map_size)) {
return FALSE;
}
/*
* Align our buffer on a 64k boundary so that the low 16-bits of the address
* are zero, then we can just XOR the base address in, when we XOR with the
* current block ID.
*/
last_limit = GUM_ALIGN_SIZE(
details->range->base_address + details->range->size, (64ULL << 10));
return TRUE;
}
static void instrument_coverage_optimize_map_mmap_anon(gpointer address) {
__afl_area_ptr =
mmap(address, __afl_map_size, PROT_READ | PROT_WRITE,
MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (__afl_area_ptr != address) {
FATAL("Failed to map mmap __afl_area_ptr: %d", errno);
}
}
static void instrument_coverage_optimize_map_mmap(char * shm_file_path,
gpointer address) {
int shm_fd = -1;
if (munmap(__afl_area_ptr, __afl_map_size) != 0) {
FATAL("Failed to unmap previous __afl_area_ptr");
}
__afl_area_ptr = NULL;
#if !defined(__ANDROID__)
shm_fd = shm_open(shm_file_path, O_RDWR, DEFAULT_PERMISSION);
if (shm_fd == -1) { FATAL("shm_open() failed\n"); }
#else
shm_fd = open("/dev/ashmem", O_RDWR);
if (shm_fd == -1) { FATAL("open() failed\n"); }
if (ioctl(shm_fd, ASHMEM_SET_NAME, shm_file_path) == -1) {
FATAL("ioctl(ASHMEM_SET_NAME) failed");
}
if (ioctl(shm_fd, ASHMEM_SET_SIZE, __afl_map_size) == -1) {
FATAL("ioctl(ASHMEM_SET_SIZE) failed");
}
#endif
__afl_area_ptr = mmap(address, __afl_map_size, PROT_READ | PROT_WRITE,
MAP_FIXED_NOREPLACE | MAP_SHARED, shm_fd, 0);
if (__afl_area_ptr != address) {
FATAL("Failed to map mmap __afl_area_ptr: %d", errno);
}
if (close(shm_fd) != 0) { FATAL("Failed to close shm_fd"); }
}
static void instrument_coverage_optimize_map_shm(guint64 shm_env_val,
gpointer address) {
if (shmdt(__afl_area_ptr) != 0) {
FATAL("Failed to detach previous __afl_area_ptr");
}
__afl_area_ptr = shmat(shm_env_val, address, 0);
if (__afl_area_ptr != address) {
FATAL("Failed to map shm __afl_area_ptr: %d", errno);
}
}
static void instrument_coverage_switch(GumStalkerObserver *self,
gpointer start_address,
const cs_insn * from_insn,
gpointer * target) {
UNUSED_PARAMETER(self);
UNUSED_PARAMETER(start_address);
cs_x86 * x86;
cs_x86_op *op;
if (from_insn == NULL) { return; }
x86 = &from_insn->detail->x86;
op = x86->operands;
if (!g_hash_table_contains(coverage_blocks, GSIZE_TO_POINTER(*target))) {
return;
}
switch (from_insn->id) {
case X86_INS_CALL:
case X86_INS_JMP:
if (x86->op_count != 1) {
FATAL("Unexpected operand count: %d", x86->op_count);
}
if (op[0].type != X86_OP_IMM) { return; }
break;
case X86_INS_RET:
break;
default:
return;
}
*target = (guint8 *)*target + sizeof(afl_log_code);
}
void instrument_coverage_optimize_init(void) {
gpointer low_address = NULL;
gum_process_enumerate_ranges(GUM_PAGE_NO_ACCESS, instrument_coverage_find_low,
&low_address);
FOKF("Low address: %p", low_address);
if (low_address == 0 ||
GPOINTER_TO_SIZE(low_address) > ((2UL << 20) - __afl_map_size)) {
FATAL("Invalid low_address: %p", low_address);
}
ranges_print_debug_maps();
char *shm_env = getenv(SHM_ENV_VAR);
FOKF("SHM_ENV_VAR: %s", shm_env);
if (shm_env == NULL) {
FWARNF("SHM_ENV_VAR not set, using anonymous map for debugging purposes");
instrument_coverage_optimize_map_mmap_anon(low_address);
} else {
guint64 shm_env_val = g_ascii_strtoull(shm_env, NULL, 10);
if (shm_env_val == 0) {
instrument_coverage_optimize_map_mmap(shm_env, low_address);
} else {
instrument_coverage_optimize_map_shm(shm_env_val, low_address);
}
current_log_impl = cw->pc;
gum_x86_writer_put_bytes(cw, afl_log_code, sizeof(afl_log_code));
}
uint64_t *afl_prev_loc_ptr = &instrument_previous_pc;
gum_x86_writer_put_bytes(cw, (const guint8 *)&__afl_area_ptr,
sizeof(__afl_area_ptr));
gum_x86_writer_put_bytes(cw, (const guint8 *)&afl_prev_loc_ptr,
sizeof(afl_prev_loc_ptr));
FOKF("__afl_area_ptr: %p", __afl_area_ptr);
FOKF("instrument_previous_pc: %p", &instrument_previous_pc);
gum_x86_writer_put_label(cw, after_log_impl);
}
static void instrument_coverage_suppress_init(void) {
static gboolean initialized = false;
if (initialized) { return; }
initialized = true;
GumStalkerObserver * observer = stalker_get_observer();
GumStalkerObserverInterface *iface = GUM_STALKER_OBSERVER_GET_IFACE(observer);
iface->switch_callback = instrument_coverage_switch;
coverage_blocks = g_hash_table_new(g_direct_hash, g_direct_equal);
if (coverage_blocks == NULL) {
FATAL("Failed to g_hash_table_new, errno: %d", errno);
}
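instrument_coverage_optimize_init() above scans the memory map for a free slot at a low address and then remaps the coverage bitmap there (anonymous mmap, shm_open/ashmem, or shmat), so that the bitmap address fits into the 32-bit immediates of the generated logging code. A reduced sketch of the anonymous-map case with the same MAP_FIXED_NOREPLACE fallback idea (Linux assumed, names illustrative):

```
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
  #define MAP_FIXED_NOREPLACE MAP_FIXED /* older kernels: best effort only */
#endif

/* Try to place a shared, zero-initialised bitmap exactly at `hint`. */
static void *map_bitmap_at(void *hint, size_t size) {

  void *p = mmap(hint, size, PROT_READ | PROT_WRITE,
                 MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (p != hint) {

    fprintf(stderr, "mmap at %p failed: %d\n", hint, errno);
    return NULL;

  }

  return p;

}
```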
@ -88,18 +346,73 @@ static void instrument_coverate_write_function(GumStalkerOutput *output) {
void instrument_coverage_optimize(const cs_insn * instr,
GumStalkerOutput *output) {
afl_log_code code = {0};
GumX86Writer *cw = output->writer.x86;
guint64 area_offset = instrument_get_offset_hash(GUM_ADDRESS(instr->address));
instrument_coverate_write_function(output);
gsize map_size_pow2;
gsize area_offset_ror;
GumAddress code_addr = 0;
gum_x86_writer_put_lea_reg_reg_offset(cw, GUM_REG_RSP, GUM_REG_RSP,
-GUM_RED_ZONE_SIZE);
gum_x86_writer_put_push_reg(cw, GUM_REG_RDI);
gum_x86_writer_put_mov_reg_address(cw, GUM_REG_RDI, area_offset);
gum_x86_writer_put_call_address(cw, current_log_impl);
gum_x86_writer_put_pop_reg(cw, GUM_REG_RDI);
gum_x86_writer_put_lea_reg_reg_offset(cw, GUM_REG_RSP, GUM_REG_RSP,
GUM_RED_ZONE_SIZE);
instrument_coverage_suppress_init();
// gum_x86_writer_put_breakpoint(cw);
code_addr = cw->pc;
if (!g_hash_table_add(coverage_blocks, GSIZE_TO_POINTER(cw->code))) {
FATAL("Failed - g_hash_table_add");
}
code.code = template;
gssize curr_loc_shr_1_offset =
offsetof(afl_log_code, code.mov_prev_loc_curr_loc_shr1) +
sizeof(code.code.mov_prev_loc_curr_loc_shr1) - sizeof(guint32);
map_size_pow2 = util_log2(__afl_map_size);
area_offset_ror = util_rotate(area_offset, 1, map_size_pow2);
*((guint32 *)&code.bytes[curr_loc_shr_1_offset]) = (guint32)(area_offset_ror);
gssize prev_loc_value =
GPOINTER_TO_SIZE(&instrument_previous_pc) -
(code_addr + offsetof(afl_log_code, code.mov_prev_loc_curr_loc_shr1) +
sizeof(code.code.mov_prev_loc_curr_loc_shr1));
gssize prev_loc_value_offset =
offsetof(afl_log_code, code.mov_prev_loc_curr_loc_shr1) +
sizeof(code.code.mov_prev_loc_curr_loc_shr1) - sizeof(gint) -
sizeof(guint32);
if (!instrument_coverage_in_range(prev_loc_value)) {
FATAL("Patch out of range (current_pc_value1): 0x%016lX", prev_loc_value);
}
*((gint *)&code.bytes[prev_loc_value_offset]) = (gint)prev_loc_value;
gssize prev_loc_value2 =
GPOINTER_TO_SIZE(&instrument_previous_pc) -
(code_addr + offsetof(afl_log_code, code.mov_eax_prev_loc) +
sizeof(code.code.mov_eax_prev_loc));
gssize prev_loc_value_offset2 =
offsetof(afl_log_code, code.mov_eax_prev_loc) +
sizeof(code.code.mov_eax_prev_loc) - sizeof(gint);
if (!instrument_coverage_in_range(prev_loc_value2)) {
FATAL("Patch out of range (current_pc_value2): 0x%016lX", prev_loc_value2);
}
*((gint *)&code.bytes[prev_loc_value_offset2]) = (gint)prev_loc_value2;
gssize xor_curr_loc_offset = offsetof(afl_log_code, code.xor_eax_curr_loc) +
sizeof(code.code.xor_eax_curr_loc) -
sizeof(guint32);
*((guint32 *)&code.bytes[xor_curr_loc_offset]) =
(guint32)(GPOINTER_TO_SIZE(__afl_area_ptr) | area_offset);
gum_x86_writer_put_bytes(cw, code.bytes, sizeof(afl_log_code));
}
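For reference, the x86-64 template above inlines the classic AFL edge-coverage update sketched in its comments: the map index is cur_location ^ prev_location, the counter is bumped with an add/adc pair so it never wraps back to zero, and prev_location becomes cur_location >> 1. A plain-C rendering of that logic (the globals and the power-of-two masking are illustrative; the generated code bakes the constants in instead):

```
#include <stdint.h>

extern uint8_t *__afl_area_ptr; /* shared coverage bitmap               */
extern uint32_t __afl_map_size; /* assumed power of two, e.g., 65536    */
static uint32_t previous_pc;    /* stands in for instrument_previous_pc */

static inline void afl_log_edge(uint32_t current_loc) {

  uint32_t idx = (current_loc ^ previous_pc) & (__afl_map_size - 1);
  uint8_t  v = __afl_area_ptr[idx] + 1;

  /* "add 1; adc 0": bump the hit count but skip 0 on overflow */
  __afl_area_ptr[idx] = v ? v : 1;

  previous_pc = current_loc >> 1;

}
```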

View File

@ -1,69 +1,144 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "instrument.h"
#include "stalker.h"
#include "util.h"
#if defined(__i386__)
static GumAddress current_log_impl = GUM_ADDRESS(0);
static GHashTable *coverage_blocks = NULL;
static void instrument_coverage_function(GumX86Writer *cw) {
#pragma pack(push, 1)
typedef struct {
gum_x86_writer_put_pushfx(cw);
gum_x86_writer_put_push_reg(cw, GUM_REG_ECX);
gum_x86_writer_put_push_reg(cw, GUM_REG_EDX);
// cur_location = (block_address >> 4) ^ (block_address << 8);
// shared_mem[cur_location ^ prev_location]++;
// prev_location = cur_location >> 1;
gum_x86_writer_put_mov_reg_address(cw, GUM_REG_ECX,
GUM_ADDRESS(&instrument_previous_pc));
gum_x86_writer_put_mov_reg_reg_ptr(cw, GUM_REG_EDX, GUM_REG_ECX);
gum_x86_writer_put_xor_reg_reg(cw, GUM_REG_EDX, GUM_REG_EDI);
uint8_t mov_eax_esp_4[4];
uint8_t lahf;
uint8_t mov_eax_esp_8[4];
uint8_t mov_ebx_esp_c[4];
gum_x86_writer_put_add_reg_imm(cw, GUM_REG_EDX, GUM_ADDRESS(__afl_area_ptr));
uint8_t mov_eax_prev_loc[5];
uint8_t mov_prev_loc_curr_loc_shr1[10];
/* add byte ptr [edx], 1 */
uint8_t add_byte_ptr_edx_1[] = {0x80, 0x02, 0x01};
gum_x86_writer_put_bytes(cw, add_byte_ptr_edx_1, sizeof(add_byte_ptr_edx_1));
uint8_t xor_eax_curr_loc[5];
uint8_t add_eax_area_ptr[5];
/* adc byte ptr [edx], 0 */
uint8_t adc_byte_ptr_edx_0[] = {0x80, 0x12, 0x00};
gum_x86_writer_put_bytes(cw, adc_byte_ptr_edx_0, sizeof(adc_byte_ptr_edx_0));
uint8_t mov_ebx_ptr_eax[2];
uint8_t add_bl_1[3];
uint8_t adc_bl_0[3];
uint8_t mov_ptr_eax_ebx[2];
uint8_t ror_di_1[] = {0x66, 0xd1, 0xcf};
gum_x86_writer_put_bytes(cw, ror_di_1, sizeof(ror_di_1));
gum_x86_writer_put_mov_reg_ptr_reg(cw, GUM_REG_ECX, GUM_REG_EDI);
uint8_t mov_esp_c_ebx[4];
uint8_t mov_esp_8_eax[4];
uint8_t sahf;
uint8_t mov_esp_4_eax[4];
gum_x86_writer_put_pop_reg(cw, GUM_REG_EDX);
gum_x86_writer_put_pop_reg(cw, GUM_REG_ECX);
gum_x86_writer_put_popfx(cw);
gum_x86_writer_put_ret(cw);
} afl_log_code_asm_t;
#pragma pack(pop)
typedef union {
afl_log_code_asm_t code;
uint8_t bytes[0];
} afl_log_code;
static const afl_log_code_asm_t template =
{
.mov_eax_esp_4 = {0x89, 0x44, 0x24, 0xFC},
.lahf = 0x9f,
.mov_eax_esp_8 = {0x89, 0x44, 0x24, 0xF8},
.mov_ebx_esp_c = {0x89, 0x5C, 0x24, 0xF4},
.mov_eax_prev_loc = {0xA1},
.mov_prev_loc_curr_loc_shr1 = {0xc7, 0x05},
.xor_eax_curr_loc = {0x35},
.add_eax_area_ptr = {0x05},
.mov_ebx_ptr_eax = {0x8a, 0x18},
.add_bl_1 = {0x80, 0xc3, 0x01},
.adc_bl_0 = {0x80, 0xd3, 0x00},
.mov_ptr_eax_ebx = {0x88, 0x18},
.mov_esp_c_ebx = {0x8B, 0x5C, 0x24, 0xF4},
.mov_esp_8_eax = {0x8B, 0x44, 0x24, 0xF8},
.sahf = 0x9e,
.mov_esp_4_eax = {0x8B, 0x44, 0x24, 0xFC},
}
;
gboolean instrument_is_coverage_optimize_supported(void) {
return true;
}
static void instrument_coverate_write_function(GumStalkerOutput *output) {
static void instrument_coverage_switch(GumStalkerObserver *self,
gpointer start_address,
const cs_insn * from_insn,
gpointer * target) {
GumX86Writer *cw = output->writer.x86;
UNUSED_PARAMETER(self);
UNUSED_PARAMETER(start_address);
if (current_log_impl == 0 ||
!gum_x86_writer_can_branch_directly_between(cw->pc, current_log_impl) ||
!gum_x86_writer_can_branch_directly_between(cw->pc + 128,
current_log_impl)) {
cs_x86 * x86;
cs_x86_op *op;
if (from_insn == NULL) { return; }
gconstpointer after_log_impl = cw->code + 1;
x86 = &from_insn->detail->x86;
op = x86->operands;
gum_x86_writer_put_jmp_near_label(cw, after_log_impl);
if (!g_hash_table_contains(coverage_blocks, GSIZE_TO_POINTER(*target))) {
current_log_impl = cw->pc;
instrument_coverage_function(cw);
return;
gum_x86_writer_put_label(cw, after_log_impl);
}
switch (from_insn->id) {
case X86_INS_CALL:
case X86_INS_JMP:
if (x86->op_count != 1) {
FATAL("Unexpected operand count: %d", x86->op_count);
}
if (op[0].type != X86_OP_IMM) { return; }
break;
case X86_INS_RET:
break;
default:
return;
}
*target = (guint8 *)*target + sizeof(afl_log_code);
}
static void instrument_coverage_suppress_init(void) {
static gboolean initialized = false;
if (initialized) { return; }
initialized = true;
GumStalkerObserver * observer = stalker_get_observer();
GumStalkerObserverInterface *iface = GUM_STALKER_OBSERVER_GET_IFACE(observer);
iface->switch_callback = instrument_coverage_switch;
coverage_blocks = g_hash_table_new(g_direct_hash, g_direct_equal);
if (coverage_blocks == NULL) {
FATAL("Failed to g_hash_table_new, errno: %d", errno);
}
@ -72,14 +147,65 @@ static void instrument_coverate_write_function(GumStalkerOutput *output) {
void instrument_coverage_optimize(const cs_insn * instr,
GumStalkerOutput *output) {
afl_log_code code = {0};
GumX86Writer *cw = output->writer.x86;
guint64 area_offset = instrument_get_offset_hash(GUM_ADDRESS(instr->address));
instrument_coverate_write_function(output);
gsize map_size_pow2;
gsize area_offset_ror;
gum_x86_writer_put_push_reg(cw, GUM_REG_EDI);
gum_x86_writer_put_mov_reg_address(cw, GUM_REG_EDI, area_offset);
gum_x86_writer_put_call_address(cw, current_log_impl);
gum_x86_writer_put_pop_reg(cw, GUM_REG_EDI);
code.code = template;
instrument_coverage_suppress_init();
// gum_x86_writer_put_breakpoint(cw);
if (!g_hash_table_add(coverage_blocks, GSIZE_TO_POINTER(cw->code))) {
FATAL("Failed - g_hash_table_add");
}
gssize prev_loc_value_offset2 =
offsetof(afl_log_code, code.mov_eax_prev_loc) +
sizeof(code.code.mov_eax_prev_loc) - sizeof(gint);
*((gint *)&code.bytes[prev_loc_value_offset2]) =
(gint)GPOINTER_TO_SIZE(&instrument_previous_pc);
gssize curr_loc_shr_1_offset =
offsetof(afl_log_code, code.mov_prev_loc_curr_loc_shr1) +
sizeof(code.code.mov_prev_loc_curr_loc_shr1) - sizeof(guint32);
map_size_pow2 = util_log2(__afl_map_size);
area_offset_ror = util_rotate(area_offset, 1, map_size_pow2);
*((guint32 *)&code.bytes[curr_loc_shr_1_offset]) = (guint32)(area_offset_ror);
gssize prev_loc_value_offset =
offsetof(afl_log_code, code.mov_prev_loc_curr_loc_shr1) +
sizeof(code.code.mov_prev_loc_curr_loc_shr1) - sizeof(gint) -
sizeof(guint32);
*((gint *)&code.bytes[prev_loc_value_offset]) =
(gint)GPOINTER_TO_SIZE(&instrument_previous_pc);
gssize xor_curr_loc_offset = offsetof(afl_log_code, code.xor_eax_curr_loc) +
sizeof(code.code.xor_eax_curr_loc) -
sizeof(guint32);
*((guint32 *)&code.bytes[xor_curr_loc_offset]) = (guint32)area_offset;
gssize add_area_ptr_offset = offsetof(afl_log_code, code.add_eax_area_ptr) +
sizeof(code.code.add_eax_area_ptr) -
sizeof(guint32);
*((guint32 *)&code.bytes[add_area_ptr_offset]) = (guint32)__afl_area_ptr;
gum_x86_writer_put_bytes(cw, code.bytes, sizeof(afl_log_code));
}
void instrument_coverage_optimize_init(void) {
}
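Both the x86 and x86-64 variants now emit a fixed byte template and patch its immediates in place; the patch positions are derived with offsetof() into the packed struct, so the assembly layout and the patch sites cannot drift apart. A cut-down sketch of that idiom with a single xor-immediate instruction (hypothetical names):

```
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#pragma pack(push, 1)
typedef struct {

  uint8_t xor_eax_imm32[5]; /* 0x35 imm32: xor eax, imm32 */

} demo_code_asm_t;
#pragma pack(pop)

typedef union {

  demo_code_asm_t code;
  uint8_t         bytes[sizeof(demo_code_asm_t)];

} demo_code_t;

static const demo_code_asm_t demo_template = {

    .xor_eax_imm32 = {0x35, 0x00, 0x00, 0x00, 0x00},

};

/* Copy the template, then patch the 32-bit immediate at its known offset. */
static void emit_demo_block(uint8_t *out, uint32_t curr_loc) {

  demo_code_t code;
  code.code = demo_template;

  size_t off = offsetof(demo_code_t, code.xor_eax_imm32) +
               sizeof(code.code.xor_eax_imm32) - sizeof(uint32_t);
  memcpy(&code.bytes[off], &curr_loc, sizeof(curr_loc));

  memcpy(out, code.bytes, sizeof(code.bytes));

}
```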

View File

@ -1,8 +1,7 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "intercept.h"
#include "util.h"
void intercept_hook(void *address, gpointer replacement, gpointer user_data) {
@ -10,7 +9,7 @@ void intercept_hook(void *address, gpointer replacement, gpointer user_data) {
gum_interceptor_begin_transaction(interceptor);
GumReplaceReturn ret =
gum_interceptor_replace(interceptor, address, replacement, user_data);
if (ret != GUM_REPLACE_OK) { FATAL("gum_interceptor_attach: %d", ret); }
if (ret != GUM_REPLACE_OK) { FFATAL("gum_interceptor_attach: %d", ret); }
gum_interceptor_end_transaction(interceptor);
}
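A hypothetical use of the helper above: hook a function by address and route calls to a replacement with the same signature (the target name, its prototype and the NULL user_data are assumptions for illustration):

```
#include <glib.h>

/* prototype mirrored from intercept.c above */
void intercept_hook(void *address, gpointer replacement, gpointer user_data);

/* assumed target signature: int parse(const char *buf) */
static int parse_replacement(const char *buf) {

  (void)buf;
  return 0; /* pre/post processing around the real call would go here */

}

static void install_parse_hook(void *parse_address) {

  intercept_hook(parse_address, (gpointer)parse_replacement, NULL);

}
```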

View File

@ -62,6 +62,12 @@ class Afl {
const buf = Memory.allocUtf8String(log);
Afl.jsApiWrite(STDOUT_FILENO, buf, log.length);
}
/**
* See `AFL_FRIDA_INST_NO_BACKPATCH`.
*/
static setBackpatchDisable() {
Afl.jsApiSetBackpatchDisable();
}
/**
* See `AFL_FRIDA_DEBUG_MAPS`.
*/
@ -145,6 +151,13 @@ class Afl {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetInstrumentUnstableCoverageFile(buf);
}
/*
* Set a callback to be called in place of the usual `main` function. See
* `Scripting.md` for details.
*/
static setJsMainHook(address) {
Afl.jsApiSetJsMainHook(address);
}
/**
* This is equivalent to setting `AFL_FRIDA_PERSISTENT_ADDR`; again, a
* `NativePointer` should be provided as its argument.
@ -199,6 +212,12 @@ class Afl {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetSeccompFile(buf);
}
/**
* See `AFL_FRIDA_STALKER_ADJACENT_BLOCKS`.
*/
static setStalkerAdjacentBlocks(val) {
Afl.jsApiSetStalkerAdjacentBlocks(val);
}
/*
* Set a function to be called for each instruction which is instrumented
* by AFL FRIDA mode.
@ -243,6 +262,12 @@ class Afl {
const buf = Memory.allocUtf8String(file);
Afl.jsApiSetStdOut(buf);
}
/**
* See `AFL_FRIDA_TRACEABLE`.
*/
static setTraceable() {
Afl.jsApiSetTraceable();
}
static jsApiGetFunction(name, retType, argTypes) {
const addr = Afl.module.getExportByName(name);
return new NativeFunction(addr, retType, argTypes);
@ -261,6 +286,7 @@ Afl.jsApiAddIncludeRange = Afl.jsApiGetFunction("js_api_add_include_range", "voi
Afl.jsApiAflSharedMemFuzzing = Afl.jsApiGetSymbol("__afl_sharedmem_fuzzing");
Afl.jsApiDone = Afl.jsApiGetFunction("js_api_done", "void", []);
Afl.jsApiError = Afl.jsApiGetFunction("js_api_error", "void", ["pointer"]);
Afl.jsApiSetBackpatchDisable = Afl.jsApiGetFunction("js_api_set_backpatch_disable", "void", []);
Afl.jsApiSetDebugMaps = Afl.jsApiGetFunction("js_api_set_debug_maps", "void", []);
Afl.jsApiSetEntryPoint = Afl.jsApiGetFunction("js_api_set_entrypoint", "void", ["pointer"]);
Afl.jsApiSetInstrumentCoverageFile = Afl.jsApiGetFunction("js_api_set_instrument_coverage_file", "void", ["pointer"]);
@ -272,6 +298,7 @@ Afl.jsApiSetInstrumentSeed = Afl.jsApiGetFunction("js_api_set_instrument_seed",
Afl.jsApiSetInstrumentTrace = Afl.jsApiGetFunction("js_api_set_instrument_trace", "void", []);
Afl.jsApiSetInstrumentTraceUnique = Afl.jsApiGetFunction("js_api_set_instrument_trace_unique", "void", []);
Afl.jsApiSetInstrumentUnstableCoverageFile = Afl.jsApiGetFunction("js_api_set_instrument_unstable_coverage_file", "void", ["pointer"]);
Afl.jsApiSetJsMainHook = Afl.jsApiGetFunction("js_api_set_js_main_hook", "void", ["pointer"]);
Afl.jsApiSetPersistentAddress = Afl.jsApiGetFunction("js_api_set_persistent_address", "void", ["pointer"]);
Afl.jsApiSetPersistentCount = Afl.jsApiGetFunction("js_api_set_persistent_count", "void", ["uint64"]);
Afl.jsApiSetPersistentDebug = Afl.jsApiGetFunction("js_api_set_persistent_debug", "void", []);
@ -280,12 +307,14 @@ Afl.jsApiSetPersistentReturn = Afl.jsApiGetFunction("js_api_set_persistent_retur
Afl.jsApiSetPrefetchBackpatchDisable = Afl.jsApiGetFunction("js_api_set_prefetch_backpatch_disable", "void", []);
Afl.jsApiSetPrefetchDisable = Afl.jsApiGetFunction("js_api_set_prefetch_disable", "void", []);
Afl.jsApiSetSeccompFile = Afl.jsApiGetFunction("js_api_set_seccomp_file", "void", ["pointer"]);
Afl.jsApiSetStalkerAdjacentBlocks = Afl.jsApiGetFunction("js_api_set_stalker_adjacent_blocks", "void", ["uint32"]);
Afl.jsApiSetStalkerCallback = Afl.jsApiGetFunction("js_api_set_stalker_callback", "void", ["pointer"]);
Afl.jsApiSetStalkerIcEntries = Afl.jsApiGetFunction("js_api_set_stalker_ic_entries", "void", ["uint32"]);
Afl.jsApiSetStatsFile = Afl.jsApiGetFunction("js_api_set_stats_file", "void", ["pointer"]);
Afl.jsApiSetStatsInterval = Afl.jsApiGetFunction("js_api_set_stats_interval", "void", ["uint64"]);
Afl.jsApiSetStdErr = Afl.jsApiGetFunction("js_api_set_stderr", "void", ["pointer"]);
Afl.jsApiSetStdOut = Afl.jsApiGetFunction("js_api_set_stdout", "void", ["pointer"]);
Afl.jsApiSetTraceable = Afl.jsApiGetFunction("js_api_set_traceable", "void", []);
Afl.jsApiWrite = new NativeFunction(
/* tslint:disable-next-line:no-null-keyword */
Module.getExportByName(null, "write"), "int", ["int", "pointer", "int"]);

View File

@ -1,14 +1,13 @@
#include "frida-gumjs.h"
#include "debug.h"
#include "js.h"
#include "util.h"
static char * js_script = NULL;
gboolean js_done = FALSE;
js_api_stalker_callback_t js_user_callback = NULL;
js_main_hook_t js_main_hook = NULL;
static char * js_script = NULL;
static gchar * filename = "afl.js";
static gchar * contents;
static GumScriptBackend * backend;
@ -25,7 +24,7 @@ static void js_msg(GumScript *script, const gchar *message, GBytes *data,
UNUSED_PARAMETER(script);
UNUSED_PARAMETER(data);
UNUSED_PARAMETER(user_data);
OKF("%s", message);
FOKF("%s", message);
}
@ -50,14 +49,14 @@ static gchar *js_get_script() {
} else {
FATAL("Could not load script file: %s", filename);
FFATAL("Could not load script file: %s", filename);
}
} else {
OKF("Loaded AFL script: %s, %" G_GSIZE_MODIFIER "d bytes", filename,
length);
FOKF("Loaded AFL script: %s, %" G_GSIZE_MODIFIER "d bytes", filename,
length);
gchar *source = g_malloc0(api_js_len + length + 1);
memcpy(source, api_js, api_js_len);
@ -75,7 +74,7 @@ static void js_print_script(gchar *source) {
for (size_t i = 0; split[i] != NULL; i++) {
OKF("%3" G_GSIZE_MODIFIER "d. %s", i + 1, split[i]);
FOKF("%3" G_GSIZE_MODIFIER "d. %s", i + 1, split[i]);
}
@ -89,7 +88,7 @@ static void load_cb(GObject *source_object, GAsyncResult *result,
UNUSED_PARAMETER(source_object);
UNUSED_PARAMETER(user_data);
gum_script_load_finish(script, result);
if (error != NULL) { FATAL("Failed to load script - %s", error->message); }
if (error != NULL) { FFATAL("Failed to load script - %s", error->message); }
}
@ -99,7 +98,7 @@ static void create_cb(GObject *source_object, GAsyncResult *result,
UNUSED_PARAMETER(source_object);
UNUSED_PARAMETER(user_data);
script = gum_script_backend_create_finish(backend, result, &error);
if (error != NULL) { FATAL("Failed to create script: %s", error->message); }
if (error != NULL) { FFATAL("Failed to create script: %s", error->message); }
gum_script_set_message_handler(script, js_msg, NULL, NULL);
@ -128,7 +127,7 @@ void js_start(void) {
while (g_main_context_pending(context))
g_main_context_iteration(context, FALSE);
if (!js_done) { FATAL("Script didn't call Afl.done()"); }
if (!js_done) { FFATAL("Script didn't call Afl.done()"); }
}
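js_start() above relies on GLib's asynchronous script creation and loading, so it spins the thread-default main context until create_cb/load_cb have fired and js_done can be checked. The pumping idiom in isolation (helper name is illustrative):

```
#include <glib.h>

/* Drain whatever events are currently queued without ever blocking. */
static void drain_main_context(void) {

  GMainContext *context = g_main_context_get_thread_default();

  while (g_main_context_pending(context)) {

    g_main_context_iteration(context, FALSE); /* FALSE = may_block off */

  }

}
```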

View File

@ -1,4 +1,3 @@
#include "debug.h"
#include "entry.h"
#include "instrument.h"
@ -12,6 +11,10 @@
#include "stats.h"
#include "util.h"
typedef uint8_t u8;
extern void __afl_set_persistent_mode(u8 mode);
__attribute__((visibility("default"))) void js_api_done() {
js_done = TRUE;
@ -20,7 +23,7 @@ __attribute__((visibility("default"))) void js_api_done() {
__attribute__((visibility("default"))) void js_api_error(char *msg) {
FATAL("%s", msg);
FFATAL("%s", msg);
}
@ -48,6 +51,8 @@ __attribute__((visibility("default"))) void js_api_set_persistent_address(
persistent_start = GPOINTER_TO_SIZE(address);
__afl_set_persistent_mode(1);
}
__attribute__((visibility("default"))) void js_api_set_persistent_return(
@ -231,3 +236,29 @@ __attribute__((visibility("default"))) void js_api_set_stalker_ic_entries(
}
__attribute__((visibility("default"))) void js_api_set_traceable(void) {
traceable = TRUE;
}
__attribute__((visibility("default"))) void js_api_set_backpatch_disable(void) {
backpatch_enable = FALSE;
}
__attribute__((visibility("default"))) void js_api_set_stalker_adjacent_blocks(
guint val) {
stalker_adjacent_blocks = val;
}
__attribute__((visibility("default"))) void js_api_set_js_main_hook(
const js_main_hook_t hook) {
js_main_hook = hook;
}
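Each new js_api_set_* entry point follows the same shape: a small C function exported with default visibility that updates a configuration global, so the TypeScript bridge can resolve it by name and call it as a NativeFunction. A minimal sketch with an invented option:

```
#include <glib.h>

static guint demo_option = 0; /* hypothetical configuration global */

__attribute__((visibility("default"))) void js_api_set_demo_option(guint val) {

  demo_option = val;

}
```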

Some files were not shown because too many files have changed in this diff.