Compare commits

...

134 Commits
cg ... 3.10c

Author SHA1 Message Date
bd0a23de73 Merge pull request #767 from AFLplusplus/dev
Final push for release
2021-03-01 10:12:04 +01:00
4619a1395b ensure proper aligning for skim patch 2021-03-01 09:57:57 +01:00
0c38850f95 3.10c release 2021-03-01 09:43:35 +01:00
07884e0054 fedora qemu lib fix 2021-03-01 09:37:07 +01:00
bdadbb7207 Merge pull request #770 from rchildre3/qasan-print-formats
Fix printf specifiers of (s)size_t in QAsan hooks
2021-02-28 22:02:35 +01:00
e389eb9842 Fix printf specifiers of (s)size_t in QAsan hooks
* size_t specifier is %zu or %zx
* ssize_t specifier is %zd

Helpful for cross compiling
2021-02-28 15:08:59 -05:00
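The fix above amounts to using the C99 conversion specifiers for `size_t`/`ssize_t`. A small, hypothetical illustration (not taken from the commit) of why this matters for cross compiling: with `%zu`/`%zd` the same source compiles cleanly under `-Wformat` on both 32- and 64-bit targets, whereas `%lu`/`%ld` trips warnings on one of them.

```sh
# Illustration only; compiler names are assumptions about your toolchain.
cat > fmt_check.c <<'EOF'
#include <stdio.h>
#include <sys/types.h>

int main(void) {
  size_t  n = 42;
  ssize_t r = -1;
  /* %zd for ssize_t, %zu/%zx for size_t */
  printf("read %zd bytes into a %zu byte buffer (0x%zx)\n", r, n, n);
  return 0;
}
EOF
cc -Wall -Wformat -o fmt_check fmt_check.c                              # native build
# arm-linux-gnueabihf-gcc -Wall -Wformat -o fmt_check.arm fmt_check.c   # 32-bit cross build, if installed
```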
5cf0655071 metrics 2021-02-28 10:01:35 +01:00
f81ef4abf4 fix afl-common compile 2021-02-28 00:12:39 +01:00
6036cf8437 BSD: how to avoid core dumps 2021-02-28 00:24:29 +01:00
1cad645400 fix %ld in hooks.c 2021-02-27 18:30:04 +01:00
36846836ed libqasan: read and write hooks 2021-02-27 18:26:57 +01:00
79f1a44a01 fix qasan search path 2021-02-27 18:14:58 +01:00
c2127e3ff7 disable the generation of core files in DragonFly BSD 2021-02-27 17:58:25 +01:00
2ad495ad0a reworked unicornafl documentation 2021-02-27 17:19:00 +01:00
8e051fd075 fixed rust bindings placement 2021-02-27 16:37:00 +01:00
af628b16d1 added rust binding reference 2021-02-27 16:29:29 +01:00
c219502f0f some rust cleanup 2021-02-27 15:52:36 +01:00
a5da9ce42c custom mutator rust support (#752)
* custom mutator rust support

* clarify how to view documentation for rust mutators

* remove `FuzzResult` hack and clarify lifetimes of CustomMutator::fuzz

* rename TErr associated type to Error to be more idiomatic

* fix warnings

* add example for fallible custom mutator

* make Fallible Custom Mutator the default and implement its handle_err method by default

* rename CustomMutator::handle_err to handle_error

* add example mutator using lain
2021-02-27 15:05:13 +01:00
79e02c2a9b remove debug output 2021-02-26 22:54:35 +01:00
3a461944ec fine tune cmplog 2021-02-26 22:36:19 +01:00
78d96c4dc8 Merge pull request #759 from AFLplusplus/dev
push to stable
2021-02-26 09:45:43 +01:00
ee0ca07f3c changing the -t ...+ meaning to "auto-calculate but this is the max" 2021-02-25 12:19:46 +01:00
7ae7b0f373 docs update 2021-02-25 11:08:37 +01:00
e2b4bc9310 update changelog 2021-02-25 11:02:59 +01:00
6c9777de13 edges in afl-plot 2021-02-25 10:42:39 +01:00
2f7e57f6aa helper_min3 func 2021-02-25 10:04:41 +01:00
5c239451cf cmplog finetuning 2021-02-25 09:15:54 +01:00
35ca51c5a8 Merge branch 'dev' of github.com:AFLplusplus/AFLplusplus into dev 2021-02-24 21:29:09 +01:00
047f3436e9 edges in plot file 2021-02-24 21:29:00 +01:00
5d181950eb fixes 2021-02-24 21:17:58 +01:00
48a1a29baa typos 2021-02-24 20:34:33 +01:00
c05d392cd9 Merge branch 'dev' of github.com:AFLplusplus/AFLplusplus into dev 2021-02-24 18:07:18 +01:00
cc7c651dc9 tidied up env suggestions 2021-02-24 18:07:08 +01:00
e6ef2ee338 typo 2021-02-24 12:14:10 +01:00
a090b2013f docs 2021-02-24 12:02:44 +01:00
564f491566 redqueen settings opt 2021-02-24 09:29:19 +01:00
2daeeab844 encode double quote in dict 2021-02-23 10:06:15 +01:00
4ab90e739f code format 2021-02-22 22:24:59 +01:00
745bc083d1 fix error msg 2021-02-22 18:22:09 +01:00
7674dac1a1 auto mode for CTX + NGRAM 2021-02-22 18:17:35 +01:00
fb2a6b6941 minimum sync time 2021-02-22 16:56:35 +01:00
70fe872940 ensure a valid seed exists 2021-02-22 16:39:38 +01:00
a252943236 another fix for disabled entries 2021-02-22 12:59:01 +01:00
8c133b607c stdstring fix attempt 2021-02-22 12:43:39 +01:00
2785c8b197 crash fix 2021-02-22 12:34:37 +01:00
a81b5aa921 dict2file fix 2021-02-22 11:08:25 +01:00
8ad78f5b65 fix 2021-02-21 23:42:08 +01:00
ac9cfd89da how to compare afl++ 2021-02-21 23:27:07 +01:00
c67c4ce757 doc update 2021-02-21 23:17:40 +01:00
974aab6cf6 cmplog config.h -> -l option 2021-02-21 17:53:09 +01:00
b957218a3a more attuned colorize replace 2021-02-21 11:30:05 +01:00
f629f4e341 Merge pull request #755 from rchildre3/improve-llvm-build-instructions
Improve LLVM build instructions
2021-02-21 09:15:20 +01:00
871c3c91ec Improve LLVM build instructions
* Enable shell highlighting on code block
* Shallow clone of source due to extensive history
* Line break and sort the CMake options for visibility
* Disable most extraneous options (e.g. docs, tests, benchmarks,
  clang-tools-extra, OpenCL interface)
* Only build for the host architecture by default
* Support other sub-make interfaces, like the recommended Ninja Build
  System
* Harden against paths with spaces
* Prefer linking against the newly built LLVM libraries by prepending to
  LD_LIBRARY_PATH
2021-02-20 19:24:28 -05:00
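A rough sketch of the kind of build the commit above describes (this is an illustration of the listed points, not the repository's actual instructions; paths, the LLVM branch, and the project list are assumptions to adjust for your setup):

```sh
# Shallow clone to avoid downloading the extensive history.
git clone --depth=1 https://github.com/llvm/llvm-project
cd llvm-project

# One CMake option per line, extras disabled, host target only, Ninja generator.
cmake -S llvm -B build -G Ninja \
    -DCMAKE_BUILD_TYPE=Release \
    -DLLVM_ENABLE_PROJECTS='clang;compiler-rt;lld' \
    -DLLVM_TARGETS_TO_BUILD=host \
    -DLLVM_INCLUDE_DOCS=OFF \
    -DLLVM_INCLUDE_EXAMPLES=OFF \
    -DLLVM_INCLUDE_BENCHMARKS=OFF \
    -DLLVM_INCLUDE_TESTS=OFF
ninja -C build

# Prefer the freshly built libraries; quoting keeps paths with spaces intact.
export LD_LIBRARY_PATH="$(pwd)/build/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export PATH="$(pwd)/build/bin:$PATH"
```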
100aac4dd3 -t help 2021-02-20 14:15:38 +01:00
d941da33ae qemuafl 2021-02-19 21:20:33 +01:00
62767a42dc improved env suggestions 2021-02-19 20:40:38 +01:00
89cf94f0e6 suggested env vars for lazy ppl 2021-02-19 20:33:12 +01:00
17211253b2 libhfcommon not anymore a symlink 2021-02-19 18:25:24 +01:00
6998489b26 Revert "Remove self-referential symlink"
This reverts commit aaf5fcd98a.
2021-02-19 18:21:10 +01:00
4290cb5877 fix hongg mutator API change 2021-02-19 16:07:47 +01:00
801f2449ec Merge pull request #753 from frewsxcv/frewsxcv-libhf
Remove self-referential symlink
2021-02-19 16:02:25 +01:00
aaf5fcd98a Remove self-referential symlink 2021-02-19 09:18:59 -05:00
5edfb7ba85 nicer bib 2021-02-19 15:04:53 +01:00
a5cb522f01 Merge branch 'dev' of github.com:AFLplusplus/AFLplusplus into dev 2021-02-19 15:03:22 +01:00
3195119dad pointer to cite in readme 2021-02-19 15:03:10 +01:00
d6fe6b9537 qemuafl 2021-02-19 10:49:36 +01:00
c0f9fba6d6 Merge branch 'dev' of https://github.com/AFLplusplus/AFLplusplus into dev 2021-02-18 19:40:22 +01:00
1a713ff420 fix qemu AFL_ENTRYPOINT for arm 32 and 64 bit 2021-02-18 19:39:46 +01:00
89af2ef7a9 update honggfuzz custom mutator 2021-02-18 11:15:59 +01:00
907c5d4276 Merge pull request #751 from AFLplusplus/dev
fix a rare i2s illegal memory access
2021-02-17 19:19:25 +01:00
5dd35f5281 fix a rare i2s illegal memory access 2021-02-17 19:10:05 +01:00
857229654e Merge pull request #750 from AFLplusplus/dev
Push to stable
2021-02-17 17:43:11 +01:00
4c47b242eb fix FPE in colorization 2021-02-17 17:40:01 +01:00
938512a6b9 minor fixes 2021-02-17 09:48:04 +01:00
7444cfa450 Merge pull request #748 from b1gr3db/dev
Regression fix
2021-02-16 17:52:44 +01:00
f091b8d692 Regression fix
Previous fix was undone in a subsequent commit
2021-02-16 11:17:55 -05:00
7d97ffb1e8 check for setuptools instead of easy_install 2021-02-16 16:27:56 +01:00
80bdbf7be0 minor cmplog fix 2021-02-16 09:54:15 +01:00
686719cdca Merge pull request #746 from f0rki/cmplog-intcast-fix
CMPLOG llvm pass: use CreateIntCast to cast to the right integer type
2021-02-16 09:51:31 +01:00
6caec2169c Revert "llvm bug workaround for lto extint"
This reverts commit e3a5c31307.
2021-02-15 19:14:28 +01:00
5212481352 CMPLOG llvm pass: use CreateIntCast to cast to the right integer type 2021-02-15 19:13:50 +01:00
d999725de2 Merge pull request #737 from AFLplusplus/dev
push to stable
2021-02-15 15:20:06 +01:00
145c673a80 finished merge 2021-02-15 15:04:34 +01:00
c5017945f7 merged 2021-02-15 14:07:10 +01:00
5c4c49d9ca black 2021-02-15 14:00:08 +01:00
cebde1f9e6 ran black on python scripts 2021-02-15 13:52:23 +01:00
0298ae82b0 code-format 2021-02-15 13:52:03 +01:00
512f53984c fixed scan-build issues 2021-02-15 13:51:32 +01:00
e3a5c31307 llvm bug workaround for lto extint 2021-02-15 13:25:15 +01:00
dd2fd80274 doc updates 2021-02-15 12:40:10 +01:00
ffc1fc655f qemuafl 2021-02-15 10:27:44 +01:00
fe477e96ae fix configure arguments 2021-02-15 09:24:13 +01:00
98559ea8b0 fix compiler warning 2021-02-15 08:54:53 +01:00
f31d8b8401 redqueen fix compiler warnings for 32 bit 2021-02-15 08:46:19 +01:00
389e348826 fixes 2021-02-14 22:43:52 +01:00
98fd50f78f Merge branch 'stable' into dev 2021-02-14 22:42:13 +01:00
95561ec5a7 dockerfile fix 2021-02-14 21:47:42 +01:00
fe9da70705 disabling march=native due to problems on intel platforms 2021-02-14 18:43:43 +01:00
95c77c8486 try qemu build fix 2021-02-14 11:46:23 +01:00
e45333bcf9 Merge branch 'dev' of https://github.com/AFLplusplus/AFLplusplus into dev 2021-02-13 23:29:04 +01:00
c906c042be fix compiler warning 2021-02-13 23:28:15 +01:00
9bd1e19d7f added AFL_IGNORE_UNKNOWN_ENVS 2021-02-13 22:43:56 +01:00
6ce9230ed6 afl-cmin/afl-showmap -f 2021-02-13 14:29:22 +01:00
1d60c39191 fix new compiler warning 2021-02-13 13:42:37 +01:00
70651d60bd Merge branch 'dev' of https://github.com/AFLplusplus/AFLplusplus into dev 2021-02-13 13:32:42 +01:00
385312c658 fix issue #732 afl-cmin and afl-showmap should support '-f' 2021-02-13 13:31:17 +01:00
87a607c7d0 update doc 2021-02-13 11:17:53 +01:00
1ba5d1008e fuck you llvm 13 2021-02-13 10:53:40 +01:00
129a5adaf1 fix 2021-02-13 09:29:35 +01:00
d827bc4580 dont break on llvm 13 2021-02-13 09:12:36 +01:00
64e46dcefc remove libcompcov 32 bit warnings 2021-02-12 22:07:17 +01:00
c0b3127b9d remove travis badge 2021-02-12 14:54:24 +01:00
7cfa690d1c typo 2021-02-12 11:05:46 +01:00
22a3c7f7d0 fix #736 (ty b1gr3db) 2021-02-12 09:42:22 +01:00
16ffbb37f5 typo 2021-02-11 22:09:19 +01:00
ea05f3f4cd typos 2021-02-11 21:55:14 +01:00
91f2f057e4 fix #723 2021-02-11 21:17:08 +01:00
d44cf1344d typo 2021-02-11 21:08:10 +01:00
756206e4d7 typo 2021-02-11 20:26:02 +01:00
2ff6e5023f typos 2021-02-11 20:14:48 +01:00
223bd70f1f typo 2021-02-11 20:08:28 +01:00
dd3f4bb41c typos & formatting 2021-02-11 20:05:06 +01:00
f3e783d343 typo 2021-02-11 19:55:21 +01:00
f4cac37b04 typos 2021-02-11 10:20:36 +01:00
5b2634f711 update changelog + ideas 2021-02-10 17:56:27 +01:00
267b085f80 dlmalloc only for non glibc qasan and AFL_QEMU_FORCE_DFL 2021-02-10 15:15:16 +01:00
b6643743d6 fix laf for potential crashes 2021-02-10 10:13:08 +01:00
17cbb03ba7 more cmplog options in config.h 2021-02-09 09:18:24 +01:00
2cd4624779 build docker without march=native 2021-02-09 08:38:33 +01:00
e11665564b fix docker build action 2021-02-09 08:30:49 +01:00
93cebd6c7f Merge pull request #734 from Pamplemousse/build_docker_image_in_actions
Use GitHub Actions to build Docker image
2021-02-09 08:28:31 +01:00
a124540e50 Use GitHub Actions to build Docker image
... then deploy to hub.docker.com .

Signed-off-by: Pamplemousse <xav.maso@gmail.com>
2021-02-08 20:36:06 -07:00
c465e48e27 remove AFL_CC from unset list to allow success for unusual environments 2021-02-07 16:19:27 +01:00
c2c65fd9c1 mark llvm 13 as unsupported (yet) 2021-02-07 09:42:28 +01:00
93 changed files with 4961 additions and 3181 deletions

View File

@ -33,13 +33,13 @@ if CLANG_FORMAT_BIN is None:
o, _ = p.communicate()
o = str(o, "utf-8")
o = re.sub(r".*ersion ", "", o)
#o = o[len("clang-format version "):].strip()
o = o[:o.find(".")]
# o = o[len("clang-format version "):].strip()
o = o[: o.find(".")]
o = int(o)
except:
print ("clang-format-11 is needed. Aborted.")
print("clang-format-11 is needed. Aborted.")
exit(1)
#if o < 7:
# if o < 7:
# if subprocess.call(['which', 'clang-format-7'], stdout=subprocess.PIPE) == 0:
# CLANG_FORMAT_BIN = 'clang-format-7'
# elif subprocess.call(['which', 'clang-format-8'], stdout=subprocess.PIPE) == 0:
@ -52,8 +52,8 @@ if CLANG_FORMAT_BIN is None:
# print ("clang-format 7 or above is needed. Aborted.")
# exit(1)
else:
CLANG_FORMAT_BIN = 'clang-format-11'
CLANG_FORMAT_BIN = "clang-format-11"
COLUMN_LIMIT = 80
for line in fmt.split("\n"):
line = line.split(":")
@ -69,26 +69,47 @@ def custom_format(filename):
in_define = False
last_line = None
out = ""
for line in src.split("\n"):
if line.lstrip().startswith("#"):
if line[line.find("#")+1:].lstrip().startswith("define"):
if line[line.find("#") + 1 :].lstrip().startswith("define"):
in_define = True
if "/*" in line and not line.strip().startswith("/*") and line.endswith("*/") and len(line) < (COLUMN_LIMIT-2):
if (
"/*" in line
and not line.strip().startswith("/*")
and line.endswith("*/")
and len(line) < (COLUMN_LIMIT - 2)
):
cmt_start = line.rfind("/*")
line = line[:cmt_start] + " " * (COLUMN_LIMIT-2 - len(line)) + line[cmt_start:]
line = (
line[:cmt_start]
+ " " * (COLUMN_LIMIT - 2 - len(line))
+ line[cmt_start:]
)
define_padding = 0
if last_line is not None and in_define and last_line.endswith("\\"):
last_line = last_line[:-1]
define_padding = max(0, len(last_line[last_line.rfind("\n")+1:]))
define_padding = max(0, len(last_line[last_line.rfind("\n") + 1 :]))
if last_line is not None and last_line.strip().endswith("{") and line.strip() != "":
if (
last_line is not None
and last_line.strip().endswith("{")
and line.strip() != ""
):
line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
elif last_line is not None and last_line.strip().startswith("}") and line.strip() != "":
elif (
last_line is not None
and last_line.strip().startswith("}")
and line.strip() != ""
):
line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
elif line.strip().startswith("}") and last_line is not None and last_line.strip() != "":
elif (
line.strip().startswith("}")
and last_line is not None
and last_line.strip() != ""
):
line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
if not line.endswith("\\"):
@ -97,14 +118,15 @@ def custom_format(filename):
out += line + "\n"
last_line = line
return (out)
return out
args = sys.argv[1:]
if len(args) == 0:
print ("Usage: ./format.py [-i] <filename>")
print ()
print (" The -i option, if specified, let the script to modify in-place")
print (" the source files. By default the results are written to stdout.")
print("Usage: ./format.py [-i] <filename>")
print()
print(" The -i option, if specified, let the script to modify in-place")
print(" the source files. By default the results are written to stdout.")
print()
exit(1)
@ -120,4 +142,3 @@ for filename in args:
f.write(code)
else:
print(code)

View File

@ -0,0 +1,27 @@
name: Publish Docker Images
on:
push:
branches: [ stable ]
paths:
- Dockerfile
pull_request:
branches: [ stable ]
paths:
- Dockerfile
jobs:
push_to_registry:
name: Push Docker images to Dockerhub
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: Login to Dockerhub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Publish aflpp to Registry
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: aflplusplus/aflplusplus:latest

View File

@ -11,6 +11,8 @@ LABEL "about"="AFLplusplus docker image"
ARG DEBIAN_FRONTEND=noninteractive
env NO_ARCH_OPT 1
RUN apt-get update && \
apt-get -y install --no-install-suggests --no-install-recommends \
automake \
@ -48,16 +50,16 @@ RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 0
ENV LLVM_CONFIG=llvm-config-12
ENV AFL_SKIP_CPUFREQ=1
ENV AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES=1
RUN git clone https://github.com/vanhauser-thc/afl-cov /afl-cov
RUN git clone --depth=1 https://github.com/vanhauser-thc/afl-cov /afl-cov
RUN cd /afl-cov && make install && cd ..
COPY . /AFLplusplus
WORKDIR /AFLplusplus
RUN export REAL_CXX=g++-10 && export CC=gcc-10 && \
export CXX=g++-10 && make clean && \
make distrib CFLAGS="-O3 -funroll-loops -D_FORTIFY_SOURCE=2" && make install && make clean
RUN export CC=gcc-10 && export CXX=g++-10 && make clean && \
make distrib && make install && make clean
RUN echo 'alias joe="jupp --wordwrap"' >> ~/.bashrc
RUN echo 'export PS1="[afl++]$PS1"' >> ~/.bashrc

View File

@ -57,8 +57,6 @@ ifdef MSAN_BUILD
override LDFLAGS += -fsanitize=memory
endif
ifeq "$(findstring android, $(shell $(CC) --version 2>/dev/null))" ""
ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=full -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
CFLAGS_FLTO ?= -flto=full
@ -77,17 +75,17 @@ ifeq "$(shell echo 'int main() {return 0; }' | $(CC) -fno-move-loop-invariants -
SPECIAL_PERFORMANCE += -fno-move-loop-invariants -fdisable-tree-cunrolli
endif
ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
ifndef SOURCE_DATE_EPOCH
HAVE_MARCHNATIVE = 1
CFLAGS_OPT += -march=native
endif
endif
#ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
# ifndef SOURCE_DATE_EPOCH
# HAVE_MARCHNATIVE = 1
# CFLAGS_OPT += -march=native
# endif
#endif
ifneq "$(shell uname)" "Darwin"
ifeq "$(HAVE_MARCHNATIVE)" "1"
SPECIAL_PERFORMANCE += -march=native
endif
#ifeq "$(HAVE_MARCHNATIVE)" "1"
# SPECIAL_PERFORMANCE += -march=native
#endif
# OS X does not like _FORTIFY_SOURCE=2
ifndef DEBUG
CFLAGS_OPT += -D_FORTIFY_SOURCE=2
@ -366,6 +364,7 @@ help:
@echo NO_PYTHON - disable python support
@echo NO_SPLICING - disables splicing mutation in afl-fuzz, not recommended for normal fuzzing
@echo AFL_NO_X86 - if compiling on non-intel/amd platforms
@echo NO_ARCH_OPT - builds afl++ without machine architecture optimizations
@echo "LLVM_CONFIG - if your distro doesn't use the standard name for llvm-config (e.g. Debian)"
@echo "=========================================="
@echo e.g.: make ASAN_BUILD=1
@ -519,7 +518,7 @@ code-format:
ifndef AFL_NO_X86
test_build: afl-cc afl-gcc afl-as afl-showmap
@echo "[*] Testing the CC wrapper afl-cc and its instrumentation output..."
@unset AFL_MAP_SIZE AFL_USE_UBSAN AFL_USE_CFISAN AFL_USE_ASAN AFL_USE_MSAN AFL_CC; ASAN_OPTIONS=detect_leaks=0 AFL_INST_RATIO=100 AFL_PATH=. ./afl-cc test-instr.c -o test-instr 2>&1 || (echo "Oops, afl-cc failed"; exit 1 )
@unset AFL_MAP_SIZE AFL_USE_UBSAN AFL_USE_CFISAN AFL_USE_ASAN AFL_USE_MSAN; ASAN_OPTIONS=detect_leaks=0 AFL_INST_RATIO=100 AFL_PATH=. ./afl-cc test-instr.c -o test-instr 2>&1 || (echo "Oops, afl-cc failed"; exit 1 )
ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr0 ./test-instr < /dev/null
echo 1 | ASAN_OPTIONS=detect_leaks=0 ./afl-showmap -m none -q -o .test-instr1 ./test-instr
@rm -f test-instr

View File

@ -43,7 +43,8 @@ endif
LLVMVER = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/git//' | sed 's/svn//' )
LLVM_MAJOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/\..*//' )
LLVM_MINOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/.*\.//' | sed 's/git//' | sed 's/svn//' | sed 's/ .*//' )
LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^3\.[0-3]|^19' && echo 1 || echo 0 )
LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^3\.[0-3]|^[0-2]\.' && echo 1 || echo 0 )
LLVM_TOO_NEW = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[3-9]' && echo 1 || echo 0 )
LLVM_NEW_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[0-9]' && echo 1 || echo 0 )
LLVM_10_OK = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[1-9]|^10\.[1-9]|^10\.0.[1-9]' && echo 1 || echo 0 )
LLVM_HAVE_LTO = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[1-9]' && echo 1 || echo 0 )
@ -58,7 +59,11 @@ ifeq "$(LLVMVER)" ""
endif
ifeq "$(LLVM_UNSUPPORTED)" "1"
$(warning llvm_mode only supports llvm versions 3.4 up to 12)
$(error llvm_mode only supports llvm from version 3.4 onwards)
endif
ifeq "$(LLVM_TOO_NEW)" "1"
$(warning you are using an in-development llvm version - this might break llvm_mode!)
endif
LLVM_TOO_OLD=1

View File

@ -2,11 +2,9 @@
<img align="right" src="https://raw.githubusercontent.com/andreafioraldi/AFLplusplus-website/master/static/logo_256x256.png" alt="AFL++ Logo">
![Travis State](https://api.travis-ci.com/AFLplusplus/AFLplusplus.svg?branch=stable)
Release Version: [3.10c](https://github.com/AFLplusplus/AFLplusplus/releases)
Release Version: [3.00c](https://github.com/AFLplusplus/AFLplusplus/releases)
Github Version: 3.01a
Github Version: 3.11a
Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus)
@ -23,11 +21,18 @@
mutations, more and better instrumentation, custom module support, etc.
If you want to use afl++ for your academic work, check the [papers page](https://aflplus.plus/papers/)
on the website.
on the website. To cite our work, look at the [Cite](#cite) section.
For comparisons use the fuzzbench `aflplusplus` setup, or use `afl-clang-fast`
with `AFL_LLVM_CMPLOG=1`.
## Major changes in afl++ 3.0
## Major changes in afl++ 3.00 + 3.10
With afl++ 3.0 we introduced changes that break some previous afl and afl++
With afl++ 3.10 we introduced the following changes from previous behaviours:
* The '+' feature of the '-t' option now means to auto-calculate the timeout
with the value given being the maximum timeout. The original meaning of
"skipping timeouts instead of abort" is now inherent to the -t option.
With afl++ 3.00 we introduced changes that break some previous afl and afl++
behaviours and defaults:
* There are no llvm_mode and gcc_plugin subdirectories anymore and there is
@ -219,6 +224,7 @@ These build options exist:
* NO_PYTHON - disable python support
* NO_SPLICING - disables splicing mutation in afl-fuzz, not recommended for normal fuzzing
* AFL_NO_X86 - if compiling on non-intel/amd platforms
* NO_ARCH_OPT - builds afl++ without machine architecture optimizations
* LLVM_CONFIG - if your distro doesn't use the standard name for llvm-config (e.g. Debian)
e.g.: make ASAN_BUILD=1
@ -730,6 +736,9 @@ campaigns as these are much shorter runnings.
1. Always:
* LTO has a much longer compile time which is diametrical to short fuzzing -
hence use afl-clang-fast instead.
* If you compile with CMPLOG then you can save fuzzing time and reuse that
compiled target for both the -c option and the main fuzz target.
This will impact the speed by ~15% though.
* `AFL_FAST_CAL` - Enable fast calibration, this halves the time the saturated
corpus needs to be loaded.
* `AFL_CMPLOG_ONLY_NEW` - only perform cmplog on new found paths, not the
@ -749,6 +758,10 @@ campaigns as these are much shorter runnings.
* for CMPLOG targets, 60% for `-l 2`, 40% for `-l 3`
4. Do *not* run any `-M` modes, just running `-S` modes is better for CI fuzzing.
`-M` enables deterministic fuzzing, old queue handling etc. which is good for
a fuzzing campaign but not good for short CI runs.
An example of such a setup can be seen in afl++'s configuration in Google's [oss-fuzz](https://github.com/google/oss-fuzz/blob/4bb61df7905c6005000f5766e966e6fe30ab4559/infra/base-images/base-builder/compile_afl#L69).
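A minimal sketch of such a CI run, assuming a CMPLOG-instrumented build that is reused both as the main target and for `-c`, as suggested above (binary and directory names are placeholders):

```sh
# Build once with CMPLOG instrumentation.
export AFL_LLVM_CMPLOG=1
afl-clang-fast -o ./target ./target.c

# CI instance: secondary (-S) only, cmplog intensity -l 2, same binary reused for -c.
afl-fuzz -S ci01 -i seeds -o findings -c ./target -l 2 -- ./target @@
```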
## Fuzzing binary-only targets
@ -786,8 +799,7 @@ If [afl-dyninst](https://github.com/vanhauser-thc/afl-dyninst) works for
your binary, then you can use afl-fuzz normally and it will have twice
the speed compared to qemu_mode (but slower than persistent mode).
Note that several other binary rewriters exist, all with their advantages and
caveats. As rewriting a binary is much faster than Qemu this is a highly
recommended approach!
caveats.
### Unicorn
@ -1163,8 +1175,18 @@ Thank you!
## Cite
If you use AFLplusplus to compare against your work, please use either `afl-clang-lto`
or `afl-clang-fast` with `AFL_LLVM_CMPLOG=1` for building targets and
`afl-fuzz` with the command line option `-l 2` for fuzzing.
The most effective setup is the `aflplusplus` default configuration on Google's [fuzzbench](https://github.com/google/fuzzbench/tree/master/fuzzers/aflplusplus).
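A short sketch of that comparison setup (placeholder target; the `-c` reuse follows the earlier advice about CMPLOG-compiled targets):

```sh
export AFL_LLVM_CMPLOG=1
afl-clang-lto -o ./target ./target.c        # or afl-clang-fast
afl-fuzz -i seeds -o findings -c ./target -l 2 -- ./target @@
```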
If you use AFLplusplus in scientific work, consider citing [our paper](https://www.usenix.org/conference/woot20/presentation/fioraldi) presented at WOOT'20:
```
+ Andrea Fioraldi, Dominik Maier, Heiko Eißfeldt, and Marc Heuse. “AFL++: Combining incremental steps of fuzzing research”. In 14th USENIX Workshop on Offensive Technologies (WOOT 20). USENIX Association, Aug. 2020.
Bibtex:
```bibtex
@inproceedings {AFLplusplus-Woot20,
author = {Andrea Fioraldi and Dominik Maier and Heiko Ei{\ss}feldt and Marc Heuse},
title = {{AFL++}: Combining Incremental Steps of Fuzzing Research},

View File

@ -6,16 +6,13 @@
- CPU affinity for many cores? There seems to be an issue > 96 cores
- afl-plot to support multiple plot_data
- afl_custom_fuzz_splice_optin()
- afl_custom_splice()
- intel-pt tracer
## Further down the road
afl-fuzz:
- setting min_len/max_len/start_offset/end_offset limits for mutation output
- add __sanitizer_cov_trace_cmp* support via shmem
llvm_mode:
- add __sanitizer_cov_trace_cmp* support
qemu_mode:
- non colliding instrumentation

View File

@ -411,8 +411,8 @@ BEGIN {
retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -- \""target_bin"\" "prog_args_string)
} else {
print " Processing "in_count" files (forkserver mode)..."
# print AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -- \""target_bin"\" "prog_args_string" </dev/null"
retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -- \""target_bin"\" "prog_args_string" </dev/null")
# print AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -A \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null"
retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -A \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null")
}
if (retval && !AFL_CMIN_CRASHES_ONLY) {

View File

@ -99,7 +99,7 @@ if [ ! -d "$outputdir" ]; then
fi
rm -f "$outputdir/high_freq.png" "$outputdir/low_freq.png" "$outputdir/exec_speed.png"
rm -f "$outputdir/high_freq.png" "$outputdir/low_freq.png" "$outputdir/exec_speed.png" "$outputdir/edges.png"
mv -f "$outputdir/index.html" "$outputdir/index.html.orig" 2>/dev/null
echo "[*] Generating plots..."
@ -152,6 +152,12 @@ set ytics auto
plot '$inputdir/plot_data' using 1:11 with filledcurve x1 title '' linecolor rgb '#0090ff' fillstyle transparent solid 0.2 noborder, \\
'$inputdir/plot_data' using 1:11 with lines title ' execs/sec' linecolor rgb '#0090ff' linewidth 3 smooth bezier;
set terminal png truecolor enhanced size 1000,300 butt
set output '$outputdir/edges.png'
set ytics auto
plot '$inputdir/plot_data' using 1:13 with lines title ' edges' linecolor rgb '#0090ff' linewidth 3
_EOF_
) | gnuplot
@ -172,6 +178,7 @@ cat >"$outputdir/index.html" <<_EOF_
<tr><td><b>Generated on:</b></td><td>`date`</td></tr>
</table>
<p>
<img src="edges.png" width=1000 height=300>
<img src="high_freq.png" width=1000 height=300><p>
<img src="low_freq.png" width=1000 height=200><p>
<img src="exec_speed.png" width=1000 height=200>
@ -183,7 +190,7 @@ _EOF_
# sensitive, this seems like a reasonable trade-off.
chmod 755 "$outputdir"
chmod 644 "$outputdir/high_freq.png" "$outputdir/low_freq.png" "$outputdir/exec_speed.png" "$outputdir/index.html"
chmod 644 "$outputdir/high_freq.png" "$outputdir/low_freq.png" "$outputdir/exec_speed.png" "$outputdir/edges.png" "$outputdir/index.html"
echo "[+] All done - enjoy your charts!"

View File

@ -49,6 +49,12 @@ if [ "$PLATFORM" = "FreeBSD" ] ; then
sysctl kern.elf64.aslr.enable=0
} > /dev/null
echo Settings applied.
cat <<EOF
In order to suppress core file generation during fuzzing it is recommended to set
me:\\
:coredumpsize=0:
in the ~/.login_conf file for the user used for fuzzing.
EOF
echo It is recommended to boot the kernel with lots of security off - if you are running a machine that is in a secured network - so set this:
echo ' sysctl hw.ibrs_disable=1'
echo 'Setting kern.pmap.pg_ps_enabled=0 into /boot/loader.conf might be helpful too.'
@ -60,8 +66,14 @@ if [ "$PLATFORM" = "OpenBSD" ] ; then
DONE=1
fi
if [ "$PLATFORM" = "DragonFly" ] ; then
echo
echo 'System security features cannot be disabled on DragonFly.'
#/sbin/sysctl kern.corefile=/dev/null
#echo Settings applied.
cat <<EOF
In order to suppress core file generation during fuzzing it is recommended to set
me:\\
:coredumpsize=0:
in the ~/.login_conf file for the user used for fuzzing.
EOF
DONE=1
fi
if [ "$PLATFORM" = "NetBSD" ] ; then
@ -88,7 +100,7 @@ fi
if [ "$PLATFORM" = "Haiku" ] ; then
SETTINGS=~/config/settings/system/debug_server/settings
[ -r ${SETTINGS} ] && grep -qE "default_action\s+kill" ${SETTINGS} && { echo "Nothing to do"; } || { \
echo We change the debug_server default_action from user to silenty kill; \
echo We change the debug_server default_action from user to silently kill; \
[ ! -r ${SETTINGS} ] && echo "default_action kill" >${SETTINGS} || { mv ${SETTINGS} s.tmp; sed -e "s/default_action\s\s*user/default_action kill/" s.tmp > ${SETTINGS}; rm s.tmp; }; \
echo Settings applied.; \
}

View File

@ -39,7 +39,7 @@
#include "libhfcommon/util.h"
#define PROG_NAME "honggfuzz"
#define PROG_VERSION "2.3"
#define PROG_VERSION "2.4"
/* Name of the template which will be replaced with the proper name of the file */
#define _HF_FILE_PLACEHOLDER "___FILE___"
@ -208,6 +208,7 @@ typedef struct {
const char* crashDir;
const char* covDirNew;
bool saveUnique;
bool saveSmaller;
size_t dynfileqMaxSz;
size_t dynfileqCnt;
dynfile_t* dynfileqCurrent;
@ -279,9 +280,9 @@ typedef struct {
cmpfeedback_t* cmpFeedbackMap;
int cmpFeedbackFd;
bool cmpFeedback;
const char* blacklistFile;
uint64_t* blacklist;
size_t blacklistCnt;
const char* blocklistFile;
uint64_t* blocklist;
size_t blocklistCnt;
bool skipFeedbackOnTimeout;
uint64_t maxCov[4];
dynFileMethod_t dynFileMethod;

View File

@ -77,11 +77,11 @@ static inline uint64_t util_rndGet(uint64_t min, uint64_t max) {
}
static inline uint64_t util_rnd64() { return rand_below(afl_struct, 1 << 30); }
static inline size_t input_getRandomInputAsBuf(run_t *run, const uint8_t **buf) {
*buf = queue_input;
static inline const uint8_t* input_getRandomInputAsBuf(run_t* run, size_t* len) {
*len = queue_input_size;
run->dynfile->data = queue_input;
run->dynfile->size = queue_input_size;
return queue_input_size;
return queue_input;
}
static inline void input_setSize(run_t* run, size_t sz) {
run->dynfile->size = sz;

View File

@ -1 +0,0 @@
.

View File

@ -0,0 +1,3 @@
#ifndef LOG_E
#define LOG_E LOG_F
#endif

File diff suppressed because it is too large

custom_mutators/rust/.gitignore
View File

@ -0,0 +1,10 @@
# Generated by Cargo
# will have compiled files and executables
/target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk

View File

@ -0,0 +1,8 @@
[workspace]
members = [
"custom_mutator-sys",
"custom_mutator",
"example",
# Lain needs a nightly toolchain
# "example_lain",
]

View File

@ -0,0 +1,11 @@
# Rust Custom Mutators
Bindings to create custom mutators in Rust.
These bindings are documented with rustdoc. To view the documentation run
```cargo doc -p custom_mutator --open```.
A minimal example can be found in `example`. Build it using `cargo build --example example_mutator`.
An example using [lain](https://github.com/microsoft/lain) for structured fuzzing can be found in `example_lain`.
Since lain requires a nightly rust toolchain, you need to set one up before you can play with it.
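As a usage sketch (the output path depends on your cargo profile and platform, and the target binary is a placeholder; `AFL_CUSTOM_MUTATOR_LIBRARY` is how afl-fuzz loads a custom mutator library):

```sh
cargo build --release --example example_mutator
# On Linux the cdylib typically lands under target/release/examples/; adjust if needed.
AFL_CUSTOM_MUTATOR_LIBRARY="$(pwd)/target/release/examples/libexample_mutator.so" \
    afl-fuzz -i seeds -o findings -- ./target @@
```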

View File

@ -0,0 +1,12 @@
[package]
name = "custom_mutator-sys"
version = "0.1.0"
authors = ["Julius Hohnerlein <julihoh@users.noreply.github.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
[build-dependencies]
bindgen = "0.56"

View File

@ -0,0 +1,42 @@
extern crate bindgen;
use std::env;
use std::path::PathBuf;
// this code is largely taken straight from the handbook: https://github.com/fitzgen/bindgen-tutorial-bzip2-sys
fn main() {
// Tell cargo to invalidate the built crate whenever the wrapper changes
println!("cargo:rerun-if-changed=wrapper.h");
// The bindgen::Builder is the main entry point
// to bindgen, and lets you build up options for
// the resulting bindings.
let bindings = bindgen::Builder::default()
// The input header we would like to generate
// bindings for.
.header("wrapper.h")
.whitelist_type("afl_state_t")
.blacklist_type(r"u\d+")
.opaque_type(r"_.*")
.opaque_type("FILE")
.opaque_type("in_addr(_t)?")
.opaque_type("in_port(_t)?")
.opaque_type("sa_family(_t)?")
.opaque_type("sockaddr_in(_t)?")
.opaque_type("time_t")
.rustfmt_bindings(true)
.size_t_is_usize(true)
// Tell cargo to invalidate the built crate whenever any of the
// included header files changed.
.parse_callbacks(Box::new(bindgen::CargoCallbacks))
// Finish the builder and generate the bindings.
.generate()
// Unwrap the Result and panic on failure.
.expect("Unable to generate bindings");
// Write the bindings to the $OUT_DIR/bindings.rs file.
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings
.write_to_file(out_path.join("bindings.rs"))
.expect("Couldn't write bindings!");
}

View File

@ -0,0 +1,5 @@
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));

View File

@ -0,0 +1,4 @@
#include "../../../include/afl-fuzz.h"
#include "../../../include/common.h"
#include "../../../include/config.h"
#include "../../../include/debug.h"

View File

@ -0,0 +1,13 @@
[package]
name = "custom_mutator"
version = "0.1.0"
authors = ["Julius Hohnerlein <julihoh@users.noreply.github.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
afl_internals = ["custom_mutator-sys"]
[dependencies]
custom_mutator-sys = { path = "../custom_mutator-sys", optional=true }

View File

@ -0,0 +1,634 @@
//! Somewhat safe and somewhat ergonomic bindings for creating [AFL++](https://github.com/AFLplusplus/AFLplusplus) [custom mutators](https://github.com/AFLplusplus/AFLplusplus/blob/stable/docs/custom_mutators.md) in Rust.
//!
//! # Usage
//! AFL++ custom mutators are expected to be dynamic libraries which expose a set of symbols.
//! Check out [`CustomMutator`] to see which functions of the API are supported.
//! Then use [`export_mutator`] to export the correct symbols for your mutator.
//! In order to use the mutator, your crate needs to be a library crate and have a `crate-type` of `cdylib`.
//! Putting
//! ```yaml
//! [lib]
//! crate-type = ["cdylib"]
//! ```
//! into your `Cargo.toml` should do the trick.
//! The final executable can be found in `target/(debug|release)/your_crate_name.so`.
//! # Example
//! See [`export_mutator`] for an example.
//!
//! # On `panic`s
//! This binding is panic-safe in that it will prevent panics from unwinding into AFL++. Any panic will `abort` at the boundary between the custom mutator and AFL++.
//!
//! # Access to AFL++ internals
//! This crate has an optional feature "afl_internals", which gives access to AFL++'s internal state.
//! The state is passed to [`CustomMutator::init`], when the feature is activated.
//!
//! _This is completely unsafe and uses automatically generated types extracted from the AFL++ source._
use std::{ffi::CStr, fmt::Debug};
#[cfg(feature = "afl_internals")]
#[doc(hidden)]
pub use custom_mutator_sys::afl_state;
#[allow(unused_variables)]
#[doc(hidden)]
pub trait RawCustomMutator {
#[cfg(feature = "afl_internals")]
fn init(afl: &'static afl_state, seed: c_uint) -> Self
where
Self: Sized;
#[cfg(not(feature = "afl_internals"))]
fn init(seed: u32) -> Self
where
Self: Sized;
fn fuzz<'b, 's: 'b>(
&'s mut self,
buffer: &'b mut [u8],
add_buff: Option<&[u8]>,
max_size: usize,
) -> Option<&'b [u8]>;
fn fuzz_count(&mut self, buffer: &[u8]) -> u32 {
1
}
fn queue_new_entry(&mut self, filename_new_queue: &CStr, _filename_orig_queue: Option<&CStr>) {}
fn queue_get(&mut self, filename: &CStr) -> bool {
true
}
fn describe(&mut self, max_description: usize) -> Option<&CStr> {
None
}
fn introspection(&mut self) -> Option<&CStr> {
None
}
/*fn post_process(&self, buffer: &[u8], unsigned char **out_buf)-> usize;
int afl_custom_init_trim(&self, buffer: &[u8]);
size_t afl_custom_trim(&self, unsigned char **out_buf);
int afl_custom_post_trim(&self, unsigned char success);
size_t afl_custom_havoc_mutation(&self, buffer: &[u8], unsigned char **out_buf, size_t max_size);
unsigned char afl_custom_havoc_mutation_probability(&self);*/
}
/// Wrappers for the custom mutator which provide the bridging between the C API and CustomMutator.
/// These wrappers are not intended to be used directly, rather export_mutator will use them to publish the custom mutator C API.
#[doc(hidden)]
pub mod wrappers {
#[cfg(feature = "afl_internals")]
use custom_mutator_sys::afl_state;
use core::slice;
use std::{
any::Any,
convert::TryInto,
ffi::{c_void, CStr},
mem::ManuallyDrop,
os::raw::c_char,
panic::catch_unwind,
process::abort,
ptr::null,
};
use crate::RawCustomMutator;
/// A structure to be used as the data pointer for our custom mutator. This was used as additional storage and is kept for now in case its needed later.
/// Also has some convenience functions for FFI conversions (from and to ptr) and tries to make misuse hard (see [`FFIContext::from`]).
struct FFIContext<M: RawCustomMutator> {
mutator: M,
}
impl<M: RawCustomMutator> FFIContext<M> {
fn from(ptr: *mut c_void) -> ManuallyDrop<Box<Self>> {
assert!(!ptr.is_null());
ManuallyDrop::new(unsafe { Box::from_raw(ptr as *mut Self) })
}
fn into_ptr(self: Box<Self>) -> *const c_void {
Box::into_raw(self) as *const c_void
}
#[cfg(feature = "afl_internals")]
fn new(afl: &'static afl_state, seed: u32) -> Box<Self> {
Box::new(Self {
mutator: M::init(afl, seed),
})
}
#[cfg(not(feature = "afl_internals"))]
fn new(seed: u32) -> Box<Self> {
Box::new(Self {
mutator: M::init(seed),
})
}
}
/// panic handler called for every panic
fn panic_handler(method: &str, panic_info: Box<dyn Any + Send + 'static>) -> ! {
use std::ops::Deref;
let cause = panic_info
.downcast_ref::<String>()
.map(String::deref)
.unwrap_or_else(|| {
panic_info
.downcast_ref::<&str>()
.copied()
.unwrap_or("<cause unknown>")
});
eprintln!("A panic occurred at {}: {}", method, cause);
abort()
}
/// Internal function used in the macro
#[cfg(not(feature = "afl_internals"))]
pub fn afl_custom_init_<M: RawCustomMutator>(seed: u32) -> *const c_void {
match catch_unwind(|| FFIContext::<M>::new(seed).into_ptr()) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_init", err),
}
}
/// Internal function used in the macro
#[cfg(feature = "afl_internals")]
pub fn afl_custom_init_<M: RawCustomMutator>(
afl: Option<&'static afl_state>,
seed: u32,
) -> *const c_void {
match catch_unwind(|| {
let afl = afl.expect("mutator func called with NULL afl");
FFIContext::<M>::new(afl, seed).into_ptr()
}) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_init", err),
}
}
/// Internal function used in the macro
pub unsafe fn afl_custom_fuzz_<M: RawCustomMutator>(
data: *mut c_void,
buf: *mut u8,
buf_size: usize,
out_buf: *mut *const u8,
add_buf: *mut u8,
add_buf_size: usize,
max_size: usize,
) -> usize {
match catch_unwind(|| {
let mut context = FFIContext::<M>::from(data);
if buf.is_null() {
panic!("null buf passed to afl_custom_fuzz")
}
if out_buf.is_null() {
panic!("null out_buf passed to afl_custom_fuzz")
}
let buff_slice = slice::from_raw_parts_mut(buf, buf_size);
let add_buff_slice = if add_buf.is_null() {
None
} else {
Some(slice::from_raw_parts(add_buf, add_buf_size))
};
match context
.mutator
.fuzz(buff_slice, add_buff_slice, max_size.try_into().unwrap())
{
Some(buffer) => {
*out_buf = buffer.as_ptr();
buffer.len().try_into().unwrap()
}
None => {
// return the input buffer with 0-length to let AFL skip this mutation attempt
*out_buf = buf;
0
}
}
}) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_fuzz", err),
}
}
/// Internal function used in the macro
pub unsafe fn afl_custom_fuzz_count_<M: RawCustomMutator>(
data: *mut c_void,
buf: *const u8,
buf_size: usize,
) -> u32 {
match catch_unwind(|| {
let mut context = FFIContext::<M>::from(data);
if buf.is_null() {
panic!("null buf passed to afl_custom_fuzz")
}
let buf_slice = slice::from_raw_parts(buf, buf_size);
// see https://doc.rust-lang.org/nomicon/borrow-splitting.html
let ctx = &mut **context;
let mutator = &mut ctx.mutator;
mutator.fuzz_count(buf_slice)
}) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_fuzz_count", err),
}
}
/// Internal function used in the macro
pub fn afl_custom_queue_new_entry_<M: RawCustomMutator>(
data: *mut c_void,
filename_new_queue: *const c_char,
filename_orig_queue: *const c_char,
) {
match catch_unwind(|| {
let mut context = FFIContext::<M>::from(data);
if filename_new_queue.is_null() {
panic!("received null filename_new_queue in afl_custom_queue_new_entry");
}
let filename_new_queue = unsafe { CStr::from_ptr(filename_new_queue) };
let filename_orig_queue = if !filename_orig_queue.is_null() {
Some(unsafe { CStr::from_ptr(filename_orig_queue) })
} else {
None
};
context
.mutator
.queue_new_entry(filename_new_queue, filename_orig_queue);
}) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_queue_new_entry", err),
}
}
/// Internal function used in the macro
pub unsafe fn afl_custom_deinit_<M: RawCustomMutator>(data: *mut c_void) {
match catch_unwind(|| {
// drop the context
ManuallyDrop::into_inner(FFIContext::<M>::from(data));
}) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_deinit", err),
}
}
/// Internal function used in the macro
pub fn afl_custom_introspection_<M: RawCustomMutator>(data: *mut c_void) -> *const c_char {
match catch_unwind(|| {
let mut context = FFIContext::<M>::from(data);
if let Some(res) = context.mutator.introspection() {
res.as_ptr()
} else {
null()
}
}) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_introspection", err),
}
}
/// Internal function used in the macro
pub fn afl_custom_describe_<M: RawCustomMutator>(
data: *mut c_void,
max_description_len: usize,
) -> *const c_char {
match catch_unwind(|| {
let mut context = FFIContext::<M>::from(data);
if let Some(res) = context.mutator.describe(max_description_len) {
res.as_ptr()
} else {
null()
}
}) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_describe", err),
}
}
/// Internal function used in the macro
pub fn afl_custom_queue_get_<M: RawCustomMutator>(
data: *mut c_void,
filename: *const c_char,
) -> u8 {
match catch_unwind(|| {
let mut context = FFIContext::<M>::from(data);
assert!(!filename.is_null());
context
.mutator
.queue_get(unsafe { CStr::from_ptr(filename) }) as u8
}) {
Ok(ret) => ret,
Err(err) => panic_handler("afl_custom_queue_get", err),
}
}
}
/// exports the given Mutator as a custom mutator as the C interface that AFL++ expects.
/// It is not possible to call this macro multiple times, because it would define the custom mutator symbols multiple times.
/// # Example
/// ```
/// # #[macro_use] extern crate custom_mutator;
/// # #[cfg(feature = "afl_internals")]
/// # use custom_mutator::afl_state;
/// # use custom_mutator::CustomMutator;
/// struct MyMutator;
/// impl CustomMutator for MyMutator {
/// /// ...
/// # type Error = ();
/// # #[cfg(feature = "afl_internals")]
/// # fn init(_afl_state: &afl_state, _seed: u32) -> Result<Self,()> {unimplemented!()}
/// # #[cfg(not(feature = "afl_internals"))]
/// # fn init(_seed: u32) -> Result<Self, Self::Error> {unimplemented!()}
/// # fn fuzz<'b,'s:'b>(&'s mut self, _buffer: &'b mut [u8], _add_buff: Option<&[u8]>, _max_size: usize) -> Result<Option<&'b [u8]>, Self::Error> {unimplemented!()}
/// }
/// export_mutator!(MyMutator);
/// ```
#[macro_export]
macro_rules! export_mutator {
($mutator_type:ty) => {
#[cfg(feature = "afl_internals")]
#[no_mangle]
pub extern "C" fn afl_custom_init(
afl: ::std::option::Option<&'static $crate::afl_state>,
seed: ::std::os::raw::c_uint,
) -> *const ::std::os::raw::c_void {
$crate::wrappers::afl_custom_init_::<$mutator_type>(afl, seed as u32)
}
#[cfg(not(feature = "afl_internals"))]
#[no_mangle]
pub extern "C" fn afl_custom_init(
_afl: *const ::std::os::raw::c_void,
seed: ::std::os::raw::c_uint,
) -> *const ::std::os::raw::c_void {
$crate::wrappers::afl_custom_init_::<$mutator_type>(seed as u32)
}
#[no_mangle]
pub extern "C" fn afl_custom_fuzz_count(
data: *mut ::std::os::raw::c_void,
buf: *const u8,
buf_size: usize,
) -> u32 {
unsafe {
$crate::wrappers::afl_custom_fuzz_count_::<$mutator_type>(data, buf, buf_size)
}
}
#[no_mangle]
pub extern "C" fn afl_custom_fuzz(
data: *mut ::std::os::raw::c_void,
buf: *mut u8,
buf_size: usize,
out_buf: *mut *const u8,
add_buf: *mut u8,
add_buf_size: usize,
max_size: usize,
) -> usize {
unsafe {
$crate::wrappers::afl_custom_fuzz_::<$mutator_type>(
data,
buf,
buf_size,
out_buf,
add_buf,
add_buf_size,
max_size,
)
}
}
#[no_mangle]
pub extern "C" fn afl_custom_queue_new_entry(
data: *mut ::std::os::raw::c_void,
filename_new_queue: *const ::std::os::raw::c_char,
filename_orig_queue: *const ::std::os::raw::c_char,
) {
$crate::wrappers::afl_custom_queue_new_entry_::<$mutator_type>(
data,
filename_new_queue,
filename_orig_queue,
)
}
#[no_mangle]
pub extern "C" fn afl_custom_queue_get(
data: *mut ::std::os::raw::c_void,
filename: *const ::std::os::raw::c_char,
) -> u8 {
$crate::wrappers::afl_custom_queue_get_::<$mutator_type>(data, filename)
}
#[no_mangle]
pub extern "C" fn afl_custom_introspection(
data: *mut ::std::os::raw::c_void,
) -> *const ::std::os::raw::c_char {
$crate::wrappers::afl_custom_introspection_::<$mutator_type>(data)
}
#[no_mangle]
pub extern "C" fn afl_custom_describe(
data: *mut ::std::os::raw::c_void,
max_description_len: usize,
) -> *const ::std::os::raw::c_char {
$crate::wrappers::afl_custom_describe_::<$mutator_type>(data, max_description_len)
}
#[no_mangle]
pub extern "C" fn afl_custom_deinit(data: *mut ::std::os::raw::c_void) {
unsafe { $crate::wrappers::afl_custom_deinit_::<$mutator_type>(data) }
}
};
}
#[cfg(test)]
/// this sanity test is supposed to just find out whether an empty mutator being exported by the macro compiles
mod sanity_test {
#[cfg(feature = "afl_internals")]
use super::afl_state;
use super::{export_mutator, RawCustomMutator};
struct ExampleMutator;
impl RawCustomMutator for ExampleMutator {
#[cfg(feature = "afl_internals")]
fn init(_afl: &afl_state, _seed: u32) -> Self {
unimplemented!()
}
#[cfg(not(feature = "afl_internals"))]
fn init(_seed: u32) -> Self {
unimplemented!()
}
fn fuzz<'b, 's: 'b>(
&'s mut self,
_buffer: &'b mut [u8],
_add_buff: Option<&[u8]>,
_max_size: usize,
) -> Option<&'b [u8]> {
unimplemented!()
}
}
export_mutator!(ExampleMutator);
}
#[allow(unused_variables)]
/// A custom mutator.
/// [`CustomMutator::handle_error`] will be called in case any method returns an [`Result::Err`].
pub trait CustomMutator {
/// The error type. All methods must return the same error type.
type Error: Debug;
/// The method which handles errors.
/// By default, this method will log the error to stderr if the environment variable "`AFL_CUSTOM_MUTATOR_DEBUG`" is set and non-empty.
/// After logging the error, execution will continue on a best-effort basis.
///
/// This default behaviour can be customized by implementing this method.
fn handle_error(err: Self::Error) {
if std::env::var("AFL_CUSTOM_MUTATOR_DEBUG")
.map(|v| !v.is_empty())
.unwrap_or(false)
{
eprintln!("Error in custom mutator: {:?}", err)
}
}
#[cfg(feature = "afl_internals")]
fn init(afl: &'static afl_state, seed: u32) -> Result<Self, Self::Error>
where
Self: Sized;
#[cfg(not(feature = "afl_internals"))]
fn init(seed: u32) -> Result<Self, Self::Error>
where
Self: Sized;
fn fuzz_count(&mut self, buffer: &[u8]) -> Result<u32, Self::Error> {
Ok(1)
}
fn fuzz<'b, 's: 'b>(
&'s mut self,
buffer: &'b mut [u8],
add_buff: Option<&[u8]>,
max_size: usize,
) -> Result<Option<&'b [u8]>, Self::Error>;
fn queue_new_entry(
&mut self,
filename_new_queue: &CStr,
filename_orig_queue: Option<&CStr>,
) -> Result<(), Self::Error> {
Ok(())
}
fn queue_get(&mut self, filename: &CStr) -> Result<bool, Self::Error> {
Ok(true)
}
fn describe(&mut self, max_description: usize) -> Result<Option<&CStr>, Self::Error> {
Ok(None)
}
fn introspection(&mut self) -> Result<Option<&CStr>, Self::Error> {
Ok(None)
}
}
impl<M> RawCustomMutator for M
where
M: CustomMutator,
M::Error: Debug,
{
#[cfg(feature = "afl_internals")]
fn init(afl: &'static afl_state, seed: u32) -> Self
where
Self: Sized,
{
match Self::init(afl, seed) {
Ok(r) => r,
Err(e) => {
Self::handle_error(e);
panic!("Error in afl_custom_init")
}
}
}
#[cfg(not(feature = "afl_internals"))]
fn init(seed: u32) -> Self
where
Self: Sized,
{
match Self::init(seed) {
Ok(r) => r,
Err(e) => {
Self::handle_error(e);
panic!("Error in afl_custom_init")
}
}
}
fn fuzz_count(&mut self, buffer: &[u8]) -> u32 {
match self.fuzz_count(buffer) {
Ok(r) => r,
Err(e) => {
Self::handle_error(e);
0
}
}
}
fn fuzz<'b, 's: 'b>(
&'s mut self,
buffer: &'b mut [u8],
add_buff: Option<&[u8]>,
max_size: usize,
) -> Option<&'b [u8]> {
match self.fuzz(buffer, add_buff, max_size) {
Ok(r) => r,
Err(e) => {
Self::handle_error(e);
None
}
}
}
fn queue_new_entry(&mut self, filename_new_queue: &CStr, filename_orig_queue: Option<&CStr>) {
match self.queue_new_entry(filename_new_queue, filename_orig_queue) {
Ok(r) => r,
Err(e) => {
Self::handle_error(e);
}
}
}
fn queue_get(&mut self, filename: &CStr) -> bool {
match self.queue_get(filename) {
Ok(r) => r,
Err(e) => {
Self::handle_error(e);
false
}
}
}
fn describe(&mut self, max_description: usize) -> Option<&CStr> {
match self.describe(max_description) {
Ok(r) => r,
Err(e) => {
Self::handle_error(e);
None
}
}
}
fn introspection(&mut self) -> Option<&CStr> {
match self.introspection() {
Ok(r) => r,
Err(e) => {
Self::handle_error(e);
None
}
}
}
}

View File

@ -0,0 +1,15 @@
[package]
name = "example_mutator"
version = "0.1.0"
authors = ["Julius Hohnerlein <julihoh@users.noreply.github.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
custom_mutator = { path = "../custom_mutator" }
[[example]]
name = "example_mutator"
path = "./src/example_mutator.rs"
crate-type = ["cdylib"]

View File

@ -0,0 +1,49 @@
#![allow(unused_variables)]
use custom_mutator::{export_mutator, CustomMutator};
struct ExampleMutator;
impl CustomMutator for ExampleMutator {
type Error = ();
fn init(seed: u32) -> Result<Self, Self::Error> {
Ok(Self)
}
fn fuzz<'b, 's: 'b>(
&'s mut self,
buffer: &'b mut [u8],
add_buff: Option<&[u8]>,
max_size: usize,
) -> Result<Option<&'b [u8]>, Self::Error> {
buffer.reverse();
Ok(Some(buffer))
}
}
struct OwnBufferExampleMutator {
own_buffer: Vec<u8>,
}
impl CustomMutator for OwnBufferExampleMutator {
type Error = ();
fn init(seed: u32) -> Result<Self, Self::Error> {
Ok(Self {
own_buffer: Vec::new(),
})
}
fn fuzz<'b, 's: 'b>(
&'s mut self,
buffer: &'b mut [u8],
add_buff: Option<&[u8]>,
max_size: usize,
) -> Result<Option<&'b [u8]>, ()> {
self.own_buffer.reverse();
Ok(Some(self.own_buffer.as_slice()))
}
}
export_mutator!(ExampleMutator);

View File

@ -0,0 +1,16 @@
[package]
name = "example_lain"
version = "0.1.0"
authors = ["Julius Hohnerlein <julihoh@users.noreply.github.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
custom_mutator = { path = "../custom_mutator" }
lain="0.5"
[[example]]
name = "example_lain"
path = "./src/lain_mutator.rs"
crate-type = ["cdylib"]

View File

@ -0,0 +1 @@
nightly

View File

@ -0,0 +1,59 @@
use custom_mutator::{export_mutator, CustomMutator};
use lain::{
mutator::Mutator,
prelude::*,
rand::{rngs::StdRng, SeedableRng},
};
#[derive(Debug, Mutatable, NewFuzzed, BinarySerialize)]
struct MyStruct {
field_1: u8,
#[lain(bits = 3)]
field_2: u8,
#[lain(bits = 5)]
field_3: u8,
#[lain(min = 5, max = 10000)]
field_4: u32,
#[lain(ignore)]
ignored_field: u64,
}
struct LainMutator {
mutator: Mutator<StdRng>,
buffer: Vec<u8>,
}
impl CustomMutator for LainMutator {
type Error = ();
fn init(seed: u32) -> Result<Self, ()> {
Ok(Self {
mutator: Mutator::new(StdRng::seed_from_u64(seed as u64)),
buffer: Vec::new(),
})
}
fn fuzz<'b, 's: 'b>(
&'s mut self,
_buffer: &'b mut [u8],
_add_buff: Option<&[u8]>,
max_size: usize,
) -> Result<Option<&'b [u8]>, ()> {
// we just sample an instance of MyStruct, ignoring the current input
let instance = MyStruct::new_fuzzed(&mut self.mutator, None);
let size = instance.serialized_size();
if size > max_size {
return Err(());
}
self.buffer.clear();
self.buffer.reserve(size);
instance.binary_serialize::<_, BigEndian>(&mut self.buffer);
Ok(Some(self.buffer.as_slice()))
}
}
export_mutator!(LainMutator);

View File

@ -9,60 +9,74 @@ Want to stay in the loop on major new features? Join our mailing list by
sending a mail to <afl-users+subscribe@googlegroups.com>.
### Version ++3.01a (dev)
### Version ++3.10c (release)
- Mac OS ARM64 support
- Android support fixed and updated by Joey Jiaojg - thanks!
- New selective instrumentation option with __AFL_COVERAGE_* commands
to be placed in the source code.
Check out instrumentation/README.instrument_list.md
- afl-fuzz
- Making AFL_MAP_SIZE obsolete - afl-fuzz now learns on start the
target map size
- Making AFL_MAP_SIZE (mostly) obsolete - afl-fuzz now learns on
start the target map size
- upgraded cmplog/redqueen: solving for floating point, solving
transformations (e.g. toupper, tolower, to/from hex, xor,
arithmetics, etc.). this is costly hence new command line option
-l that sets the intensity (values 1 to 3). recommended is 1 or 2.
- added `AFL_CMPLOG_ONLY_NEW` to not use cmplog on initial testcases from
`-i` or resumes (as these have most likely already been done)
arithmetics, etc.). This is costly hence new command line option
`-l` that sets the intensity (values 1 to 3). Recommended is 2.
- added `AFL_CMPLOG_ONLY_NEW` to not use cmplog on initial seeds
from `-i` or resumes (these have most likely already been done)
- fix crash for very, very fast targets+systems (thanks to mhlakhani
for reporting)
- on restarts (-i)/autoresume (AFL_AUTORESUME) the stats are now
reloaded and used, thanks to Vimal Joseph for this PR!
- if determinstic mode is active (-D, or -M without -d) then we sync
after every queue entry as this can take very long time otherwise
- on restarts (`-i`)/autoresume (AFL_AUTORESUME) the stats are now
reloaded and used, thanks to Vimal Joseph for this patch!
- changed the meaning of '+' of the '-t' option, it now means to
auto-calculate the timeout with the value given being the max
timeout. The original meaning of skipping timeouts instead of
abort is now inherent to the -t option.
- if deterministic mode is active (`-D`, or `-M` without `-d`) then
we sync after every queue entry as this can take very long time
otherwise
- added minimum SYNC_TIME to include/config.h (30 minutes default)
- better detection if a target needs a large shared map
- fix for -Z
- fix for `-Z`
- fixed a few crashes
- switched to an even faster RNG
- added hghwng's patch for faster trace map analysis
- printing suggestions for mistyped `AFL_` env variables
- added Rust bindings for custom mutators (thanks @julihoh)
- afl-cc
- allow instrumenting LLVMFuzzerTestOneInput
- fixed endless loop for allow/blocklist lines starting with a
comment (thanks to Zherya for reporting)
- cmplog/redqueen now also tracks floating point, _ExtInt() + 128bit
- cmplog/redqueen can now process basic libc++ and libstdc++
std::string comparisons (though no position or length type variants)
- added support for __afl_coverage_interesting() for LTO and
and our own PCGUARD (llvm 10.0.1+), read more about this function
and selective coverage in instrumentation/README.instrument_list.md
std::string comparisons (no position or length type variants)
- added support for __afl_coverage_interesting() for LTO and our
own PCGUARD (llvm 10.0.1+), read more about this function and
selective coverage in instrumentation/README.instrument_list.md
- added AFL_LLVM_INSTRUMENT option NATIVE for native clang pc-guard
support (less performant than our own), GCC for old afl-gcc and
CLANG for old afl-clang
- fixed a potential crash in the LAF feature
- workaround for llvm bitcast lto bug
- workaround for llvm 13
- qemuafl
- ported QASan to qemuafl! see qemu_mode/libqasan/README.md
- QASan (address sanitizer for Qemu) ported to qemuafl!
See qemu_mode/libqasan/README.md
- solved some persistent mode bugs (thanks Dil4rd)
- solved an issue when dumping the memory maps (thanks wizche)
- Android support for QASan
- unicornafl
- Substential speed gains in python bindings for certain use cases
- Substantial speed gains in python bindings for certain use cases
- Improved rust bindings
- Added a new example harness to compare python, c, and rust bindings
- Added a new example harness to compare python, c and rust bindings
- afl-cmin and afl-showmap now support the -f option
- afl_plot now also generates a graph on the discovered edges
- changed default: no memory limit for afl-cmin and afl-cmin.bash
- warn on any _AFL and __AFL env vars
- LLVM mode is now compiled with -j4, unicorn with all cores. qemu was
already building with all cores, the gcc plugin needs only one.
- set AFL_IGNORE_UNKNOWN_ENVS to not warn on unknown AFL_... env vars
- added dummy Makefile to instrumentation/
- Updated utils/afl_frida to be 5% faster, 7% on x86_64
- Added `AFL_KILL_SIGNAL` env variable (thanks @v-p-b)
- @Edznux added a nice documentation on how to use rpc.statsd with
afl++ in docs/rpc_statsd.md, thanks!

View File

@ -4,6 +4,11 @@ This file describes how you can implement custom mutations to be used in AFL.
For now, we support the C/C++ library and Python module, collectively named the
custom mutator.
There is also experimental support for Rust in `custom_mutators/rust`.
Please refer to that directory for documentation.
Run ```cargo doc -p custom_mutator --open``` in that directory to view the
documentation in your web browser.
Implemented by
- C/C++ library (`*.so`): Khaled Yakdan from Code Intelligence (<yakdan@code-intelligence.de>)
- Python module: Christian Holler from Mozilla (<choller@mozilla.com>)
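A minimal sketch of what such a C custom mutator shared object could look like; the `afl_custom_init`/`afl_custom_fuzz`/`afl_custom_deinit` names and signatures here are assumptions taken from memory of the API described elsewhere in this file, so verify them against that section before relying on them.

```c
/* Hedged sketch of a C custom mutator (.so); the afl_custom_* names and
   signatures are assumptions - check the API description in this document. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct my_mutator {
  uint8_t *buf;   /* reusable output buffer owned by the mutator */
} my_mutator_t;

void *afl_custom_init(void *afl, unsigned int seed) {
  (void)afl;
  srand(seed);
  return calloc(1, sizeof(my_mutator_t));
}

size_t afl_custom_fuzz(void *data, uint8_t *buf, size_t buf_size,
                       uint8_t **out_buf, uint8_t *add_buf,
                       size_t add_buf_size, size_t max_size) {
  (void)add_buf; (void)add_buf_size;
  my_mutator_t *m = (my_mutator_t *)data;
  size_t len = buf_size < max_size ? buf_size : max_size;
  uint8_t *tmp = realloc(m->buf, len ? len : 1);
  if (!tmp) return 0;
  m->buf = tmp;
  memcpy(m->buf, buf, len);
  if (len) m->buf[rand() % len] ^= 0xff;  /* toy mutation: flip one byte */
  *out_buf = m->buf;
  return len;
}

void afl_custom_deinit(void *data) {
  my_mutator_t *m = (my_mutator_t *)data;
  if (m) free(m->buf);
  free(m);
}
```

Such a library would typically be compiled with `cc -shared -fPIC` and loaded through the custom mutator library environment variable described in this document; treat the exact build and loading steps as assumptions and defer to the sections below.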

docs/docs.md Normal file
View File

@ -0,0 +1,122 @@
# Restructure afl++'s documentation
## About us
We are dedicated to everything around fuzzing; our main and best-known
contribution is the fuzzer `afl++`, which is part of all major Unix
distributions (e.g. Debian, Arch, FreeBSD, etc.) and is deployed on Google's
oss-fuzz and clusterfuzz. It is rated the top fuzzer on Google's fuzzbench.
We are four individuals from Europe supported by a large community.
All our tools are open source.
## About the afl++ fuzzer project
afl++ inherited its documentation from the original Google afl project.
Since then it has been massively improved - feature and performance wise -
and although the documentation has been maintained alongside, it has grown
out of proportion.
The documentation is written by non-native English speakers, and none of us
has a writing background.
We see questions on afl++ usage on mailing lists (e.g. afl-users), discord
channels, web forums and as issues in our repository.
This only increases as afl++ has been at the top of Google's fuzzbench
statistics (which measure the performance of fuzzers) and is now being
integrated in Google's oss-fuzz and clusterfuzz - and is in many Unix
packaging repositories, e.g. Debian, FreeBSD, etc.
afl++ now has 44 (!) documentation files with 13k total lines of content.
This is way too much.
Hence afl++ needs a complete overhaul of its documentation, both on an
organisational/structural level as well as on the content level.
Overall the following actions have to be performed:
* Create a better structure of documentation so it is easier to find the
information that is being looked for, combining and/or splitting up the
existing documents as needed.
* Rewrite some documentation to remove duplication. A lot of information is
present several times across the documentation. This duplication should be
removed where possible so that we have as little bloat as possible.
* The documents have been written and modified by a lot of different people,
most of them non-native English speakers. Hence an overall review has to be
performed to identify the parts that should be rewritten, and then the
rewrite has to be done.
* Create a cheat-sheet for a very short best-setup build and run of afl++
* Pictures explain more than 1000 words. We need at least 4 images that
explain the workflow with afl++:
- the build workflow
- the fuzzing workflow
- the fuzzing campaign management workflow
- the overall workflow that is an overview of the above
- maybe more, where the technical writer deems it necessary for
understanding.
Requirements:
* Documentation has to be in Markdown format
* Images have to be either in SVG or PNG format.
* All documentation should be (moved) in(to) docs/
The project does not require writing new documentation or tutorials besides the
cheat sheet. The technical information for the cheat sheet will be provided by
us.
## Metrics
afl++ is the highest-performing fuzzer publicly available - but it is also the
most feature-rich and complex. With the publicity of afl++'s success and
deployment in Google projects internally and externally, and its availability
as a package on most Linux distributions, we see more and more issues being
created and help requests on our Discord channel that would not be
necessary if people had read through all our documentation - which
is unrealistic.
We expect the new documentation after this project to be cleaner, easier
to access and lighter to digest for our users, resulting in far fewer
help requests. On the other hand the number of afl++ users should increase
as well, as the tool will be more accessible, which would increase the number
of questions again - but overall we expect a reduction of help requests.
In numbers: we currently get, per week on average, 5 issues on Github,
10 questions on Discord and 1 on the mailing lists that would not be
necessary with perfect documentation and perfect people.
We would consider this project a success if afterwards we only get
2 issues on Github and 3 questions on Discord per week that could be answered
by reading the documentation. The mailing list is usually used by the most
novice users and we don't expect any fewer questions there.
## Project Budget
We have zero experience with technical writers, so this is very hard for us
to calculate. We expect it to be a lot of work though because of the amount
of documentation we have that needs to be restructured and partially rewritten
(44 documents with 13k total lines of content).
We assume the daily rate of a very good and experienced technical writer in
times of a pandemic to be ~500$ (according to web research), and calculate
the overall amount of work to be around 20 days for everything incl. the
graphics (but again - this is basically just guessing).
Technical Writer 10000$
Volunteer stipends 0$ (waived)
T-Shirts for the top 10 contributors and helpers to this documentation project:
10 afl++ logo t-shirts 20$ each 200$
10 shipping cost of t-shirts 10$ each 100$
Total: 10,300$
(in the submission form 10,280$ was entered)
## Additional Information
We have participated in Google Summer of Code in 2020 and hope to be selected
again in 2021.
We have no experience with a technical writer, but we will support that person
with video calls, chats, emails and messaging, provide all necessary information
and write the technical content that is required for the success of this project.
It is clear to us that a technical writer knows how to write, but cannot know
the technical details of complex tooling like afl++. This guidance, input,
etc. has to come from us.

View File

@ -5,6 +5,10 @@
users or for some types of custom fuzzing setups. See [README.md](README.md) for the general
instruction manual.
Note that most tools will warn on any unknown AFL environment variables;
this helps to catch typos. If you want to disable this check, then set the
`AFL_IGNORE_UNKNOWN_ENVS` environment variable.
## 1) Settings for all compilers
Starting with afl++ 3.0 there is only one compiler: afl-cc
@ -18,7 +22,6 @@ To select the different instrumentation modes this can be done by
`MODE` can be one of `LTO` (afl-clang-lto*), `LLVM` (afl-clang-fast*), `GCC_PLUGIN`
(afl-g*-fast) or `GCC` (afl-gcc/afl-g++).
Because (with the exception of the --afl-MODE command line option) the
compile-time tools do not accept afl specific command-line options, they
make fairly broad use of environmental variables instead:
@ -448,6 +451,7 @@ checks or alter some of the more exotic semantics of the tool:
`banner` corresponds to the name of the fuzzer provided through `-M/-S`.
`afl_version` corresponds to the currently running afl version (e.g. `++3.0c`).
Default (empty/non present) will add no tags to the metrics.
See [rpc_statsd.md](rpc_statsd.md) for more information.
- Setting `AFL_CRASH_EXITCODE` sets the exit code afl treats as a crash.
For example, if `AFL_CRASH_EXITCODE='-1'` is set, each input resulting
@ -514,6 +518,12 @@ The QEMU wrapper used to instrument binary-only code supports several settings:
stack pointer in which QEMU can find the return address when `start addr` is
hit.
- With `AFL_USE_QASAN` you can enable QEMU AddressSanitizer for dynamically
linked binaries.
- With `AFL_QEMU_FORCE_DFL` you force QEMU to ignore the registered signal
handlers of the target.
## 6) Settings for afl-cmin
The corpus minimization script offers very little customization:

View File

@ -3,6 +3,40 @@
In the following, we describe a variety of ideas that could be implemented
for future AFL++ versions.
# GSoC 2021
All GSoC 2021 projects will be in the Rust development language!
## UI for libaflrs
Write a user interface to libaflrs, the upcoming backend of afl++.
This might look like the afl-fuzz UI, but you can improve on it - and should!
## Schedulers for libaflrs
A scheduler is a mechanism that selects items from the fuzzing corpus based
on strategy and randomness. One scheduler might focus on long paths,
another on rarity of edges discovered, still another on a combination of
things. Some of the schedulers in afl++ have to be ported, but you are free
to come up with your own if you want to - and see how it performs.
## Forkserver support for libaflrs
The current libaflrs implementation fuzzes in-memory, however obviously we
want to support afl-instrumented binaries as well.
Hence forkserver support needs to be implemented - forking off the target
and talking to it via a socketpair and the forkserver communication
protocol.
## More Observers for libaflrs
An observer is a measuring component that looks at the target being fuzzed
and documents something about it. In traditional fuzzing this is the coverage
of the target, however we want to add various further observers, e.g. stack
depth, heap usage, etc. - this is a topic for an experienced Rust developer.
# Generic ideas and wishlist
## Analysis software
Currently analysis is done by using afl-plot, which is rather outdated.

View File

@ -1,6 +1,6 @@
# Remote monitoring with StatsD
StatsD allows you to receive and aggregate metrics from a wide range of applications and retransmit them to the backend of your choice.
This enables you to create nice and readable dashboards containing all the information you need on your fuzzer instances.
No need to write your own statistics parsing system, deploy and maintain it to all your instances, sync with your graph rendering system...
@ -45,7 +45,7 @@ For more information on these env vars, check out `docs/env_variables.md`.
The simplest way of using this feature is to use any metric provider and change the host/port of your StatsD daemon,
with `AFL_STATSD_HOST` and `AFL_STATSD_PORT`, if required (defaults are `localhost` and port `8125`).
To get started, here are some instructions with free and open source tools.
The following setup is based on Prometheus, statsd_exporter and Grafana.
Grafana here is not mandatory, but gives you some nice graphs and features.
@ -131,7 +131,7 @@ mappings:
Run `docker-compose up -d`.
Everything should now be set up, and you are able to run your fuzzers with
```
AFL_STATSD_TAGS_FLAVOR=dogstatsd AFL_STATSD=1 afl-fuzz -M test-fuzzer-1 -i i -o o ./bin/my-application @@
@ -139,5 +139,5 @@ AFL_STATSD_TAGS_FLAVOR=dogstatsd AFL_STATSD=1 afl-fuzz -S test-fuzzer-2 -i i -o
...
```
This setup may be modified before use in a production environment, depending
on your needs: adding passwords, creating volumes for storage, tweaking the
metrics gathering to get host metrics (CPU, RAM ...).

View File

@ -570,6 +570,7 @@ typedef struct afl_state {
blocks_eff_total, /* Blocks subject to effector maps */
blocks_eff_select, /* Blocks selected as fuzzable */
start_time, /* Unix start time (ms) */
last_sync_time, /* Time of last sync */
last_path_time, /* Time for most recent path (ms) */
last_crash_time, /* Time for most recent crash (ms) */
last_hang_time; /* Time for most recent hang (ms) */
@ -649,6 +650,7 @@ typedef struct afl_state {
u32 cmplog_max_filesize;
u32 cmplog_lvl;
u32 colorize_success;
u8 cmplog_enable_arith, cmplog_enable_transform;
struct afl_pass_stat *pass_stats;
struct cmp_map * orig_cmp_map;
@ -1070,8 +1072,8 @@ void destroy_extras(afl_state_t *);
void load_stats_file(afl_state_t *);
void write_setup_file(afl_state_t *, u32, char **);
void write_stats_file(afl_state_t *, double, double, double);
void maybe_update_plot_file(afl_state_t *, double, double);
void write_stats_file(afl_state_t *, u32, double, double, double);
void maybe_update_plot_file(afl_state_t *, u32, double, double);
void show_stats(afl_state_t *);
void show_init_stats(afl_state_t *);

View File

@ -39,6 +39,7 @@
#define STRINGIFY_VAL_SIZE_MAX (16)
void detect_file_args(char **argv, u8 *prog_in, bool *use_stdin);
void print_suggested_envs(char *mispelled_env);
void check_environment_vars(char **env);
char **argv_cpy_dup(int argc, char **argv);

View File

@ -10,7 +10,7 @@
Dominik Maier <mail@dmnk.co>
Copyright 2016, 2017 Google Inc. All rights reserved.
Copyright 2019-2020 AFLplusplus Project. All rights reserved.
Copyright 2019-2021 AFLplusplus Project. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -25,8 +25,8 @@
/* Version string: */
// c = release, d = volatile github dev, e = experimental branch
#define VERSION "++3.01a"
// c = release, a = volatile github dev, e = experimental branch
#define VERSION "++3.10c"
/******************************************************
* *
@ -36,27 +36,28 @@
/* CMPLOG/REDQUEEN TUNING
*
* Here you can tuning and solving options for cmplog.
* Here you can modify tuning and solving options for CMPLOG.
* Note that these are run-time options for afl-fuzz, no target
* recompilation required.
*
*/
/* Enable transform following (XOR/ADD/SUB manipulations, hex en/decoding) */
// #define CMPLOG_TRANSFORM
/* if TRANSFORM is enabled with '-l T', this additionally enables base64
encoding/decoding */
// #define CMPLOG_SOLVE_TRANSFORM_BASE64
/* if TRANSFORM is enabled, this additionally enables base64 en/decoding */
// #define CMPLOG_TRANSFORM_BASE64
/* If a redqueen pass finds more than one solution, try to combine them? */
#define CMPLOG_COMBINE
/* Minimum % of the corpus to perform cmplog on. Default: 20% */
#define CMPLOG_CORPUS_PERCENT 20U
/* Minimum % of the corpus to perform cmplog on. Default: 10% */
#define CMPLOG_CORPUS_PERCENT 5U
/* Number of potential posititions from which we decide the cmplog becomes
useless, default 16384 */
#define CMPLOG_POSITIONS_MAX 16384U
/* Number of potential positions from which we decide if cmplog becomes
useless, default 8096 */
#define CMPLOG_POSITIONS_MAX 8096U
/* Maximum allowed fails per CMP value. Default: 32 * 3 */
#define CMPLOG_FAIL_MAX 96
/* Maximum allowed fails per CMP value. Default: 128 */
#define CMPLOG_FAIL_MAX 128
/* Now non-cmplog configuration options */
@ -279,6 +280,11 @@
#define SYNC_INTERVAL 8
/* Sync time (minimum time between syncing in ms, time is halved for -M main
nodes) - default is 30 minutes: */
#define SYNC_TIME (30 * 60 * 1000)
/* Output directory reuse grace period (minutes): */
#define OUTPUT_GRACE 25
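As a reading aid, a hedged sketch of how a minimum sync interval like SYNC_TIME could be consumed; this is not the actual afl-fuzz scheduling code, only an illustration of the halving for -M main nodes described in the comment above.

```c
/* Hedged sketch only: applying a SYNC_TIME-style minimum sync interval.
   The real afl-fuzz logic may differ. */
#include <stdint.h>

#define SYNC_TIME (30 * 60 * 1000)          /* 30 minutes, in milliseconds */

static int should_sync(uint64_t now_ms, uint64_t last_sync_ms, int is_main) {

  /* per the comment above, the interval is halved for -M main nodes */
  uint64_t min_interval = is_main ? SYNC_TIME / 2 : SYNC_TIME;
  return (now_ms - last_sync_ms) >= min_interval;

}
```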

View File

@ -61,6 +61,7 @@ static char *afl_environment_variables[] = {
"AFL_FORKSRV_INIT_TMOUT",
"AFL_HARDEN",
"AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES",
"AFL_IGNORE_UNKNOWN_ENVS",
"AFL_IMPORT_FIRST",
"AFL_INST_LIBS",
"AFL_INST_RATIO",
@ -130,6 +131,7 @@ static char *afl_environment_variables[] = {
"AFL_QEMU_DEBUG_MAPS",
"AFL_QEMU_DISABLE_CACHE",
"AFL_QEMU_DRIVER_NO_HOOK",
"AFL_QEMU_FORCE_DFL",
"AFL_QEMU_PERSISTENT_ADDR",
"AFL_QEMU_PERSISTENT_CNT",
"AFL_QEMU_PERSISTENT_GPR",

View File

@ -47,10 +47,10 @@ A special function is `__afl_coverage_interesting`.
To use this, you must define `void __afl_coverage_interesting(u8 val, u32 id);`.
Then you can use this function globally, where the `val` parameter can be set
by you, the `id` parameter is for afl-fuzz and will be overwritten.
Note that useful parameters for `val` are: 1, 2, 3, 4, 8, 16, 32, 64, 128.
A value of e.g. 33 will be seen as 32 for coverage purposes.
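For illustration, a hedged sketch of target code calling this function; only the prototype above is taken from the documentation, the surrounding logic is made up, and the call only links when the target is built with afl-cc in LTO or PCGUARD mode.

```c
/* Illustrative only: emitting a custom coverage signal from target code.
   u8/u32 stand in for afl++'s typedefs of unsigned char / unsigned int. */
typedef unsigned char u8;
typedef unsigned int  u32;

void __afl_coverage_interesting(u8 val, u32 id);  /* provided by the runtime */

void parser_reached_depth(unsigned depth) {

  /* use one of the documented useful values for val (1, 2, 4, 8, ... 128);
     the id parameter is overwritten by afl-fuzz, so 0 is fine here */
  u8 val = (depth >= 7) ? 128 : (u8)(1u << depth);
  __afl_coverage_interesting(val, 0);

}
```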
## 3) Selective instrumentation with AFL_LLVM_ALLOWLIST/AFL_LLVM_DENYLIST
This feature is equivalent to the llvm 12 sancov feature and allows you to
specify, on a filename and/or function name level, which parts to instrument
or to skip.

View File

@ -88,16 +88,35 @@ apt-get install -y clang-12 clang-tools-12 libc++1-12 libc++-12-dev \
### Building llvm yourself (version 12)
Building llvm from github takes quite a long time and is not painless:
```sh
sudo apt install binutils-dev # this is *essential*!
git clone https://github.com/llvm/llvm-project
git clone --depth=1 https://github.com/llvm/llvm-project
cd llvm-project
mkdir build
cd build
cmake -DLLVM_ENABLE_PROJECTS='clang;clang-tools-extra;compiler-rt;libclc;libcxx;libcxxabi;libunwind;lld' -DCMAKE_BUILD_TYPE=Release -DLLVM_BINUTILS_INCDIR=/usr/include/ ../llvm/
make -j $(nproc)
export PATH=`pwd`/bin:$PATH
export LLVM_CONFIG=`pwd`/bin/llvm-config
# Add -G Ninja if ninja-build installed
# "Building with ninja significantly improves your build time, especially with
# incremental builds, and improves your memory usage."
cmake \
-DCLANG_INCLUDE_DOCS="OFF" \
-DCMAKE_BUILD_TYPE=Release \
-DLLVM_BINUTILS_INCDIR=/usr/include/ \
-DLLVM_BUILD_LLVM_DYLIB="ON" \
-DLLVM_ENABLE_BINDINGS="OFF" \
-DLLVM_ENABLE_PROJECTS='clang;compiler-rt;libcxx;libcxxabi;libunwind;lld' \
-DLLVM_ENABLE_WARNINGS="OFF" \
-DLLVM_INCLUDE_BENCHMARKS="OFF" \
-DLLVM_INCLUDE_DOCS="OFF" \
-DLLVM_INCLUDE_EXAMPLES="OFF" \
-DLLVM_INCLUDE_TESTS="OFF" \
-DLLVM_LINK_LLVM_DYLIB="ON" \
-DLLVM_TARGETS_TO_BUILD="host" \
../llvm/
cmake --build . --parallel
export PATH="$(pwd)/bin:$PATH"
export LLVM_CONFIG="$(pwd)/bin/llvm-config"
export LD_LIBRARY_PATH="$(llvm-config --libdir)${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
cd /path/to/AFLplusplus/
make
sudo make install

View File

@ -1088,7 +1088,7 @@ void ModuleSanitizerCoverage::InjectTraceForSwitch(
}
llvm::sort(Initializers.begin() + 2, Initializers.end(),
llvm::sort(drop_begin(Initializers, 2),
[](const Constant *A, const Constant *B) {
return cast<ConstantInt>(A)->getLimitedValue() <
@ -1136,10 +1136,10 @@ void ModuleSanitizerCoverage::InjectTraceForGep(
for (auto GEP : GepTraceTargets) {
IRBuilder<> IRB(GEP);
for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
if (!isa<ConstantInt>(*I) && (*I)->getType()->isIntegerTy())
for (Use &Idx : GEP->indices())
if (!isa<ConstantInt>(Idx) && Idx->getType()->isIntegerTy())
IRB.CreateCall(SanCovTraceGepFunction,
{IRB.CreateIntCast(*I, IntptrTy, true)});
{IRB.CreateIntCast(Idx, IntptrTy, true)});
}

View File

@ -244,8 +244,12 @@ static void __afl_map_shm(void) {
if (__afl_final_loc) {
if (__afl_final_loc % 32)
__afl_final_loc = (((__afl_final_loc + 31) >> 5) << 5);
if (__afl_final_loc % 64) {
__afl_final_loc = (((__afl_final_loc + 63) >> 6) << 6);
}
__afl_map_size = __afl_final_loc;
if (__afl_final_loc > MAP_SIZE) {
@ -1090,7 +1094,7 @@ __attribute__((constructor(0))) void __afl_auto_first(void) {
if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
u8 *ptr;
ptr = (u8 *)malloc(2097152);
ptr = (u8 *)malloc(MAP_INITIAL_SIZE);
if (ptr && (ssize_t)ptr != -1) {
@ -1171,7 +1175,7 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {
fprintf(stderr,
"Running __sanitizer_cov_trace_pc_guard_init: %p-%p (%lu edges)\n",
start, stop, stop - start);
start, stop, (unsigned long)(stop - start));
}
@ -1653,12 +1657,19 @@ static u8 *get_llvm_stdstring(u8 *string) {
void __cmplog_rtn_gcc_stdstring_cstring(u8 *stdstring, u8 *cstring) {
if (unlikely(!__afl_cmp_map)) return;
if (!area_is_mapped(stdstring, 32) || !area_is_mapped(cstring, 32)) return;
__cmplog_rtn_hook(get_gcc_stdstring(stdstring), cstring);
}
void __cmplog_rtn_gcc_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {
if (unlikely(!__afl_cmp_map)) return;
if (!area_is_mapped(stdstring1, 32) || !area_is_mapped(stdstring2, 32))
return;
__cmplog_rtn_hook(get_gcc_stdstring(stdstring1),
get_gcc_stdstring(stdstring2));
@ -1666,12 +1677,19 @@ void __cmplog_rtn_gcc_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {
void __cmplog_rtn_llvm_stdstring_cstring(u8 *stdstring, u8 *cstring) {
if (unlikely(!__afl_cmp_map)) return;
if (!area_is_mapped(stdstring, 32) || !area_is_mapped(cstring, 32)) return;
__cmplog_rtn_hook(get_llvm_stdstring(stdstring), cstring);
}
void __cmplog_rtn_llvm_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {
if (unlikely(!__afl_cmp_map)) return;
if (!area_is_mapped(stdstring1, 32) || !area_is_mapped(stdstring2, 32))
return;
__cmplog_rtn_hook(get_llvm_stdstring(stdstring1),
get_llvm_stdstring(stdstring2));

View File

@ -90,7 +90,7 @@ void dict2file(int fd, u8 *mem, u32 len) {
j = 1;
for (i = 0; i < len; i++) {
if (isprint(mem[i])) {
if (isprint(mem[i]) && mem[i] != '\\' && mem[i] != '"') {
line[j++] = mem[i];

View File

@ -924,9 +924,7 @@ bool AFLLTOPass::runOnModule(Module &M) {
if (getenv("AFL_LLVM_LTO_DONTWRITEID") == NULL) {
uint32_t write_loc = afl_global_id;
if (afl_global_id % 32) write_loc = (((afl_global_id + 32) >> 4) << 4);
uint32_t write_loc = (((afl_global_id + 63) >> 6) << 6);
GlobalVariable *AFLFinalLoc = new GlobalVariable(
M, Int32Ty, true, GlobalValue::ExternalLinkage, 0, "__afl_final_loc");

View File

@ -19,12 +19,13 @@
#include <stdlib.h>
#include <unistd.h>
#include <iostream>
#include <list>
#include <string>
#include <fstream>
#include <sys/time.h>
#include "llvm/Config/llvm-config.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
@ -265,13 +266,20 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
unsigned int max_size = Val->getType()->getIntegerBitWidth(), cast_size;
unsigned char do_cast = 0;
if (!SI->getNumCases() || max_size < 16 || max_size % 8) {
if (!SI->getNumCases() || max_size < 16) {
// if (!be_quiet) errs() << "skip trivial switch..\n";
continue;
}
if (max_size % 8) {
max_size = (((max_size / 8) + 1) * 8);
do_cast = 1;
}
IRBuilder<> IRB(SI->getParent());
IRB.SetInsertPoint(SI);
@ -310,36 +318,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
if (do_cast) {
ConstantInt *cint = dyn_cast<ConstantInt>(Val);
if (cint) {
uint64_t val = cint->getZExtValue();
// fprintf(stderr, "ConstantInt: %lu\n", val);
switch (cast_size) {
case 8:
CompareTo = ConstantInt::get(Int8Ty, val);
break;
case 16:
CompareTo = ConstantInt::get(Int16Ty, val);
break;
case 32:
CompareTo = ConstantInt::get(Int32Ty, val);
break;
case 64:
CompareTo = ConstantInt::get(Int64Ty, val);
break;
case 128:
CompareTo = ConstantInt::get(Int128Ty, val);
break;
}
} else {
CompareTo = IRB.CreateBitCast(Val, IntegerType::get(C, cast_size));
}
CompareTo =
IRB.CreateIntCast(CompareTo, IntegerType::get(C, cast_size), false);
}
@ -361,27 +341,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
if (do_cast) {
uint64_t val = cint->getZExtValue();
// fprintf(stderr, "ConstantInt: %lu\n", val);
switch (cast_size) {
case 8:
new_param = ConstantInt::get(Int8Ty, val);
break;
case 16:
new_param = ConstantInt::get(Int16Ty, val);
break;
case 32:
new_param = ConstantInt::get(Int32Ty, val);
break;
case 64:
new_param = ConstantInt::get(Int64Ty, val);
break;
case 128:
new_param = ConstantInt::get(Int128Ty, val);
break;
}
new_param =
IRB.CreateIntCast(cint, IntegerType::get(C, cast_size), false);
}
@ -540,7 +501,14 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
}
if (!max_size || max_size % 8 || max_size < 16) { continue; }
if (!max_size || max_size < 16) { continue; }
if (max_size % 8) {
max_size = (((max_size / 8) + 1) * 8);
do_cast = 1;
}
if (max_size > 128) {
@ -573,88 +541,27 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
}
if (do_cast) {
// errs() << "[CMPLOG] cmp " << *cmpInst << "(in function " <<
// cmpInst->getFunction()->getName() << ")\n";
// F*cking LLVM optimized out any kind of bitcasts of ConstantInt values
// creating illegal calls. WTF. So we have to work around this.
// first bitcast to integer type of the same bitsize as the original
// type (this is a nop, if already integer)
Value *op0_i = IRB.CreateBitCast(
op0, IntegerType::get(C, op0->getType()->getPrimitiveSizeInBits()));
// then create a int cast, which does zext, trunc or bitcast. In our case
// usually zext to the next larger supported type (this is a nop if
// already the right type)
Value *V0 =
IRB.CreateIntCast(op0_i, IntegerType::get(C, cast_size), false);
args.push_back(V0);
Value *op1_i = IRB.CreateBitCast(
op1, IntegerType::get(C, op1->getType()->getPrimitiveSizeInBits()));
Value *V1 =
IRB.CreateIntCast(op1_i, IntegerType::get(C, cast_size), false);
args.push_back(V1);
ConstantInt *cint = dyn_cast<ConstantInt>(op0);
if (cint) {
uint64_t val = cint->getZExtValue();
// fprintf(stderr, "ConstantInt: %lu\n", val);
ConstantInt *new_param = NULL;
switch (cast_size) {
case 8:
new_param = ConstantInt::get(Int8Ty, val);
break;
case 16:
new_param = ConstantInt::get(Int16Ty, val);
break;
case 32:
new_param = ConstantInt::get(Int32Ty, val);
break;
case 64:
new_param = ConstantInt::get(Int64Ty, val);
break;
case 128:
new_param = ConstantInt::get(Int128Ty, val);
break;
}
if (!new_param) { continue; }
args.push_back(new_param);
} else {
Value *V0 = IRB.CreateBitCast(op0, IntegerType::get(C, cast_size));
args.push_back(V0);
}
cint = dyn_cast<ConstantInt>(op1);
if (cint) {
uint64_t val = cint->getZExtValue();
ConstantInt *new_param = NULL;
switch (cast_size) {
case 8:
new_param = ConstantInt::get(Int8Ty, val);
break;
case 16:
new_param = ConstantInt::get(Int16Ty, val);
break;
case 32:
new_param = ConstantInt::get(Int32Ty, val);
break;
case 64:
new_param = ConstantInt::get(Int64Ty, val);
break;
case 128:
new_param = ConstantInt::get(Int128Ty, val);
break;
}
if (!new_param) { continue; }
args.push_back(new_param);
} else {
Value *V1 = IRB.CreateBitCast(op1, IntegerType::get(C, cast_size));
args.push_back(V1);
}
} else {
args.push_back(op0);
args.push_back(op1);
}
// errs() << "[CMPLOG] casted parameters:\n0: " << *V0 << "\n1: " << *V1
// << "\n";
ConstantInt *attribute = ConstantInt::get(Int8Ty, attr);
args.push_back(attribute);

View File

@ -362,19 +362,22 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp,
bool HasStr1 = getConstantStringInfo(Str1P, Str1);
bool HasStr2 = getConstantStringInfo(Str2P, Str2);
uint64_t constStrLen, unrollLen, constSizedLen = 0;
bool isMemcmp =
!callInst->getCalledFunction()->getName().compare(StringRef("memcmp"));
bool isSizedcmp = isMemcmp ||
!callInst->getCalledFunction()->getName().compare(
StringRef("strncmp")) ||
!callInst->getCalledFunction()->getName().compare(
StringRef("strncasecmp"));
bool isMemcmp = false;
bool isSizedcmp = false;
bool isCaseInsensitive = false;
Function * Callee = callInst->getCalledFunction();
if (Callee) {
isMemcmp = Callee->getName().compare("memcmp") == 0;
isSizedcmp = isMemcmp || Callee->getName().compare("strncmp") == 0 ||
Callee->getName().compare("strncasecmp") == 0;
isCaseInsensitive = Callee->getName().compare("strcasecmp") == 0 ||
Callee->getName().compare("strncasecmp") == 0;
}
Value *sizedValue = isSizedcmp ? callInst->getArgOperand(2) : NULL;
bool isConstSized = sizedValue && isa<ConstantInt>(sizedValue);
bool isCaseInsensitive = !callInst->getCalledFunction()->getName().compare(
StringRef("strcasecmp")) ||
!callInst->getCalledFunction()->getName().compare(
StringRef("strncasecmp"));
if (!(HasStr1 || HasStr2)) {
@ -436,15 +439,6 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp,
else
unrollLen = constStrLen;
/*
if (!be_quiet)
errs() << callInst->getCalledFunction()->getName() << ": unroll len "
<< unrollLen
<< ((isSizedcmp && !isConstSized) ? ", variable n" : "") << ":
"
<< ConstStr << "\n";
*/
/* split before the call instruction */
BasicBlock *bb = callInst->getParent();
BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(callInst));

View File

@ -1 +1 @@
6ab6bf28de
e36a30ebca

View File

@ -17,7 +17,7 @@ The idea and much of the initial implementation comes from Andrew Griffiths.
The actual implementation on current QEMU (shipped as qemuafl) is from
Andrea Fioraldi. Special thanks to abiondo that re-enabled TCG chaining.
## 2) How to use qemu_mode
The feature is implemented with a patched QEMU. The simplest way
to build it is to run ./build_qemu_support.sh. The script will download,
@ -176,7 +176,12 @@ Comparative measurements of execution speed or instrumentation coverage will be
fairly meaningless if the optimization levels or instrumentation scopes don't
match.
## 12) Other features
With `AFL_QEMU_FORCE_DFL` you force QEMU to ignore the registered signal
handlers of the target.
## 13) Gotchas, feedback, bugs
If you need to fix up checksums or do other cleanup on mutated test cases, see
utils/custom_mutators/ for a viable solution.
@ -197,19 +202,12 @@ with -march=core2, can help.
Beyond that, this is an early-stage mechanism, so field reports are welcome.
You can send them to <afl-users@googlegroups.com>.
## 14) Alternatives: static rewriting
Statically rewriting binaries just once, instead of attempting to translate
them at run time, can be a faster alternative. That said, static rewriting is
fraught with peril, because it depends on being able to properly and fully model
program control flow without actually executing each and every code path.
The best implementation is this one:
https://github.com/vanhauser-thc/afl-dyninst
The issue however is Dyninst which is not rewriting the binaries so that
they run stable. A lot of crashes happen, especially in C++ programs that
use throw/catch. Try it first, and if it works for you be happy as it is
2-3x as fast as qemu_mode, however usually not as fast as QEMU persistent mode.
Checkout the "Fuzzing binary-only targets" section in our main README.md and
the docs/binaryonly_fuzzing.md document for more information and hints.

View File

@ -233,7 +233,6 @@ QEMU_CONF_FLAGS=" \
--disable-xen \
--disable-xen-pci-passthrough \
--disable-xfsctl \
--enable-pie \
--python=${PYTHONBIN} \
--target-list="${CPU_TARGET}-linux-user" \
--without-default-devices \
@ -241,7 +240,7 @@ QEMU_CONF_FLAGS=" \
if [ -n "${CROSS_PREFIX}" ]; then
QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} --cross-prefix=${CROSS_PREFIX}"
QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS --cross-prefix=$CROSS_PREFIX"
fi
@ -249,10 +248,15 @@ if [ "$STATIC" = "1" ]; then
echo Building STATIC binary
QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} \
QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS \
--static \
--extra-cflags=-DAFL_QEMU_STATIC_BUILD=1 \
"
else
QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} --enable-pie "
fi
if [ "$DEBUG" = "1" ]; then
@ -262,7 +266,7 @@ if [ "$DEBUG" = "1" ]; then
# --enable-gcov might go here but incurs a mesonbuild error on meson
# versions prior to 0.56:
# https://github.com/qemu/meson/commit/903d5dd8a7dc1d6f8bef79e66d6ebc07c
QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} \
QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS \
--disable-strip \
--enable-debug \
--enable-debug-info \
@ -275,7 +279,7 @@ if [ "$DEBUG" = "1" ]; then
else
QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} \
QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS \
--disable-debug-info \
--disable-debug-mutex \
--disable-debug-tcg \
@ -290,7 +294,7 @@ if [ "$PROFILING" = "1" ]; then
echo Building PROFILED binary
QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} \
QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS \
--enable-gprof \
--enable-profiler \
"
@ -298,7 +302,7 @@ if [ "$PROFILING" = "1" ]; then
fi
# shellcheck disable=SC2086
./configure ${QEMU_CONF_FLAGS} || exit 1
./configure $QEMU_CONF_FLAGS || exit 1
echo "[+] Configuration complete."
@ -370,10 +374,20 @@ if [ "$ORIG_CROSS" = "" ]; then
fi
fi
if ! command -v "$CROSS" > /dev/null
then
if ! command -v "$CROSS" > /dev/null ; then
if [ "$CPU_TARGET" = "$(uname -m)" ] ; then
echo "[+] Building afl++ qemu support libraries with CC=$CC"
echo "[+] Building libcompcov ..."
make -C libcompcov && echo "[+] libcompcov ready"
echo "[+] Building unsigaction ..."
make -C unsigaction && echo "[+] unsigaction ready"
echo "[+] Building libqasan ..."
make -C libqasan && echo "[+] libqasan ready"
else
echo "[!] Cross compiler $CROSS could not be found, cannot compile libcompcov libqasan and unsigaction"
fi
else
echo "[+] Building afl++ qemu support libraries with CC=$CROSS"
echo "[+] Building libcompcov ..."
make -C libcompcov CC=$CROSS && echo "[+] libcompcov ready"
echo "[+] Building unsigaction ..."

View File

@ -29,6 +29,8 @@
#include <sys/types.h>
#include <sys/shm.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include "types.h"
#include "config.h"
@ -159,14 +161,15 @@ static void __compcov_load(void) {
}
static void __compcov_trace(u64 cur_loc, const u8 *v0, const u8 *v1, size_t n) {
static void __compcov_trace(uintptr_t cur_loc, const u8 *v0, const u8 *v1,
size_t n) {
size_t i;
if (debug_fd != 1) {
char debugbuf[4096];
snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %zu\n", cur_loc,
snprintf(debugbuf, sizeof(debugbuf), "0x%" PRIxPTR " %s %s %zu\n", cur_loc,
v0 == NULL ? "(null)" : (char *)v0,
v1 == NULL ? "(null)" : (char *)v1, n);
write(debug_fd, debugbuf, strlen(debugbuf));
@ -206,7 +209,7 @@ int strcmp(const char *str1, const char *str2) {
if (n <= MAX_CMP_LENGTH) {
u64 cur_loc = (u64)retaddr;
uintptr_t cur_loc = (uintptr_t)retaddr;
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= MAP_SIZE - 1;
@ -235,7 +238,7 @@ int strncmp(const char *str1, const char *str2, size_t len) {
if (n <= MAX_CMP_LENGTH) {
u64 cur_loc = (u64)retaddr;
uintptr_t cur_loc = (uintptr_t)retaddr;
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= MAP_SIZE - 1;
@ -265,7 +268,7 @@ int strcasecmp(const char *str1, const char *str2) {
if (n <= MAX_CMP_LENGTH) {
u64 cur_loc = (u64)retaddr;
uintptr_t cur_loc = (uintptr_t)retaddr;
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= MAP_SIZE - 1;
@ -296,7 +299,7 @@ int strncasecmp(const char *str1, const char *str2, size_t len) {
if (n <= MAX_CMP_LENGTH) {
u64 cur_loc = (u64)retaddr;
uintptr_t cur_loc = (uintptr_t)retaddr;
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= MAP_SIZE - 1;
@ -324,7 +327,7 @@ int memcmp(const void *mem1, const void *mem2, size_t len) {
if (n <= MAX_CMP_LENGTH) {
u64 cur_loc = (u64)retaddr;
uintptr_t cur_loc = (uintptr_t)retaddr;
cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
cur_loc &= MAP_SIZE - 1;

View File

@ -4,16 +4,25 @@ This library is the injected runtime used by QEMU AddressSanitizer (QASan).
The original repository is [here](https://github.com/andreafioraldi/qasan).
The version embedded in qemuafl is an updated version of just the usermode part
and this runtime is injected via LD_PRELOAD (so works just for dynamically
linked binaries).
The usage is super simple, just set the env var `AFL_USE_QASAN=1` when fuzzing
in qemu mode (-Q). afl-fuzz will automatically set AFL_PRELOAD to load this
library and enable the QASan instrumentation in afl-qemu-trace.
For debugging purposes, we still suggest running the original QASan, as the
stacktrace support for ARM (just a debug feature, it does not affect the bug
finding capabilities during fuzzing) is WIP.
### When should I use QASan?
If your target binary is PIC x86_64, you should also give
[retrowrite](https://github.com/HexHive/retrowrite) a try for static
rewriting.
If it fails, or if your binary is for another architecture, or you want to use
persistent and snapshot mode, AFL++ QASan mode is what you want/have to use.
Note that the overhead of libdislocator when combined with QEMU mode is much
lower, but it can catch fewer bugs. This is a short blanket - take your choice.

File diff suppressed because it is too large

View File

@ -26,6 +26,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "libqasan.h"
#include "map_macro.h"
ssize_t (*__lq_libc_write)(int, const void *, size_t);
ssize_t (*__lq_libc_read)(int, void *, size_t);
char *(*__lq_libc_fgets)(char *, int, FILE *);
int (*__lq_libc_atoi)(const char *);
long (*__lq_libc_atol)(const char *);
@ -35,6 +37,8 @@ void __libqasan_init_hooks(void) {
__libqasan_init_malloc();
__lq_libc_write = ASSERT_DLSYM(write);
__lq_libc_read = ASSERT_DLSYM(read);
__lq_libc_fgets = ASSERT_DLSYM(fgets);
__lq_libc_atoi = ASSERT_DLSYM(atoi);
__lq_libc_atol = ASSERT_DLSYM(atol);
@ -42,6 +46,30 @@ void __libqasan_init_hooks(void) {
}
ssize_t write(int fd, const void *buf, size_t count) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: write(%d, %p, %zu)\n", rtv, fd, buf, count);
ssize_t r = __lq_libc_write(fd, buf, count);
QASAN_DEBUG("\t\t = %zd\n", r);
return r;
}
ssize_t read(int fd, void *buf, size_t count) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: read(%d, %p, %zu)\n", rtv, fd, buf, count);
ssize_t r = __lq_libc_read(fd, buf, count);
QASAN_DEBUG("\t\t = %zd\n", r);
return r;
}
#ifdef __ANDROID__
size_t malloc_usable_size(const void *ptr) {
@ -54,7 +82,7 @@ size_t malloc_usable_size(void *ptr) {
QASAN_DEBUG("%14p: malloc_usable_size(%p)\n", rtv, ptr);
size_t r = __libqasan_malloc_usable_size((void *)ptr);
QASAN_DEBUG("\t\t = %ld\n", r);
QASAN_DEBUG("\t\t = %zu\n", r);
return r;
@ -64,7 +92,7 @@ void *malloc(size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: malloc(%ld)\n", rtv, size);
QASAN_DEBUG("%14p: malloc(%zu)\n", rtv, size);
void *r = __libqasan_malloc(size);
QASAN_DEBUG("\t\t = %p\n", r);
@ -76,7 +104,7 @@ void *calloc(size_t nmemb, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: calloc(%ld, %ld)\n", rtv, nmemb, size);
QASAN_DEBUG("%14p: calloc(%zu, %zu)\n", rtv, nmemb, size);
void *r = __libqasan_calloc(nmemb, size);
QASAN_DEBUG("\t\t = %p\n", r);
@ -88,7 +116,7 @@ void *realloc(void *ptr, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: realloc(%p, %ld)\n", rtv, ptr, size);
QASAN_DEBUG("%14p: realloc(%p, %zu)\n", rtv, ptr, size);
void *r = __libqasan_realloc(ptr, size);
QASAN_DEBUG("\t\t = %p\n", r);
@ -100,7 +128,7 @@ int posix_memalign(void **memptr, size_t alignment, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: posix_memalign(%p, %ld, %ld)\n", rtv, memptr, alignment,
QASAN_DEBUG("%14p: posix_memalign(%p, %zu, %zu)\n", rtv, memptr, alignment,
size);
int r = __libqasan_posix_memalign(memptr, alignment, size);
QASAN_DEBUG("\t\t = %d [*memptr = %p]\n", r, *memptr);
@ -113,7 +141,7 @@ void *memalign(size_t alignment, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memalign(%ld, %ld)\n", rtv, alignment, size);
QASAN_DEBUG("%14p: memalign(%zu, %zu)\n", rtv, alignment, size);
void *r = __libqasan_memalign(alignment, size);
QASAN_DEBUG("\t\t = %p\n", r);
@ -125,7 +153,7 @@ void *aligned_alloc(size_t alignment, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: aligned_alloc(%ld, %ld)\n", rtv, alignment, size);
QASAN_DEBUG("%14p: aligned_alloc(%zu, %zu)\n", rtv, alignment, size);
void *r = __libqasan_aligned_alloc(alignment, size);
QASAN_DEBUG("\t\t = %p\n", r);
@ -137,7 +165,7 @@ void *valloc(size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: valloc(%ld)\n", rtv, size);
QASAN_DEBUG("%14p: valloc(%zu)\n", rtv, size);
void *r = __libqasan_memalign(sysconf(_SC_PAGESIZE), size);
QASAN_DEBUG("\t\t = %p\n", r);
@ -149,7 +177,7 @@ void *pvalloc(size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: pvalloc(%ld)\n", rtv, size);
QASAN_DEBUG("%14p: pvalloc(%zu)\n", rtv, size);
size_t page_size = sysconf(_SC_PAGESIZE);
size = (size & (page_size - 1)) + page_size;
void *r = __libqasan_memalign(page_size, size);
@ -174,7 +202,9 @@ char *fgets(char *s, int size, FILE *stream) {
QASAN_DEBUG("%14p: fgets(%p, %d, %p)\n", rtv, s, size, stream);
QASAN_STORE(s, size);
#ifndef __ANDROID__
QASAN_LOAD(stream, sizeof(FILE));
#endif
char *r = __lq_libc_fgets(s, size, stream);
QASAN_DEBUG("\t\t = %p\n", r);
@ -186,7 +216,7 @@ int memcmp(const void *s1, const void *s2, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memcmp(%p, %p, %ld)\n", rtv, s1, s2, n);
QASAN_DEBUG("%14p: memcmp(%p, %p, %zu)\n", rtv, s1, s2, n);
QASAN_LOAD(s1, n);
QASAN_LOAD(s2, n);
int r = __libqasan_memcmp(s1, s2, n);
@ -200,7 +230,7 @@ void *memcpy(void *dest, const void *src, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memcpy(%p, %p, %ld)\n", rtv, dest, src, n);
QASAN_DEBUG("%14p: memcpy(%p, %p, %zu)\n", rtv, dest, src, n);
QASAN_LOAD(src, n);
QASAN_STORE(dest, n);
void *r = __libqasan_memcpy(dest, src, n);
@ -214,7 +244,7 @@ void *mempcpy(void *dest, const void *src, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: mempcpy(%p, %p, %ld)\n", rtv, dest, src, n);
QASAN_DEBUG("%14p: mempcpy(%p, %p, %zu)\n", rtv, dest, src, n);
QASAN_LOAD(src, n);
QASAN_STORE(dest, n);
void *r = (uint8_t *)__libqasan_memcpy(dest, src, n) + n;
@ -228,7 +258,7 @@ void *memmove(void *dest, const void *src, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memmove(%p, %p, %ld)\n", rtv, dest, src, n);
QASAN_DEBUG("%14p: memmove(%p, %p, %zu)\n", rtv, dest, src, n);
QASAN_LOAD(src, n);
QASAN_STORE(dest, n);
void *r = __libqasan_memmove(dest, src, n);
@ -242,7 +272,7 @@ void *memset(void *s, int c, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memset(%p, %d, %ld)\n", rtv, s, c, n);
QASAN_DEBUG("%14p: memset(%p, %d, %zu)\n", rtv, s, c, n);
QASAN_STORE(s, n);
void *r = __libqasan_memset(s, c, n);
QASAN_DEBUG("\t\t = %p\n", r);
@ -255,7 +285,7 @@ void *memchr(const void *s, int c, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memchr(%p, %d, %ld)\n", rtv, s, c, n);
QASAN_DEBUG("%14p: memchr(%p, %d, %zu)\n", rtv, s, c, n);
void *r = __libqasan_memchr(s, c, n);
if (r == NULL)
QASAN_LOAD(s, n);
@ -271,7 +301,7 @@ void *memrchr(const void *s, int c, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memrchr(%p, %d, %ld)\n", rtv, s, c, n);
QASAN_DEBUG("%14p: memrchr(%p, %d, %zu)\n", rtv, s, c, n);
QASAN_LOAD(s, n);
void *r = __libqasan_memrchr(s, c, n);
QASAN_DEBUG("\t\t = %p\n", r);
@ -285,7 +315,7 @@ void *memmem(const void *haystack, size_t haystacklen, const void *needle,
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memmem(%p, %ld, %p, %ld)\n", rtv, haystack, haystacklen,
QASAN_DEBUG("%14p: memmem(%p, %zu, %p, %zu)\n", rtv, haystack, haystacklen,
needle, needlelen);
QASAN_LOAD(haystack, haystacklen);
QASAN_LOAD(needle, needlelen);
@ -301,7 +331,7 @@ void bzero(void *s, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: bzero(%p, %ld)\n", rtv, s, n);
QASAN_DEBUG("%14p: bzero(%p, %zu)\n", rtv, s, n);
QASAN_STORE(s, n);
__libqasan_memset(s, 0, n);
@ -313,7 +343,7 @@ void explicit_bzero(void *s, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: bzero(%p, %ld)\n", rtv, s, n);
QASAN_DEBUG("%14p: bzero(%p, %zu)\n", rtv, s, n);
QASAN_STORE(s, n);
__libqasan_memset(s, 0, n);
@ -323,7 +353,7 @@ int bcmp(const void *s1, const void *s2, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: bcmp(%p, %p, %ld)\n", rtv, s1, s2, n);
QASAN_DEBUG("%14p: bcmp(%p, %p, %zu)\n", rtv, s1, s2, n);
QASAN_LOAD(s1, n);
QASAN_LOAD(s2, n);
int r = __libqasan_bcmp(s1, s2, n);
@ -381,7 +411,7 @@ int strncasecmp(const char *s1, const char *s2, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strncasecmp(%p, %p, %ld)\n", rtv, s1, s2, n);
QASAN_DEBUG("%14p: strncasecmp(%p, %p, %zu)\n", rtv, s1, s2, n);
size_t l1 = __libqasan_strnlen(s1, n);
QASAN_LOAD(s1, l1);
size_t l2 = __libqasan_strnlen(s2, n);
@ -431,7 +461,7 @@ int strncmp(const char *s1, const char *s2, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strncmp(%p, %p, %ld)\n", rtv, s1, s2, n);
QASAN_DEBUG("%14p: strncmp(%p, %p, %zu)\n", rtv, s1, s2, n);
size_t l1 = __libqasan_strnlen(s1, n);
QASAN_LOAD(s1, l1);
size_t l2 = __libqasan_strnlen(s2, n);
@ -462,7 +492,7 @@ char *strncpy(char *dest, const char *src, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strncpy(%p, %p, %ld)\n", rtv, dest, src, n);
QASAN_DEBUG("%14p: strncpy(%p, %p, %zu)\n", rtv, dest, src, n);
size_t l = __libqasan_strnlen(src, n);
QASAN_STORE(dest, n);
void *r;
@ -521,7 +551,7 @@ size_t strlen(const char *s) {
QASAN_DEBUG("%14p: strlen(%p)\n", rtv, s);
size_t r = __libqasan_strlen(s);
QASAN_LOAD(s, r + 1);
QASAN_DEBUG("\t\t = %ld\n", r);
QASAN_DEBUG("\t\t = %zu\n", r);
return r;
@ -531,10 +561,10 @@ size_t strnlen(const char *s, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strnlen(%p, %ld)\n", rtv, s, n);
QASAN_DEBUG("%14p: strnlen(%p, %zu)\n", rtv, s, n);
size_t r = __libqasan_strnlen(s, n);
QASAN_LOAD(s, r);
QASAN_DEBUG("\t\t = %ld\n", r);
QASAN_DEBUG("\t\t = %zu\n", r);
return r;
@ -621,7 +651,7 @@ size_t wcslen(const wchar_t *s) {
QASAN_DEBUG("%14p: wcslen(%p)\n", rtv, s);
size_t r = __libqasan_wcslen(s);
QASAN_LOAD(s, sizeof(wchar_t) * (r + 1));
QASAN_DEBUG("\t\t = %ld\n", r);
QASAN_DEBUG("\t\t = %zu\n", r);
return r;

View File

@ -72,7 +72,7 @@ void __libqasan_print_maps(void) {
QASAN_LOG("QEMU-AddressSanitizer (v%s)\n", QASAN_VERSTR);
QASAN_LOG(
"Copyright (C) 2019-2020 Andrea Fioraldi <andreafioraldi@gmail.com>\n");
"Copyright (C) 2019-2021 Andrea Fioraldi <andreafioraldi@gmail.com>\n");
QASAN_LOG("\n");
if (__qasan_log) __libqasan_print_maps();

View File

@ -24,6 +24,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "libqasan.h"
#include <features.h>
#include <errno.h>
#include <stddef.h>
#include <assert.h>
@ -65,9 +66,26 @@ struct chunk_struct {
};
#ifdef __GLIBC__
void *(*__lq_libc_malloc)(size_t);
void (*__lq_libc_free)(void *);
#define backend_malloc __lq_libc_malloc
#define backend_free __lq_libc_free
#define TMP_ZONE_SIZE 4096
static int __tmp_alloc_zone_idx;
static unsigned char __tmp_alloc_zone[TMP_ZONE_SIZE];
#else
// From dlmalloc.c
void *dlmalloc(size_t);
void dlfree(void *);
void * dlmalloc(size_t);
void dlfree(void *);
#define backend_malloc dlmalloc
#define backend_free dlfree
#endif
int __libqasan_malloc_initialized;
@ -102,9 +120,9 @@ static int quanratine_push(struct chunk_begin *ck) {
quarantine_bytes -= tmp->requested_size;
if (tmp->aligned_orig)
dlfree(tmp->aligned_orig);
backend_free(tmp->aligned_orig);
else
dlfree(tmp);
backend_free(tmp);
}
@ -122,6 +140,11 @@ void __libqasan_init_malloc(void) {
if (__libqasan_malloc_initialized) return;
#ifdef __GLIBC__
__lq_libc_malloc = dlsym(RTLD_NEXT, "malloc");
__lq_libc_free = dlsym(RTLD_NEXT, "free");
#endif
LOCK_INIT(&quarantine_lock, PTHREAD_PROCESS_PRIVATE);
__libqasan_malloc_initialized = 1;
@ -142,13 +165,27 @@ size_t __libqasan_malloc_usable_size(void *ptr) {
void *__libqasan_malloc(size_t size) {
if (!__libqasan_malloc_initialized) { __libqasan_init_malloc(); }
if (!__libqasan_malloc_initialized) {
if (!__libqasan_malloc_initialized) __libqasan_init_malloc();
__libqasan_init_malloc();
#ifdef __GLIBC__
void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx];
if (size & (ALLOC_ALIGN_SIZE - 1))
__tmp_alloc_zone_idx +=
(size & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
else
__tmp_alloc_zone_idx += size;
return r;
#endif
}
int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread
struct chunk_begin *p = dlmalloc(sizeof(struct chunk_struct) + size);
struct chunk_begin *p = backend_malloc(sizeof(struct chunk_struct) + size);
QASAN_SWAP(state);
@ -179,6 +216,12 @@ void __libqasan_free(void *ptr) {
if (!ptr) return;
#ifdef __GLIBC__
if (ptr >= (void *)__tmp_alloc_zone &&
ptr < ((void *)__tmp_alloc_zone + TMP_ZONE_SIZE))
return;
#endif
struct chunk_begin *p = ptr;
p -= 1;
@ -190,9 +233,9 @@ void __libqasan_free(void *ptr) {
if (!quanratine_push(p)) {
if (p->aligned_orig)
dlfree(p->aligned_orig);
backend_free(p->aligned_orig);
else
dlfree(p);
backend_free(p);
}
@ -210,6 +253,17 @@ void *__libqasan_calloc(size_t nmemb, size_t size) {
size *= nmemb;
#ifdef __GLIBC__
if (!__libqasan_malloc_initialized) {
void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx];
__tmp_alloc_zone_idx += size;
return r;
}
#endif
char *p = __libqasan_malloc(size);
if (!p) return NULL;
@ -252,7 +306,7 @@ int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) {
int state = QASAN_SWAP(QASAN_DISABLED); // disable qasan for this thread
char *orig = dlmalloc(sizeof(struct chunk_struct) + size);
char *orig = backend_malloc(sizeof(struct chunk_struct) + size);
QASAN_SWAP(state);

View File

@ -1,7 +1,7 @@
/*
This code is DEPRECATED!
I'm keeping it here cause maybe the unistrumentation of a function is needed
I'm keeping it here cause maybe the uninstrumentation of a function is needed
for some strange reason.
*/

View File

@ -554,6 +554,11 @@ static void edit_params(u32 argc, char **argv, char **envp) {
}
#if LLVM_MAJOR >= 13
// fuck you llvm 13
cc_params[cc_par_cnt++] = "-fno-experimental-new-pass-manager";
#endif
if (lto_mode && !have_c) {
u8 *ld_path = strdup(AFL_REAL_LD);
@ -1416,6 +1421,14 @@ int main(int argc, char **argv, char **envp) {
}
if (instrument_opt_mode && instrument_mode == INSTRUMENT_DEFAULT &&
(compiler_mode == LLVM || compiler_mode == UNSET)) {
instrument_mode = INSTRUMENT_CLASSIC;
compiler_mode = LLVM;
}
if (!compiler_mode) {
// lto is not a default because outside of afl-cc RANLIB and AR have to
@ -1582,6 +1595,7 @@ int main(int argc, char **argv, char **envp) {
"libtokencap.so)\n"
" AFL_PATH: path to instrumenting pass and runtime "
"(afl-compiler-rt.*o)\n"
" AFL_IGNORE_UNKNOWN_ENVS: don't warn on unknown env vars\n"
" AFL_INST_RATIO: percentage of branches to instrument\n"
" AFL_QUIET: suppress verbose output\n"
" AFL_HARDEN: adds code hardening to catch memory bugs\n"
@ -1693,7 +1707,10 @@ int main(int argc, char **argv, char **envp) {
"Do not be overwhelmed :) afl-cc uses good defaults if no options are "
"selected.\n"
"Read the documentation for FEATURES though, all are good but few are "
"defaults.\n\n");
"defaults.\n"
"Recommended is afl-clang-lto with AFL_LLVM_CMPLOG or afl-clang-fast "
"with\n"
"AFL_LLVM_CMPLOG and AFL_LLVM_DICT2FILE.\n\n");
exit(1);
@ -1785,8 +1802,8 @@ int main(int argc, char **argv, char **envp) {
if (instrument_opt_mode && instrument_mode != INSTRUMENT_CLASSIC &&
instrument_mode != INSTRUMENT_CFG)
FATAL(
"CTX and NGRAM instrumentation options can only be used with CFG "
"(recommended) and CLASSIC instrumentation modes!");
"CTX and NGRAM instrumentation options can only be used with LLVM and "
"CFG or CLASSIC instrumentation modes!");
if (getenv("AFL_LLVM_SKIP_NEVERZERO") && getenv("AFL_LLVM_NOT_ZERO"))
FATAL(

View File

@ -47,6 +47,10 @@ u8 be_quiet = 0;
u8 *doc_path = "";
u8 last_intr = 0;
#ifndef AFL_PATH
#define AFL_PATH "/usr/local/lib/afl/"
#endif
void detect_file_args(char **argv, u8 *prog_in, bool *use_stdin) {
u32 i = 0;
@ -372,11 +376,11 @@ u8 *get_libqasan_path(u8 *own_loc) {
}
if (!access(BIN_PATH "/libqasan.so", X_OK)) {
if (!access(AFL_PATH "/libqasan.so", X_OK)) {
if (cp) { ck_free(cp); }
return ck_strdup(BIN_PATH "/libqasan.so");
return ck_strdup(AFL_PATH "/libqasan.so");
}
@ -518,12 +522,147 @@ int parse_afl_kill_signal_env(u8 *afl_kill_signal_env, int default_signal) {
}
static inline unsigned int helper_min3(unsigned int a, unsigned int b,
unsigned int c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
// from
// https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#C
static int string_distance_levenshtein(char *s1, char *s2) {
unsigned int s1len, s2len, x, y, lastdiag, olddiag;
s1len = strlen(s1);
s2len = strlen(s2);
unsigned int column[s1len + 1];
column[s1len] = 1;
for (y = 1; y <= s1len; y++)
column[y] = y;
for (x = 1; x <= s2len; x++) {
column[0] = x;
for (y = 1, lastdiag = x - 1; y <= s1len; y++) {
olddiag = column[y];
column[y] = helper_min3(column[y] + 1, column[y - 1] + 1,
lastdiag + (s1[y - 1] == s2[x - 1] ? 0 : 1));
lastdiag = olddiag;
}
}
return column[s1len];
}
#define ENV_SIMILARITY_TRESHOLD 3
void print_suggested_envs(char *mispelled_env) {
size_t env_name_len =
strcspn(mispelled_env, "=") - 4; // remove the AFL_prefix
char *env_name = ck_alloc(env_name_len + 1);
memcpy(env_name, mispelled_env + 4, env_name_len);
char *seen = ck_alloc(sizeof(afl_environment_variables) / sizeof(char *));
int found = 0;
int j;
for (j = 0; afl_environment_variables[j] != NULL; ++j) {
char *afl_env = afl_environment_variables[j] + 4;
int distance = string_distance_levenshtein(afl_env, env_name);
if (distance < ENV_SIMILARITY_TRESHOLD && seen[j] == 0) {
SAYF("Did you mean %s?\n", afl_environment_variables[j]);
seen[j] = 1;
found = 1;
}
}
if (found) goto cleanup;
for (j = 0; afl_environment_variables[j] != NULL; ++j) {
char * afl_env = afl_environment_variables[j] + 4;
size_t afl_env_len = strlen(afl_env);
char * reduced = ck_alloc(afl_env_len + 1);
size_t start = 0;
while (start < afl_env_len) {
size_t end = start + strcspn(afl_env + start, "_") + 1;
memcpy(reduced, afl_env, start);
if (end < afl_env_len)
memcpy(reduced + start, afl_env + end, afl_env_len - end);
reduced[afl_env_len - end + start] = 0;
int distance = string_distance_levenshtein(reduced, env_name);
if (distance < ENV_SIMILARITY_TRESHOLD && seen[j] == 0) {
SAYF("Did you mean %s?\n", afl_environment_variables[j]);
seen[j] = 1;
found = 1;
}
start = end;
};
ck_free(reduced);
}
if (found) goto cleanup;
char * reduced = ck_alloc(env_name_len + 1);
size_t start = 0;
while (start < env_name_len) {
size_t end = start + strcspn(env_name + start, "_") + 1;
memcpy(reduced, env_name, start);
if (end < env_name_len)
memcpy(reduced + start, env_name + end, env_name_len - end);
reduced[env_name_len - end + start] = 0;
for (j = 0; afl_environment_variables[j] != NULL; ++j) {
int distance = string_distance_levenshtein(
afl_environment_variables[j] + 4, reduced);
if (distance < ENV_SIMILARITY_TRESHOLD && seen[j] == 0) {
SAYF("Did you mean %s?\n", afl_environment_variables[j]);
seen[j] = 1;
}
}
start = end;
};
ck_free(reduced);
cleanup:
ck_free(env_name);
ck_free(seen);
}
void check_environment_vars(char **envp) {
if (be_quiet) { return; }
int index = 0, issue_detected = 0;
char *env, *val;
char *env, *val, *ignore = getenv("AFL_IGNORE_UNKNOWN_ENVS");
while ((env = envp[index++]) != NULL) {
if (strncmp(env, "ALF_", 4) == 0 || strncmp(env, "_ALF", 4) == 0 ||
@ -582,11 +721,13 @@ void check_environment_vars(char **envp) {
}
if (match == 0) {
if (match == 0 && !ignore) {
WARNF("Mistyped AFL environment variable: %s", env);
issue_detected = 1;
print_suggested_envs(env);
}
}
@ -994,7 +1135,7 @@ u32 get_map_size(void) {
}
if (map_size % 32) { map_size = (((map_size >> 5) + 1) << 5); }
if (map_size % 64) { map_size = (((map_size >> 6) + 1) << 6); }
}

View File

@ -656,11 +656,11 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
if (!fsrv->map_size) { fsrv->map_size = MAP_SIZE; }
if (unlikely(tmp_map_size % 32)) {
if (unlikely(tmp_map_size % 64)) {
// should not happen
WARNF("Target reported non-aligned map size of %u", tmp_map_size);
tmp_map_size = (((tmp_map_size + 31) >> 5) << 5);
tmp_map_size = (((tmp_map_size + 63) >> 6) << 6);
}

View File

@ -882,32 +882,23 @@ void perform_dry_run(afl_state_t *afl) {
if (afl->timeout_given) {
/* The -t nn+ syntax in the command line sets afl->timeout_given to
'2' and instructs afl-fuzz to tolerate but skip queue entries that
time out. */
/* if we have a timeout but a timeout value was given then always
skip. The '+' meaning has been changed! */
WARNF("Test case results in a timeout (skipping)");
++cal_failures;
q->cal_failed = CAL_CHANCES;
q->disabled = 1;
q->perf_score = 0;
if (afl->timeout_given > 1) {
if (!q->was_fuzzed) {
WARNF("Test case results in a timeout (skipping)");
q->cal_failed = CAL_CHANCES;
++cal_failures;
break;
q->was_fuzzed = 1;
--afl->pending_not_fuzzed;
--afl->active_paths;
}
SAYF("\n" cLRD "[-] " cRST
"The program took more than %u ms to process one of the initial "
"test cases.\n"
" Usually, the right thing to do is to relax the -t option - "
"or to delete it\n"
" altogether and allow the fuzzer to auto-calibrate. That "
"said, if you know\n"
" what you are doing and want to simply skip the unruly test "
"cases, append\n"
" '+' at the end of the value passed to -t ('-t %u+').\n",
afl->fsrv.exec_tmout, afl->fsrv.exec_tmout);
FATAL("Test case '%s' results in a timeout", fn);
break;
} else {
@ -1060,13 +1051,22 @@ void perform_dry_run(afl_state_t *afl) {
p->perf_score = 0;
u32 i = 0;
while (unlikely(afl->queue_buf[i]->disabled)) {
while (unlikely(i < afl->queued_paths && afl->queue_buf[i] &&
afl->queue_buf[i]->disabled)) {
++i;
}
afl->queue = afl->queue_buf[i];
if (i < afl->queued_paths && afl->queue_buf[i]) {
afl->queue = afl->queue_buf[i];
} else {
afl->queue = afl->queue_buf[0];
}
afl->max_depth = 0;
for (i = 0; i < afl->queued_paths; i++) {
@ -2017,7 +2017,7 @@ void setup_dirs_fds(afl_state_t *afl) {
fprintf(afl->fsrv.plot_file,
"# unix_time, cycles_done, cur_path, paths_total, "
"pending_total, pending_favs, map_size, unique_crashes, "
"unique_hangs, max_depth, execs_per_sec\n");
"unique_hangs, max_depth, execs_per_sec, total_execs, edges_found\n");
fflush(afl->fsrv.plot_file);
/* ignore errors */

View File

@ -30,8 +30,6 @@
//#define _DEBUG
//#define CMPLOG_INTROSPECTION
#define COMBINE
#define ARITHMETIC_LESSER_GREATER
// CMP attribute enum
enum {
@ -206,14 +204,31 @@ static void type_replace(afl_state_t *afl, u8 *buf, u32 len) {
case '\t':
c = ' ';
break;
/*
case '\r':
case '\n':
// nothing ...
break;
*/
case '\r':
c = '\n';
break;
case '\n':
c = '\r';
break;
case 0:
c = 1;
break;
case 1:
c = 0;
break;
case 0xff:
c = 0;
break;
default:
c = (buf[i] ^ 0xff);
if (buf[i] < 32) {
c = (buf[i] ^ 0x1f);
} else {
c = (buf[i] ^ 0x7f); // we keep the highest bit
}
}
@ -383,6 +398,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
rng = ranges;
ranges = rng->next;
ck_free(rng);
rng = NULL;
}
@ -421,8 +437,9 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
if (taint) {
if (len / positions == 1 && positions > CMPLOG_POSITIONS_MAX &&
afl->active_paths / afl->colorize_success > CMPLOG_CORPUS_PERCENT) {
if (afl->colorize_success &&
(len / positions == 1 && positions > CMPLOG_POSITIONS_MAX &&
afl->active_paths / afl->colorize_success > CMPLOG_CORPUS_PERCENT)) {
#ifdef _DEBUG
fprintf(stderr, "Colorization unsatisfactory\n");
@ -456,6 +473,15 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
return 0;
checksum_fail:
while (ranges) {
rng = ranges;
ranges = rng->next;
ck_free(rng);
rng = NULL;
}
ck_free(backup);
ck_free(changed);
@ -496,7 +522,7 @@ static u8 its_fuzz(afl_state_t *afl, u8 *buf, u32 len, u8 *status) {
}
#ifdef CMPLOG_TRANSFORM
//#ifdef CMPLOG_SOLVE_TRANSFORM
static int strntoll(const char *str, size_t sz, char **end, int base,
long long *out) {
@ -504,6 +530,8 @@ static int strntoll(const char *str, size_t sz, char **end, int base,
long long ret;
const char *beg = str;
if (!str || !sz) { return 1; }
for (; beg && sz && *beg == ' '; beg++, sz--) {};
if (!sz) return 1;
@ -527,6 +555,8 @@ static int strntoull(const char *str, size_t sz, char **end, int base,
unsigned long long ret;
const char * beg = str;
if (!str || !sz) { return 1; }
for (; beg && sz && *beg == ' '; beg++, sz--)
;
@ -577,7 +607,7 @@ static int is_hex(const char *str) {
}
#ifdef CMPLOG_TRANSFORM_BASE64
#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
// tests 4 bytes at location
static int is_base64(const char *str) {
@ -690,10 +720,10 @@ static void to_base64(u8 *src, u8 *dst, u32 dst_len) {
}
#endif
#endif
//#endif
static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
u64 pattern, u64 repl, u64 o_pattern,
u64 changed_val, u8 attr, u32 idx, u32 taint_len,
@ -717,9 +747,9 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
// o_pattern, pattern, repl, changed_val, idx, taint_len,
// h->shape + 1, attr);
#ifdef CMPLOG_TRANSFORM
//#ifdef CMPLOG_SOLVE_TRANSFORM
// reverse atoi()/strnu?toll() is expensive, so we only do it in lvl 3
if (lvl & LVL3) {
if (afl->cmplog_enable_transform && (lvl & LVL3)) {
u8 * endptr;
u8 use_num = 0, use_unum = 0;
@ -740,11 +770,11 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
#ifdef _DEBUG
#ifdef _DEBUG
if (idx == 0)
fprintf(stderr, "ASCII is=%u use_num=%u use_unum=%u idx=%u %llx==%llx\n",
afl->queue_cur->is_ascii, use_num, use_unum, idx, num, pattern);
#endif
#endif
// num is likely not pattern as atoi("AAA") will be zero...
if (use_num && ((u64)num == pattern || !num)) {
@ -794,37 +824,82 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
// Try to identify transform magic
if (pattern != o_pattern && repl == changed_val && attr <= IS_EQUAL) {
u64 *ptr = (u64 *)&buf[idx];
u64 *o_ptr = (u64 *)&orig_buf[idx];
u64 b_val, o_b_val, mask;
u64 b_val, o_b_val, mask;
u8 bytes;
switch (SHAPE_BYTES(h->shape)) {
case 0:
case 1:
b_val = (u64)(*ptr % 0x100);
bytes = 1;
break;
case 2:
bytes = 2;
break;
case 3:
case 4:
bytes = 4;
break;
default:
bytes = 8;
}
// necessary for preventing heap access overflow
bytes = MIN(bytes, len - idx);
switch (bytes) {
case 0: // cannot happen
b_val = o_b_val = mask = 0; // keep the linters happy
break;
case 1: {
u8 *ptr = (u8 *)&buf[idx];
u8 *o_ptr = (u8 *)&orig_buf[idx];
b_val = (u64)(*ptr);
o_b_val = (u64)(*o_ptr % 0x100);
mask = 0xff;
break;
}
case 2:
case 3:
b_val = (u64)(*ptr % 0x10000);
o_b_val = (u64)(*o_ptr % 0x10000);
case 3: {
u16 *ptr = (u16 *)&buf[idx];
u16 *o_ptr = (u16 *)&orig_buf[idx];
b_val = (u64)(*ptr);
o_b_val = (u64)(*o_ptr);
mask = 0xffff;
break;
}
case 4:
case 5:
case 6:
case 7:
b_val = (u64)(*ptr % 0x100000000);
o_b_val = (u64)(*o_ptr % 0x100000000);
case 7: {
u32 *ptr = (u32 *)&buf[idx];
u32 *o_ptr = (u32 *)&orig_buf[idx];
b_val = (u64)(*ptr);
o_b_val = (u64)(*o_ptr);
mask = 0xffffffff;
break;
default:
b_val = *ptr;
o_b_val = *o_ptr;
}
default: {
u64 *ptr = (u64 *)&buf[idx];
u64 *o_ptr = (u64 *)&orig_buf[idx];
b_val = (u64)(*ptr);
o_b_val = (u64)(*o_ptr);
mask = 0xffffffffffffffff;
}
}
// test for arithmetic, eg. "if ((user_val - 0x1111) == 0x1234) ..."
@ -984,7 +1059,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
#endif
//#endif
// we only allow this for ascii2integer (above) so leave if this is the case
if (unlikely(pattern == o_pattern)) { return 0; }
@ -1009,7 +1084,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
u64 tmp_64 = *buf_64;
*buf_64 = repl;
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
if (*status == 1) { memcpy(cbuf + idx, buf_64, 8); }
#endif
*buf_64 = tmp_64;
@ -1050,7 +1125,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
u32 tmp_32 = *buf_32;
*buf_32 = (u32)repl;
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
if (*status == 1) { memcpy(cbuf + idx, buf_32, 4); }
#endif
*buf_32 = tmp_32;
@ -1084,7 +1159,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
u16 tmp_16 = *buf_16;
*buf_16 = (u16)repl;
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
if (*status == 1) { memcpy(cbuf + idx, buf_16, 2); }
#endif
*buf_16 = tmp_16;
@ -1122,7 +1197,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
u8 tmp_8 = *buf_8;
*buf_8 = (u8)repl;
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
if (*status == 1) { cbuf[idx] = *buf_8; }
#endif
*buf_8 = tmp_8;
@ -1139,8 +1214,12 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
// 16 = modified float, 32 = modified integer (modified = wont match
// in original buffer)
#ifdef ARITHMETIC_LESSER_GREATER
if (lvl < LVL3 || attr == IS_TRANSFORM) { return 0; }
//#ifdef CMPLOG_SOLVE_ARITHMETIC
if (!afl->cmplog_enable_arith || lvl < LVL3 || attr == IS_TRANSFORM) {
return 0;
}
if (!(attr & (IS_GREATER | IS_LESSER)) || SHAPE_BYTES(h->shape) < 4) {
@ -1245,11 +1324,11 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
double *f = (double *)&repl;
float g = (float)*f;
repl_new = 0;
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
memcpy((char *)&repl_new, (char *)&g, 4);
#else
#else
memcpy(((char *)&repl_new) + 4, (char *)&g, 4);
#endif
#endif
changed_val = repl_new;
h->shape = 3; // modify shape
@ -1304,7 +1383,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
}
#endif /* ARITHMETIC_LESSER_GREATER */
//#endif /* CMPLOG_SOLVE_ARITHMETIC
return 0;
@ -1366,7 +1445,7 @@ static u8 cmp_extend_encodingN(afl_state_t *afl, struct cmp_header *h,
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
if (*status == 1) { memcpy(cbuf + idx, r, shape); }
#endif
@ -1463,7 +1542,7 @@ static void try_to_add_to_dictN(afl_state_t *afl, u128 v, u8 size) {
for (k = 0; k < size; ++k) {
#else
u32 off = 16 - size;
u32 off = 16 - size;
for (k = 16 - size; k < 16; ++k) {
#endif
@ -1499,11 +1578,12 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
struct cmp_header *h = &afl->shm.cmp_map->headers[key];
struct tainted * t;
u32 i, j, idx, taint_len, loggeds;
u32 have_taint = 1, is_n = 0;
u32 have_taint = 1;
u8 status = 0, found_one = 0;
/* loop cmps are useless, detect and ignore them */
#ifdef WORD_SIZE_64
u32 is_n = 0;
u128 s128_v0 = 0, s128_v1 = 0, orig_s128_v0 = 0, orig_s128_v1 = 0;
#endif
u64 s_v0, s_v1;
@ -1521,6 +1601,7 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
}
#ifdef WORD_SIZE_64
switch (SHAPE_BYTES(h->shape)) {
case 1:
@ -1533,6 +1614,8 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
}
#endif
for (i = 0; i < loggeds; ++i) {
struct cmp_operands *o = &afl->shm.cmp_map->log[key][i];
@ -1774,12 +1857,12 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
u32 taint_len, u8 *orig_buf, u8 *buf, u8 *cbuf,
u32 len, u8 lvl, u8 *status) {
#ifndef COMBINE
#ifndef CMPLOG_COMBINE
(void)(cbuf);
#endif
#ifndef CMPLOG_TRANSFORM
(void)(changed_val);
#endif
//#ifndef CMPLOG_SOLVE_TRANSFORM
// (void)(changed_val);
//#endif
u8 save[40];
u32 saved_idx = idx, pre, from = 0, to = 0, i, j;
@ -1847,7 +1930,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; }
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
if (*status == 1) { memcpy(cbuf + idx, &buf[idx], i); }
#endif
@ -1859,16 +1942,16 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
}
#ifdef CMPLOG_TRANSFORM
//#ifdef CMPLOG_SOLVE_TRANSFORM
if (*status == 1) return 0;
if (lvl & LVL3) {
if (afl->cmplog_enable_transform && (lvl & LVL3)) {
u32 toupper = 0, tolower = 0, xor = 0, arith = 0, tohex = 0, fromhex = 0;
#ifdef CMPLOG_TRANSFORM_BASE64
#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
u32 tob64 = 0, fromb64 = 0;
#endif
#endif
u32 from_0 = 0, from_x = 0, from_X = 0, from_slash = 0, from_up = 0;
u32 to_0 = 0, to_x = 0, to_slash = 0, to_up = 0;
u8 xor_val[32], arith_val[32], tmp[48];
@ -1964,7 +2047,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
}
#ifdef CMPLOG_TRANSFORM_BASE64
#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
if (i % 3 == 2 && i < 24) {
if (is_base64(repl + ((i / 3) << 2))) tob64 += 3;
@ -1977,7 +2060,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
}
#endif
#endif
if ((o_pattern[i] ^ orig_buf[idx + i]) == xor_val[i] && xor_val[i]) {
@ -2005,20 +2088,20 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
}
#ifdef _DEBUG
#ifdef _DEBUG
fprintf(stderr,
"RTN idx=%u loop=%u xor=%u arith=%u tolower=%u toupper=%u "
"tohex=%u fromhex=%u to_0=%u to_slash=%u to_x=%u "
"from_0=%u from_slash=%u from_x=%u\n",
idx, i, xor, arith, tolower, toupper, tohex, fromhex, to_0,
to_slash, to_x, from_0, from_slash, from_x);
#ifdef CMPLOG_TRANSFORM_BASE64
#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
fprintf(stderr, "RTN idx=%u loop=%u tob64=%u from64=%u\n", tob64,
fromb64);
#endif
#endif
#endif
#ifdef CMPLOG_TRANSFORM_BASE64
#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
// input is base64 and converted to binary? convert repl to base64!
if ((i % 4) == 3 && i < 24 && fromb64 > i) {
@ -2041,7 +2124,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
}
#endif
#endif
// input is converted to hex? convert repl to binary!
if (i < 16 && tohex > i) {
@ -2170,16 +2253,16 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
}
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
if (*status == 1) { memcpy(cbuf + idx, &buf[idx], i + 1); }
#endif
#endif
if ((i >= 7 &&
(i >= xor&&i >= arith &&i >= tolower &&i >= toupper &&i > tohex &&i >
(fromhex + from_0 + from_x + from_slash + 1)
#ifdef CMPLOG_TRANSFORM_BASE64
#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
&& i > tob64 + 3 && i > fromb64 + 4
#endif
#endif
)) ||
repl[i] != changed_val[i] || *status == 1) {
@ -2193,7 +2276,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
}
#endif
//#endif
return 0;
@ -2469,7 +2552,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
u32 lvl = (afl->queue_cur->colorized ? 0 : LVL1) +
(afl->cmplog_lvl == CMPLOG_LVL_MAX ? LVL3 : 0);
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
u8 *cbuf = afl_realloc((void **)&afl->in_scratch_buf, len + 128);
memcpy(cbuf, orig_buf, len);
u8 *virgin_backup = afl_realloc((void **)&afl->ex_buf, afl->shm.map_size);
@ -2526,9 +2609,9 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
} else if ((lvl & LVL1)
#ifdef CMPLOG_TRANSFORM
|| (lvl & LVL3)
#endif
//#ifdef CMPLOG_SOLVE_TRANSFORM
|| ((lvl & LVL3) && afl->cmplog_enable_transform)
//#endif
) {
if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, cbuf, len, lvl, taint))) {
@ -2583,7 +2666,7 @@ exit_its:
}
#ifdef COMBINE
#ifdef CMPLOG_COMBINE
if (afl->queued_paths + afl->unique_crashes > orig_hit_cnt + 1) {
// copy the current virgin bits so we can recover the information
@ -2607,9 +2690,9 @@ exit_its:
}
#else
u32 *v = (u64 *)afl->virgin_bits;
u32 *s = (u64 *)virgin_save;
u32 i;
u32 *v = (u32 *)afl->virgin_bits;
u32 *s = (u32 *)virgin_save;
u32 i;
for (i = 0; i < (afl->shm.map_size >> 2); i++) {
v[i] &= s[i];
@ -2622,7 +2705,7 @@ exit_its:
dump("COMB", cbuf, len);
if (status == 1) {
fprintf(stderr, "NEW COMBINED\n");
fprintf(stderr, "NEW CMPLOG_COMBINED\n");
} else {
@ -2672,7 +2755,3 @@ exit_its:
}
#ifdef COMBINE
#undef COMBINE
#endif

View File

@ -707,6 +707,8 @@ void sync_fuzzers(afl_state_t *afl) {
if (afl->foreign_sync_cnt) read_foreign_testcases(afl, 0);
afl->last_sync_time = get_cur_time();
}
/* Trim all new test cases to save cycles when doing deterministic checks. The

View File

@ -486,6 +486,8 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
WARNF("Mistyped AFL environment variable: %s", env);
issue_detected = 1;
print_suggested_envs(env);
}
}

View File

@ -185,15 +185,14 @@ void load_stats_file(afl_state_t *afl) {
/* Update stats file for unattended monitoring. */
void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
double eps) {
void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
double stability, double eps) {
#ifndef __HAIKU__
struct rusage rus;
#endif
u64 cur_time = get_cur_time();
u32 t_bytes = count_non_255_bytes(afl, afl->virgin_bits);
u8 fn[PATH_MAX];
FILE *f;
@ -353,9 +352,11 @@ void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
/* Update the plot file if there is a reason to. */
void maybe_update_plot_file(afl_state_t *afl, double bitmap_cvg, double eps) {
void maybe_update_plot_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
double eps) {
if (unlikely(afl->plot_prev_qp == afl->queued_paths &&
if (unlikely(afl->stop_soon) ||
unlikely(afl->plot_prev_qp == afl->queued_paths &&
afl->plot_prev_pf == afl->pending_favored &&
afl->plot_prev_pnf == afl->pending_not_fuzzed &&
afl->plot_prev_ce == afl->current_entry &&
@ -384,16 +385,16 @@ void maybe_update_plot_file(afl_state_t *afl, double bitmap_cvg, double eps) {
/* Fields in the file:
unix_time, afl->cycles_done, cur_path, paths_total, paths_not_fuzzed,
favored_not_fuzzed, afl->unique_crashes, afl->unique_hangs, afl->max_depth,
execs_per_sec */
favored_not_fuzzed, unique_crashes, unique_hangs, max_depth,
execs_per_sec, edges_found */
fprintf(
afl->fsrv.plot_file,
"%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f, %llu\n",
get_cur_time() / 1000, afl->queue_cycle - 1, afl->current_entry,
afl->queued_paths, afl->pending_not_fuzzed, afl->pending_favored,
bitmap_cvg, afl->unique_crashes, afl->unique_hangs, afl->max_depth, eps,
afl->plot_prev_ed); /* ignore errors */
fprintf(afl->fsrv.plot_file,
"%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f, %llu, "
"%u\n",
get_cur_time() / 1000, afl->queue_cycle - 1, afl->current_entry,
afl->queued_paths, afl->pending_not_fuzzed, afl->pending_favored,
bitmap_cvg, afl->unique_crashes, afl->unique_hangs, afl->max_depth,
eps, afl->plot_prev_ed, t_bytes); /* ignore errors */
fflush(afl->fsrv.plot_file);
@ -532,7 +533,8 @@ void show_stats(afl_state_t *afl) {
if (cur_ms - afl->stats_last_stats_ms > STATS_UPDATE_SEC * 1000) {
afl->stats_last_stats_ms = cur_ms;
write_stats_file(afl, t_byte_ratio, stab_ratio, afl->stats_avg_exec);
write_stats_file(afl, t_bytes, t_byte_ratio, stab_ratio,
afl->stats_avg_exec);
save_auto(afl);
write_bitmap(afl);
@ -555,7 +557,7 @@ void show_stats(afl_state_t *afl) {
if (cur_ms - afl->stats_last_plot_ms > PLOT_UPDATE_SEC * 1000) {
afl->stats_last_plot_ms = cur_ms;
maybe_update_plot_file(afl, t_byte_ratio, afl->stats_avg_exec);
maybe_update_plot_file(afl, t_bytes, t_byte_ratio, afl->stats_avg_exec);
}
@ -1217,7 +1219,7 @@ void show_init_stats(afl_state_t *afl) {
stringify_int(IB(0), min_us), stringify_int(IB(1), max_us),
stringify_int(IB(2), avg_us));
if (!afl->timeout_given) {
if (afl->timeout_given != 1) {
/* Figure out the appropriate timeout. The basic idea is: 5x average or
1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.

View File

@ -103,7 +103,10 @@ static void usage(u8 *argv0, int more_help) {
" quad -- see docs/power_schedules.md\n"
" -f file - location read by the fuzzed program (default: stdin "
"or @@)\n"
" -t msec - timeout for each run (auto-scaled, 50-%u ms)\n"
" -t msec - timeout for each run (auto-scaled, default %u ms). "
"Add a '+'\n"
" to auto-calculate the timeout, the value being the "
"maximum.\n"
" -m megs - memory limit for child process (%u MB, 0 = no limit "
"[default])\n"
" -Q - use binary-only instrumentation (QEMU mode)\n"
@ -122,10 +125,10 @@ static void usage(u8 *argv0, int more_help) {
" -c program - enable CmpLog by specifying a binary compiled for "
"it.\n"
" if using QEMU, just use -c 0.\n"
" -l cmplog_level - set the complexity/intensivity of CmpLog.\n"
" Values: 1 (basic), 2 (larger files) and 3 "
"(transform)\n\n"
" -l cmplog_opts - CmpLog configuration values (e.g. \"2AT\"):\n"
" 1=small files (default), 2=larger files, 3=all "
"files,\n"
" A=arithmetic solving, T=transformational solving.\n\n"
"Fuzzing behavior settings:\n"
" -Z - sequential queue selection instead of weighted "
"random\n"
@ -137,8 +140,8 @@ static void usage(u8 *argv0, int more_help) {
"Testing settings:\n"
" -s seed - use a fixed seed for the RNG\n"
" -V seconds - fuzz for a specific time then terminate\n"
" -E execs - fuzz for a approx. no of total executions then "
" -V seconds - fuzz for a specified time then terminate\n"
" -E execs - fuzz for an approx. no. of total executions then "
"terminate\n"
" Note: not precise and can have several more "
"executions.\n\n"
@ -198,6 +201,7 @@ static void usage(u8 *argv0, int more_help) {
"AFL_FORKSRV_INIT_TMOUT: time spent waiting for forkserver during startup (in milliseconds)\n"
"AFL_HANG_TMOUT: override timeout value (in milliseconds)\n"
"AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES: don't warn about core dump handlers\n"
"AFL_IGNORE_UNKNOWN_ENVS: don't warn on unknown env vars\n"
"AFL_IMPORT_FIRST: sync and import test cases from other fuzzer instances first\n"
"AFL_KILL_SIGNAL: Signal ID delivered to child processes on timeout, etc. (default: SIGKILL)\n"
"AFL_MAP_SIZE: the shared memory size for that target. must be >= the size\n"
@ -552,13 +556,22 @@ int main(int argc, char **argv_orig, char **envp) {
case 'F': /* foreign sync dir */
if (!afl->is_main_node)
if (!optarg) { FATAL("Missing path for -F"); }
if (!afl->is_main_node) {
FATAL(
"Option -F can only be specified after the -M option for the "
"main fuzzer of a fuzzing campaign");
if (afl->foreign_sync_cnt >= FOREIGN_SYNCS_MAX)
}
if (afl->foreign_sync_cnt >= FOREIGN_SYNCS_MAX) {
FATAL("Maximum %u entried of -F option can be specified",
FOREIGN_SYNCS_MAX);
}
afl->foreign_syncs[afl->foreign_sync_cnt].dir = optarg;
while (afl->foreign_syncs[afl->foreign_sync_cnt]
.dir[strlen(afl->foreign_syncs[afl->foreign_sync_cnt].dir) -
@ -802,13 +815,36 @@ int main(int argc, char **argv_orig, char **envp) {
case 'l': {
afl->cmplog_lvl = atoi(optarg);
if (afl->cmplog_lvl < 1 || afl->cmplog_lvl > CMPLOG_LVL_MAX) {
if (!optarg) { FATAL("missing parameter for 'l'"); }
char *c = optarg;
while (*c) {
FATAL(
"Bad complog level value, accepted values are 1 (default), 2 and "
"%u.",
CMPLOG_LVL_MAX);
switch (*c) {
case '0':
case '1':
afl->cmplog_lvl = 1;
break;
case '2':
afl->cmplog_lvl = 2;
break;
case '3':
afl->cmplog_lvl = 3;
break;
case 'a':
case 'A':
afl->cmplog_enable_arith = 1;
break;
case 't':
case 'T':
afl->cmplog_enable_transform = 1;
break;
default:
FATAL("Unknown option value '%c' in -l %s", *c, optarg);
}
++c;
}
@ -1418,7 +1454,7 @@ int main(int argc, char **argv_orig, char **envp) {
}
if (!afl->timeout_given) { find_timeout(afl); }
if (!afl->timeout_given) { find_timeout(afl); } // only for resumes!
if ((afl->tmp_dir = afl->afl_env.afl_tmpdir) != NULL &&
!afl->in_place_resume) {
@ -1672,20 +1708,49 @@ int main(int argc, char **argv_orig, char **envp) {
cull_queue(afl);
if (!afl->pending_not_fuzzed) {
// ensure we have at least one seed that is not disabled.
u32 entry, valid_seeds = 0;
for (entry = 0; entry < afl->queued_paths; ++entry)
if (!afl->queue_buf[entry]->disabled) { ++valid_seeds; }
if (!afl->pending_not_fuzzed || !valid_seeds) {
FATAL("We need at least one valid input seed that does not crash!");
}
if (afl->timeout_given == 2) { // -t ...+ option
if (valid_seeds == 1) {
WARNF(
"Only one valid seed is present, auto-calculating the timeout is "
"disabled!");
afl->timeout_given = 1;
} else {
u64 max_ms = 0;
for (entry = 0; entry < afl->queued_paths; ++entry)
if (!afl->queue_buf[entry]->disabled)
if (afl->queue_buf[entry]->exec_us > max_ms)
max_ms = afl->queue_buf[entry]->exec_us;
afl->fsrv.exec_tmout = max_ms;
}
}
show_init_stats(afl);
if (unlikely(afl->old_seed_selection)) seek_to = find_start_position(afl);
afl->start_time = get_cur_time();
if (afl->in_place_resume || afl->afl_env.afl_autoresume) load_stats_file(afl);
write_stats_file(afl, 0, 0, 0);
maybe_update_plot_file(afl, 0, 0);
write_stats_file(afl, 0, 0, 0, 0);
maybe_update_plot_file(afl, 0, 0, 0);
save_auto(afl);
if (afl->stop_soon) { goto stop_fuzzing; }
@ -1735,12 +1800,15 @@ int main(int argc, char **argv_orig, char **envp) {
if (unlikely(afl->old_seed_selection)) {
afl->current_entry = 0;
while (unlikely(afl->queue_buf[afl->current_entry]->disabled)) {
while (unlikely(afl->current_entry < afl->queued_paths &&
afl->queue_buf[afl->current_entry]->disabled)) {
++afl->current_entry;
}
if (afl->current_entry >= afl->queued_paths) { afl->current_entry = 0; }
afl->queue_cur = afl->queue_buf[afl->current_entry];
if (unlikely(seek_to)) {
@ -1943,15 +2011,24 @@ int main(int argc, char **argv_orig, char **envp) {
if (unlikely(afl->is_main_node)) {
if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) {
if (unlikely(get_cur_time() >
(SYNC_TIME >> 1) + afl->last_sync_time)) {
sync_fuzzers(afl);
if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) {
sync_fuzzers(afl);
}
}
} else {
if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); }
if (unlikely(get_cur_time() > SYNC_TIME + afl->last_sync_time)) {
if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); }
}
}
@ -1966,12 +2043,12 @@ int main(int argc, char **argv_orig, char **envp) {
}
write_bitmap(afl);
maybe_update_plot_file(afl, 0, 0);
maybe_update_plot_file(afl, 0, 0, 0);
save_auto(afl);
stop_fuzzing:
write_stats_file(afl, 0, 0, 0);
write_stats_file(afl, 0, 0, 0, 0);
afl->force_ui_update = 1; // ensure the screen is reprinted
show_stats(afl); // print the screen one last time

View File

@ -1013,7 +1013,6 @@ int main(int argc, char **argv_orig, char **envp) {
if (in_dir) {
if (at_file) { PFATAL("Options -A and -i are mutually exclusive"); }
detect_file_args(argv + optind, "", &fsrv->use_stdin);
} else {
@ -1169,8 +1168,9 @@ int main(int argc, char **argv_orig, char **envp) {
}
stdin_file =
alloc_printf("%s/.afl-showmap-temp-%u", use_dir, (u32)getpid());
stdin_file = at_file ? strdup(at_file)
: (char *)alloc_printf("%s/.afl-showmap-temp-%u",
use_dir, (u32)getpid());
unlink(stdin_file);
atexit(at_exit_handler);
fsrv->out_file = stdin_file;

View File

@ -7,7 +7,7 @@ AFL_GCC=afl-gcc
$ECHO "$BLUE[*] Testing: ${AFL_GCC}, afl-showmap, afl-fuzz, afl-cmin and afl-tmin"
test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc" -o "$SYS" = "i386" && {
test -e ../${AFL_GCC} -a -e ../afl-showmap -a -e ../afl-fuzz && {
../${AFL_GCC} -o test-instr.plain ../test-instr.c > /dev/null 2>&1
../${AFL_GCC} -o test-instr.plain -O0 ../test-instr.c > /dev/null 2>&1
AFL_HARDEN=1 ../${AFL_GCC} -o test-compcov.harden test-compcov.c > /dev/null 2>&1
test -e test-instr.plain && {
$ECHO "$GREEN[+] ${AFL_GCC} compilation succeeded"
@ -39,7 +39,7 @@ test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc
$ECHO "$RED[!] ${AFL_GCC} failed"
echo CUT------------------------------------------------------------------CUT
uname -a
../${AFL_GCC} -o test-instr.plain ../test-instr.c
../${AFL_GCC} -o test-instr.plain -O0 ../test-instr.c
echo CUT------------------------------------------------------------------CUT
CODE=1
}
@ -128,7 +128,7 @@ test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc
$ECHO "$BLUE[*] Testing: ${AFL_GCC}, afl-showmap, afl-fuzz, afl-cmin and afl-tmin"
SKIP=
test -e ../${AFL_GCC} -a -e ../afl-showmap -a -e ../afl-fuzz && {
../${AFL_GCC} -o test-instr.plain ../test-instr.c > /dev/null 2>&1
../${AFL_GCC} -o test-instr.plain -O0 ../test-instr.c > /dev/null 2>&1
AFL_HARDEN=1 ../${AFL_GCC} -o test-compcov.harden test-compcov.c > /dev/null 2>&1
test -e test-instr.plain && {
$ECHO "$GREEN[+] ${AFL_GCC} compilation succeeded"

View File

@ -39,14 +39,7 @@ test -e ../afl-qemu-trace && {
$ECHO "$GREY[*] running afl-fuzz for qemu_mode AFL_ENTRYPOINT, this will take approx 6 seconds"
{
{
if file test-instr | grep -q "32-bit"; then
# for 32-bit reduce 8 nibbles to the lower 7 nibbles
ADDR_LOWER_PART=`nm test-instr | grep "T main" | awk '{print $1}' | sed 's/^.//'`
else
# for 64-bit reduce 16 nibbles to the lower 9 nibbles
ADDR_LOWER_PART=`nm test-instr | grep "T main" | awk '{print $1}' | sed 's/^.......//'`
fi
export AFL_ENTRYPOINT=`expr 0x4${ADDR_LOWER_PART}`
export AFL_ENTRYPOINT=`printf 1 | AFL_DEBUG=1 ../afl-qemu-trace ./test-instr 2>&1 >/dev/null | awk '/forkserver/{print $4; exit}'`
$ECHO AFL_ENTRYPOINT=$AFL_ENTRYPOINT - $(nm test-instr | grep "T main") - $(file ./test-instr)
../afl-fuzz -m ${MEM_LIMIT} -V2 -Q -i in -o out -- ./test-instr
unset AFL_ENTRYPOINT

View File

@ -14,7 +14,7 @@ test -d ../unicorn_mode/unicornafl -a -e ../unicorn_mode/unicornafl/samples/shel
EASY_INSTALL_FOUND=0
for PYTHON in $PYTHONS ; do
if $PYTHON -c "help('easy_install');" </dev/null | grep -q module ; then
if $PYTHON -c "import setuptools" ; then
EASY_INSTALL_FOUND=1
PY=$PYTHON

View File

@ -8,19 +8,19 @@ The CompareCoverage and NeverZero counters features are by Andrea Fioraldi <andr
## 1) Introduction
The code in ./unicorn_mode allows you to build a standalone feature that
leverages the Unicorn Engine and allows callers to obtain instrumentation
The code in ./unicorn_mode allows you to build the [Unicorn Engine](https://github.com/unicorn-engine/unicorn) with AFL support.
This means you can run anything that can be emulated in unicorn and obtain instrumentation
output for black-box, closed-source binary code snippets. This mechanism
can be then used by afl-fuzz to stress-test targets that couldn't be built
with afl-gcc or used in QEMU mode, or with other extensions such as
TriforceAFL.
with afl-cc or used in QEMU mode.
There is a significant performance penalty compared to native AFL,
but at least we're able to use AFL++ on these binaries, right?
## 2) How to use
Requirements: you need an installed python environment.
First, you will need a working harness for your target in unicorn, using Python, C, or Rust.
For some pointers for more advanced emulation, take a look at [BaseSAFE](https://github.com/fgsect/BaseSAFE) and [Qiling](https://github.com/qilingframework/qiling).
### Building AFL++'s Unicorn Mode
@ -34,23 +34,23 @@ cd unicorn_mode
```
NOTE: This script checks out a Unicorn Engine fork as submodule that has been tested
and is stable-ish, based on the unicorn engine master.
and is stable-ish, based on the unicorn engine `next` branch.
Building Unicorn will take a little bit (~5-10 minutes). Once it completes
it automatically compiles a sample application and verifies that it works.
### Fuzzing with Unicorn Mode
To really use unicorn-mode effectively you need to prepare the following:
To use unicorn-mode effectively you need to prepare the following:
* Relevant binary code to be fuzzed
* Knowledge of the memory map and good starting state
* Folder containing sample inputs to start fuzzing with
+ Same ideas as any other AFL inputs
+ Quality/speed of results will depend greatly on quality of starting
+ Quality/speed of results will depend greatly on the quality of starting
samples
+ See AFL's guidance on how to create a sample corpus
* Unicornafl-based test harness which:
* Unicornafl-based test harness in Rust, C, or Python, which:
+ Adds memory map regions
+ Loads binary code into memory
+ Calls uc.afl_fuzz() / uc.afl_start_forkserver
@ -59,13 +59,13 @@ To really use unicorn-mode effectively you need to prepare the following:
the test harness
+ Presumably the data to be fuzzed is at a fixed buffer address
+ If input constraints (size, invalid bytes, etc.) are known they
should be checked after the file is loaded. If a constraint
fails, just exit the test harness. AFL will treat the input as
should be checked in the place_input handler. If a constraint
fails, just return false from the handler. AFL will treat the input as
'uninteresting' and move on.
+ Sets up registers and memory state for beginning of test
+ Emulates the interested code from beginning to end
+ Emulates the interesting code from beginning to end
+ If a crash is detected, the test harness must 'crash' by
throwing a signal (SIGSEGV, SIGKILL, SIGABORT, etc.)
throwing a signal (SIGSEGV, SIGKILL, SIGABORT, etc.), or indicate a crash in the crash validation callback.
Once you have all those things ready to go you just need to run afl-fuzz in
'unicorn-mode' by passing in the '-U' flag:
@ -79,11 +79,12 @@ AFL's main documentation for more info about how to use afl-fuzz effectively.
For a much clearer vision of what all of this looks like, please refer to the
sample provided in the 'unicorn_mode/samples' directory. There is also a blog
post that goes over the basics at:
post that uses slightly older concepts, but describes the general ideas, at:
[https://medium.com/@njvoss299/afl-unicorn-fuzzing-arbitrary-binary-code-563ca28936bf](https://medium.com/@njvoss299/afl-unicorn-fuzzing-arbitrary-binary-code-563ca28936bf)
The 'helper_scripts' directory also contains several helper scripts that allow you
The ['helper_scripts'](./helper_scripts) directory also contains several helper scripts that allow you
to dump context from a running process, load it, and hook heap allocations. For details
on how to use this check out the follow-up blog post to the one linked above.
@ -92,10 +93,10 @@ A example use of AFL-Unicorn mode is discussed in the paper Unicorefuzz:
## 3) Options
As for the QEMU-based instrumentation, the afl-unicorn twist of afl++
comes with a sub-instruction based instrumentation similar in purpose to laf-intel.
As for the QEMU-based instrumentation, unicornafl comes with a sub-instruction based instrumentation similar in purpose to laf-intel.
The options that enable Unicorn CompareCoverage are the same used for QEMU.
This will split up each multi-byte compare to give feedback for each correct byte.
AFL_COMPCOV_LEVEL=1 is to instrument comparisons with only immediate values.
AFL_COMPCOV_LEVEL=2 instruments all comparison instructions.
@ -119,6 +120,20 @@ unicornafl.monkeypatch()
This will replace all unicorn imports with unicornafl imports.
Refer to the [samples/arm_example/arm_tester.c](samples/arm_example/arm_tester.c) for an example
of how to do this properly! If you don't get this right, AFL will not
load any mutated inputs and your fuzzing will be useless!
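As a quick illustrative sketch (not taken from the samples), the intended import order looks like this; the only call assumed here is the `unicornafl.monkeypatch()` mentioned above:
```python
import unicornafl

# Patch before anything else imports unicorn, so every later
# "import unicorn" resolves to the AFL-instrumented unicornafl engine.
unicornafl.monkeypatch()

import unicorn  # now backed by unicornafl
```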
## 5) Examples
Apart from reading the documentation in `afl.c` and the python bindings of unicornafl, the best documentation are the [samples/](./samples).
The following examples exist at the time of writing:
- c: A simple example of how to use the C bindings
- compcov_x64: A Python example that uses compcov to traverse hard-to-reach blocks
- persistent: A C example using persistent mode for maximum speed, resetting the target state between each iteration
- simple: A simple Python example
- speedtest/c: The C harness for an example target, used to compare the C, Python, and Rust bindings and fix speed issues
- speedtest/python: Fuzzing the same target in Python
- speedtest/rust: Fuzzing the same target using a Rust harness
Usually, the place to look at is the `harness` in each folder. The source code in each harness is pretty well documented.
Most harnesses also have the `afl-fuzz` commandline, or even offer a `make fuzz` Makefile target.
Targets in these folders, if x86, can usually be made using `make target` in each folder or get shipped pre-built (plus their source).
Especially take a look at the [speedtest documentation](./samples/speedtest/README.md) to see how the languages compare.
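For orientation, here is a minimal, hypothetical Python harness following the shape described above. Everything concrete in it (load addresses, register setup, the `target_code.bin` file name, and the exact `afl_fuzz()` and callback signatures) is an illustrative assumption; check the samples and the bindings of your unicornafl version for the real API.
```python
import sys

import unicornafl

unicornafl.monkeypatch()  # make plain unicorn use the AFL-instrumented engine

from unicorn import Uc, UC_ARCH_X86, UC_MODE_64
from unicorn.x86_const import UC_X86_REG_RDI, UC_X86_REG_RIP

CODE_ADDR = 0x100000          # hypothetical load address of the code snippet
INPUT_ADDR = 0x200000         # hypothetical fixed buffer the target reads from
END_ADDR = CODE_ADDR + 0x40   # hypothetical address where emulation should stop


def place_input(uc, data, persistent_round, ctx):
    # Check known input constraints here; returning False tells AFL++ the
    # input is uninteresting and it moves on.
    if len(data) > 0x1000:
        return False
    uc.mem_write(INPUT_ADDR, data)
    uc.reg_write(UC_X86_REG_RDI, INPUT_ADDR)
    return True


uc = Uc(UC_ARCH_X86, UC_MODE_64)

# Map memory and load the binary code to be fuzzed (hypothetical file name).
uc.mem_map(CODE_ADDR, 0x1000)
uc.mem_map(INPUT_ADDR, 0x1000)
with open("target_code.bin", "rb") as f:
    uc.mem_write(CODE_ADDR, f.read())

# Good starting state: set up registers before handing control to AFL++.
uc.reg_write(UC_X86_REG_RIP, CODE_ADDR)

# Forks, places each mutated input via the callback, and emulates until one
# of the exit addresses is reached. Signature follows the 2021-era bindings
# and may differ in your unicornafl version.
uc.afl_fuzz(sys.argv[1], place_input, [END_ADDR])
```
Such a harness is then started under afl-fuzz with the '-U' flag and a directory of seed inputs, as described in the fuzzing section above.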

View File

@ -117,19 +117,19 @@ done
# some python version should be available now
PYTHONS="`command -v python3` `command -v python` `command -v python2`"
EASY_INSTALL_FOUND=0
SETUPTOOLS_FOUND=0
for PYTHON in $PYTHONS ; do
if $PYTHON -c "import setuptools" ; then
EASY_INSTALL_FOUND=1
SETUPTOOLS_FOUND=1
PYTHONBIN=$PYTHON
break
fi
done
if [ "0" = $EASY_INSTALL_FOUND ]; then
if [ "0" = $SETUPTOOLS_FOUND ]; then
echo "[-] Error: Python setup-tools not found. Run 'sudo apt-get install python-setuptools', or install python3-setuptools, or run '$PYTHONBIN -m ensurepip', or create a virtualenv, or ..."
PREREQ_NOTFOUND=1

View File

@ -45,30 +45,31 @@ MAX_SEG_SIZE = 128 * 1024 * 1024
INDEX_FILE_NAME = "_index.json"
#----------------------
#---- Helper Functions
# ----------------------
# ---- Helper Functions
def map_arch():
arch = get_arch() # from GEF
if 'x86_64' in arch or 'x86-64' in arch:
arch = get_arch() # from GEF
if "x86_64" in arch or "x86-64" in arch:
return "x64"
elif 'x86' in arch or 'i386' in arch:
elif "x86" in arch or "i386" in arch:
return "x86"
elif 'aarch64' in arch or 'arm64' in arch:
elif "aarch64" in arch or "arm64" in arch:
return "arm64le"
elif 'aarch64_be' in arch:
elif "aarch64_be" in arch:
return "arm64be"
elif 'armeb' in arch:
elif "armeb" in arch:
# check for THUMB mode
cpsr = get_register('$cpsr')
if (cpsr & (1 << 5)):
cpsr = get_register("$cpsr")
if cpsr & (1 << 5):
return "armbethumb"
else:
return "armbe"
elif 'arm' in arch:
elif "arm" in arch:
# check for THUMB mode
cpsr = get_register('$cpsr')
if (cpsr & (1 << 5)):
cpsr = get_register("$cpsr")
if cpsr & (1 << 5):
return "armlethumb"
else:
return "armle"
@ -76,8 +77,9 @@ def map_arch():
return ""
#-----------------------
#---- Dumping functions
# -----------------------
# ---- Dumping functions
def dump_arch_info():
arch_info = {}
@ -89,7 +91,7 @@ def dump_regs():
reg_state = {}
for reg in current_arch.all_registers:
reg_val = get_register(reg)
reg_state[reg.strip().strip('$')] = reg_val
reg_state[reg.strip().strip("$")] = reg_val
return reg_state
@ -108,47 +110,76 @@ def dump_process_memory(output_dir):
if entry.page_start == entry.page_end:
continue
seg_info = {'start': entry.page_start, 'end': entry.page_end, 'name': entry.path, 'permissions': {
"r": entry.is_readable() > 0,
"w": entry.is_writable() > 0,
"x": entry.is_executable() > 0
}, 'content_file': ''}
seg_info = {
"start": entry.page_start,
"end": entry.page_end,
"name": entry.path,
"permissions": {
"r": entry.is_readable() > 0,
"w": entry.is_writable() > 0,
"x": entry.is_executable() > 0,
},
"content_file": "",
}
# "(deleted)" may or may not be valid, but don't push it.
if entry.is_readable() and not '(deleted)' in entry.path:
if entry.is_readable() and not "(deleted)" in entry.path:
try:
# Compress and dump the content to a file
seg_content = read_memory(entry.page_start, entry.size)
if(seg_content == None):
print("Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(entry.page_start, entry.path))
if seg_content == None:
print(
"Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(
entry.page_start, entry.path
)
)
else:
print("Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(entry.page_start, len(seg_content), entry.path, repr(seg_info['permissions'])))
print(
"Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(
entry.page_start,
len(seg_content),
entry.path,
repr(seg_info["permissions"]),
)
)
compressed_seg_content = zlib.compress(seg_content)
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
seg_info["content_file"] = md5_sum
# Write the compressed contents to disk
out_file = open(os.path.join(output_dir, md5_sum), 'wb')
out_file = open(os.path.join(output_dir, md5_sum), "wb")
out_file.write(compressed_seg_content)
out_file.close()
except:
print("Exception reading segment ({}): {}".format(entry.path, sys.exc_info()[0]))
print(
"Exception reading segment ({}): {}".format(
entry.path, sys.exc_info()[0]
)
)
else:
print("Skipping segment {0}@0x{1:016x}".format(entry.path, entry.page_start))
print(
"Skipping segment {0}@0x{1:016x}".format(entry.path, entry.page_start)
)
# Add the segment to the list
final_segment_list.append(seg_info)
return final_segment_list
#---------------------------------------------
#---- ARM Extention (dump floating point regs)
# ---------------------------------------------
# ---- ARM Extention (dump floating point regs)
def dump_float(rge=32):
reg_convert = ""
if map_arch() == "armbe" or map_arch() == "armle" or map_arch() == "armbethumb" or map_arch() == "armbethumb":
if (
map_arch() == "armbe"
or map_arch() == "armle"
or map_arch() == "armbethumb"
or map_arch() == "armbethumb"
):
reg_state = {}
for reg_num in range(32):
value = gdb.selected_frame().read_register("d" + str(reg_num))
@ -158,8 +189,10 @@ def dump_float(rge=32):
return reg_state
#----------
#---- Main
# ----------
# ---- Main
def main():
print("----- Unicorn Context Dumper -----")
@ -175,7 +208,9 @@ def main():
try:
# Create the output directory
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
"%Y%m%d_%H%M%S"
)
output_path = "UnicornContext_" + timestamp
if not os.path.exists(output_path):
os.makedirs(output_path)
@ -190,7 +225,7 @@ def main():
}
# Write the index file
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), 'w')
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), "w")
index_file.write(json.dumps(context, indent=4))
index_file.close()
print("Done.")
@ -198,5 +233,6 @@ def main():
except Exception as e:
print("!!! ERROR:\n\t{}".format(repr(e)))
if __name__ == "__main__":
main()

View File

@ -31,8 +31,9 @@ MAX_SEG_SIZE = 128 * 1024 * 1024
# Name of the index file
INDEX_FILE_NAME = "_index.json"
#----------------------
#---- Helper Functions
# ----------------------
# ---- Helper Functions
def get_arch():
if ph.id == PLFM_386 and ph.flag & PR_USE64:
@ -52,6 +53,7 @@ def get_arch():
else:
return ""
def get_register_list(arch):
if arch == "arm64le" or arch == "arm64be":
arch = "arm64"
@ -59,84 +61,174 @@ def get_register_list(arch):
arch = "arm"
registers = {
"x64" : [
"rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"rip", "rsp", "efl",
"cs", "ds", "es", "fs", "gs", "ss",
"x64": [
"rax",
"rbx",
"rcx",
"rdx",
"rsi",
"rdi",
"rbp",
"rsp",
"r8",
"r9",
"r10",
"r11",
"r12",
"r13",
"r14",
"r15",
"rip",
"rsp",
"efl",
"cs",
"ds",
"es",
"fs",
"gs",
"ss",
],
"x86" : [
"eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
"eip", "esp", "efl",
"cs", "ds", "es", "fs", "gs", "ss",
],
"arm" : [
"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
"R8", "R9", "R10", "R11", "R12", "PC", "SP", "LR",
"x86": [
"eax",
"ebx",
"ecx",
"edx",
"esi",
"edi",
"ebp",
"esp",
"eip",
"esp",
"efl",
"cs",
"ds",
"es",
"fs",
"gs",
"ss",
],
"arm": [
"R0",
"R1",
"R2",
"R3",
"R4",
"R5",
"R6",
"R7",
"R8",
"R9",
"R10",
"R11",
"R12",
"PC",
"SP",
"LR",
"PSR",
],
"arm64" : [
"X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
"X8", "X9", "X10", "X11", "X12", "X13", "X14",
"X15", "X16", "X17", "X18", "X19", "X20", "X21",
"X22", "X23", "X24", "X25", "X26", "X27", "X28",
"PC", "SP", "FP", "LR", "CPSR"
"arm64": [
"X0",
"X1",
"X2",
"X3",
"X4",
"X5",
"X6",
"X7",
"X8",
"X9",
"X10",
"X11",
"X12",
"X13",
"X14",
"X15",
"X16",
"X17",
"X18",
"X19",
"X20",
"X21",
"X22",
"X23",
"X24",
"X25",
"X26",
"X27",
"X28",
"PC",
"SP",
"FP",
"LR",
"CPSR"
# "NZCV",
]
],
}
return registers[arch]
return registers[arch]
# -----------------------
# ---- Dumping functions
#-----------------------
#---- Dumping functions
def dump_arch_info():
arch_info = {}
arch_info["arch"] = get_arch()
return arch_info
def dump_regs():
reg_state = {}
for reg in get_register_list(get_arch()):
reg_state[reg] = GetRegValue(reg)
return reg_state
def dump_process_memory(output_dir):
# Segment information dictionary
segment_list = []
# Loop over the segments, fill in the info dictionary
for seg_ea in Segments():
seg_start = SegStart(seg_ea)
seg_end = SegEnd(seg_ea)
seg_size = seg_end - seg_start
seg_info = {}
seg_info["name"] = SegName(seg_ea)
seg_info["name"] = SegName(seg_ea)
seg_info["start"] = seg_start
seg_info["end"] = seg_end
seg_info["end"] = seg_end
perms = getseg(seg_ea).perm
seg_info["permissions"] = {
"r": False if (perms & SEGPERM_READ) == 0 else True,
"r": False if (perms & SEGPERM_READ) == 0 else True,
"w": False if (perms & SEGPERM_WRITE) == 0 else True,
"x": False if (perms & SEGPERM_EXEC) == 0 else True,
"x": False if (perms & SEGPERM_EXEC) == 0 else True,
}
if (perms & SEGPERM_READ) and seg_size <= MAX_SEG_SIZE and isLoaded(seg_start):
try:
# Compress and dump the content to a file
seg_content = get_many_bytes(seg_start, seg_end - seg_start)
if(seg_content == None):
print("Segment empty: {0}@0x{1:016x} (size:UNKNOWN)".format(SegName(seg_ea), seg_ea))
if seg_content == None:
print(
"Segment empty: {0}@0x{1:016x} (size:UNKNOWN)".format(
SegName(seg_ea), seg_ea
)
)
seg_info["content_file"] = ""
else:
print("Dumping segment {0}@0x{1:016x} (size:{2})".format(SegName(seg_ea), seg_ea, len(seg_content)))
print(
"Dumping segment {0}@0x{1:016x} (size:{2})".format(
SegName(seg_ea), seg_ea, len(seg_content)
)
)
compressed_seg_content = zlib.compress(seg_content)
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
seg_info["content_file"] = md5_sum
# Write the compressed contents to disk
out_file = open(os.path.join(output_dir, md5_sum), 'wb')
out_file = open(os.path.join(output_dir, md5_sum), "wb")
out_file.write(compressed_seg_content)
out_file.close()
except:
@ -145,12 +237,13 @@ def dump_process_memory(output_dir):
else:
print("Skipping segment {0}@0x{1:016x}".format(SegName(seg_ea), seg_ea))
seg_info["content_file"] = ""
# Add the segment to the list
segment_list.append(seg_info)
segment_list.append(seg_info)
return segment_list
"""
TODO: FINISH IMPORT DUMPING
def import_callback(ea, name, ord):
@ -169,41 +262,47 @@ def dump_imports():
return import_dict
"""
#----------
#---- Main
# ----------
# ---- Main
def main():
try:
print("----- Unicorn Context Dumper -----")
print("You must be actively debugging before running this!")
print("If it fails, double check that you are actively debugging before running.")
print(
"If it fails, double check that you are actively debugging before running."
)
# Create the output directory
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
"%Y%m%d_%H%M%S"
)
output_path = os.path.dirname(os.path.abspath(GetIdbPath()))
output_path = os.path.join(output_path, "UnicornContext_" + timestamp)
if not os.path.exists(output_path):
os.makedirs(output_path)
print("Process context will be output to {}".format(output_path))
# Get the context
context = {
"arch": dump_arch_info(),
"regs": dump_regs(),
"regs": dump_regs(),
"segments": dump_process_memory(output_path),
#"imports": dump_imports(),
# "imports": dump_imports(),
}
# Write the index file
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), 'w')
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), "w")
index_file.write(json.dumps(context, indent=4))
index_file.close()
index_file.close()
print("Done.")
except Exception, e:
print("!!! ERROR:\n\t{}".format(str(e)))
if __name__ == "__main__":
main()
main()

View File

@ -50,10 +50,11 @@ UNICORN_PAGE_SIZE = 0x1000
# Alignment functions to align all memory segments to Unicorn page boundaries (4KB pages only)
ALIGN_PAGE_DOWN = lambda x: x & ~(UNICORN_PAGE_SIZE - 1)
ALIGN_PAGE_UP = lambda x: (x + UNICORN_PAGE_SIZE - 1) & ~(UNICORN_PAGE_SIZE-1)
ALIGN_PAGE_UP = lambda x: (x + UNICORN_PAGE_SIZE - 1) & ~(UNICORN_PAGE_SIZE - 1)
# ----------------------
# ---- Helper Functions
#----------------------
#---- Helper Functions
def overlap_alignments(segments, memory):
final_list = []
@ -61,33 +62,40 @@ def overlap_alignments(segments, memory):
curr_end_addr = 0
curr_node = None
current_segment = None
sorted_segments = sorted(segments, key=lambda k: (k['start'], k['end']))
sorted_segments = sorted(segments, key=lambda k: (k["start"], k["end"]))
if curr_seg_idx < len(sorted_segments):
current_segment = sorted_segments[curr_seg_idx]
for mem in sorted(memory, key=lambda k: (k['start'], -k['end'])):
for mem in sorted(memory, key=lambda k: (k["start"], -k["end"])):
if curr_node is None:
if current_segment is not None and current_segment['start'] == mem['start']:
if current_segment is not None and current_segment["start"] == mem["start"]:
curr_node = deepcopy(current_segment)
curr_node['permissions'] = mem['permissions']
curr_node["permissions"] = mem["permissions"]
else:
curr_node = deepcopy(mem)
curr_end_addr = curr_node['end']
curr_end_addr = curr_node["end"]
while curr_end_addr <= mem['end']:
if curr_node['end'] == mem['end']:
if current_segment is not None and current_segment['start'] > curr_node['start'] and current_segment['start'] < curr_node['end']:
curr_node['end'] = current_segment['start']
if(curr_node['end'] > curr_node['start']):
while curr_end_addr <= mem["end"]:
if curr_node["end"] == mem["end"]:
if (
current_segment is not None
and current_segment["start"] > curr_node["start"]
and current_segment["start"] < curr_node["end"]
):
curr_node["end"] = current_segment["start"]
if curr_node["end"] > curr_node["start"]:
final_list.append(curr_node)
curr_node = deepcopy(current_segment)
curr_node['permissions'] = mem['permissions']
curr_end_addr = curr_node['end']
curr_node["permissions"] = mem["permissions"]
curr_end_addr = curr_node["end"]
else:
if(curr_node['end'] > curr_node['start']):
if curr_node["end"] > curr_node["start"]:
final_list.append(curr_node)
# if curr_node is a segment
if current_segment is not None and current_segment['end'] == mem['end']:
if (
current_segment is not None
and current_segment["end"] == mem["end"]
):
curr_seg_idx += 1
if curr_seg_idx < len(sorted_segments):
current_segment = sorted_segments[curr_seg_idx]
@ -98,50 +106,56 @@ def overlap_alignments(segments, memory):
break
# could only be a segment
else:
if curr_node['end'] < mem['end']:
if curr_node["end"] < mem["end"]:
# check for remaining segments and valid segments
if(curr_node['end'] > curr_node['start']):
if curr_node["end"] > curr_node["start"]:
final_list.append(curr_node)
curr_seg_idx += 1
if curr_seg_idx < len(sorted_segments):
current_segment = sorted_segments[curr_seg_idx]
else:
current_segment = None
if current_segment is not None and current_segment['start'] <= curr_end_addr and current_segment['start'] < mem['end']:
if (
current_segment is not None
and current_segment["start"] <= curr_end_addr
and current_segment["start"] < mem["end"]
):
curr_node = deepcopy(current_segment)
curr_node['permissions'] = mem['permissions']
curr_node["permissions"] = mem["permissions"]
else:
# no more segments
curr_node = deepcopy(mem)
curr_node['start'] = curr_end_addr
curr_end_addr = curr_node['end']
return final_list
curr_node["start"] = curr_end_addr
curr_end_addr = curr_node["end"]
return final_list
# https://github.com/llvm-mirror/llvm/blob/master/include/llvm/ADT/Triple.h
def get_arch():
arch, arch_vendor, arch_os = lldb.target.GetTriple().split('-')
if arch == 'x86_64':
arch, arch_vendor, arch_os = lldb.target.GetTriple().split("-")
if arch == "x86_64":
return "x64"
elif arch == 'x86' or arch == 'i386':
elif arch == "x86" or arch == "i386":
return "x86"
elif arch == 'aarch64' or arch == 'arm64':
elif arch == "aarch64" or arch == "arm64":
return "arm64le"
elif arch == 'aarch64_be':
elif arch == "aarch64_be":
return "arm64be"
elif arch == 'armeb':
elif arch == "armeb":
return "armbe"
elif arch == 'arm':
elif arch == "arm":
return "armle"
else:
return ""
#-----------------------
#---- Dumping functions
# -----------------------
# ---- Dumping functions
def dump_arch_info():
arch_info = {}
@ -152,56 +166,64 @@ def dump_arch_info():
def dump_regs():
reg_state = {}
for reg_list in lldb.frame.GetRegisters():
if 'general purpose registers' in reg_list.GetName().lower():
if "general purpose registers" in reg_list.GetName().lower():
for reg in reg_list:
reg_state[reg.GetName()] = int(reg.GetValue(), 16)
return reg_state
def get_section_info(sec):
name = sec.name if sec.name is not None else ''
name = sec.name if sec.name is not None else ""
if sec.GetParent().name is not None:
name = sec.GetParent().name + '.' + sec.name
name = sec.GetParent().name + "." + sec.name
module_name = sec.addr.module.file.GetFilename()
module_name = module_name if module_name is not None else ''
long_name = module_name + '.' + name
module_name = module_name if module_name is not None else ""
long_name = module_name + "." + name
return sec.addr.load_addr, (sec.addr.load_addr + sec.size), sec.size, long_name
def dump_process_memory(output_dir):
# Segment information dictionary
raw_segment_list = []
raw_memory_list = []
# 1st pass:
# Loop over the segments, fill in the segment info dictionary
for module in lldb.target.module_iter():
for seg_ea in module.section_iter():
seg_info = {'module': module.file.GetFilename() }
seg_info['start'], seg_info['end'], seg_size, seg_info['name'] = get_section_info(seg_ea)
seg_info = {"module": module.file.GetFilename()}
(
seg_info["start"],
seg_info["end"],
seg_size,
seg_info["name"],
) = get_section_info(seg_ea)
# TODO: Ugly hack for -1 LONG address on 32-bit
if seg_info['start'] >= sys.maxint or seg_size <= 0:
print "Throwing away page: {}".format(seg_info['name'])
if seg_info["start"] >= sys.maxint or seg_size <= 0:
print "Throwing away page: {}".format(seg_info["name"])
continue
# Page-align segment
seg_info['start'] = ALIGN_PAGE_DOWN(seg_info['start'])
seg_info['end'] = ALIGN_PAGE_UP(seg_info['end'])
print("Appending: {}".format(seg_info['name']))
seg_info["start"] = ALIGN_PAGE_DOWN(seg_info["start"])
seg_info["end"] = ALIGN_PAGE_UP(seg_info["end"])
print ("Appending: {}".format(seg_info["name"]))
raw_segment_list.append(seg_info)
# Add the stack memory region (just hardcode 0x1000 around the current SP)
sp = lldb.frame.GetSP()
start_sp = ALIGN_PAGE_DOWN(sp)
raw_segment_list.append({'start': start_sp, 'end': start_sp + 0x1000, 'name': 'STACK'})
raw_segment_list.append(
{"start": start_sp, "end": start_sp + 0x1000, "name": "STACK"}
)
# Write the original memory to file for debugging
index_file = open(os.path.join(output_dir, DEBUG_MEM_FILE_NAME), 'w')
index_file = open(os.path.join(output_dir, DEBUG_MEM_FILE_NAME), "w")
index_file.write(json.dumps(raw_segment_list, indent=4))
index_file.close()
index_file.close()
# Loop over raw memory regions
# Loop over raw memory regions
mem_info = lldb.SBMemoryRegionInfo()
start_addr = -1
next_region_addr = 0
@ -218,15 +240,20 @@ def dump_process_memory(output_dir):
end_addr = mem_info.GetRegionEnd()
# Unknown region name
region_name = 'UNKNOWN'
region_name = "UNKNOWN"
# Ignore regions that aren't even mapped
if mem_info.IsMapped() and mem_info.IsReadable():
mem_info_obj = {'start': start_addr, 'end': end_addr, 'name': region_name, 'permissions': {
"r": mem_info.IsReadable(),
"w": mem_info.IsWritable(),
"x": mem_info.IsExecutable()
}}
mem_info_obj = {
"start": start_addr,
"end": end_addr,
"name": region_name,
"permissions": {
"r": mem_info.IsReadable(),
"w": mem_info.IsWritable(),
"x": mem_info.IsExecutable(),
},
}
raw_memory_list.append(mem_info_obj)
@ -234,65 +261,89 @@ def dump_process_memory(output_dir):
for seg_info in final_segment_list:
try:
seg_info['content_file'] = ''
start_addr = seg_info['start']
end_addr = seg_info['end']
region_name = seg_info['name']
seg_info["content_file"] = ""
start_addr = seg_info["start"]
end_addr = seg_info["end"]
region_name = seg_info["name"]
# Compress and dump the content to a file
err = lldb.SBError()
seg_content = lldb.process.ReadMemory(start_addr, end_addr - start_addr, err)
if(seg_content == None):
print("Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(start_addr, region_name))
seg_info['content_file'] = ''
seg_content = lldb.process.ReadMemory(
start_addr, end_addr - start_addr, err
)
if seg_content == None:
print (
"Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(
start_addr, region_name
)
)
seg_info["content_file"] = ""
else:
print("Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(start_addr, len(seg_content), region_name, repr(seg_info['permissions'])))
print (
"Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(
start_addr,
len(seg_content),
region_name,
repr(seg_info["permissions"]),
)
)
compressed_seg_content = zlib.compress(seg_content)
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
seg_info['content_file'] = md5_sum
seg_info["content_file"] = md5_sum
# Write the compressed contents to disk
out_file = open(os.path.join(output_dir, md5_sum), 'wb')
out_file = open(os.path.join(output_dir, md5_sum), "wb")
out_file.write(compressed_seg_content)
out_file.close()
except:
print("Exception reading segment ({}): {}".format(region_name, sys.exc_info()[0]))
print (
"Exception reading segment ({}): {}".format(
region_name, sys.exc_info()[0]
)
)
return final_segment_list
#----------
#---- Main
# ----------
# ---- Main
def main():
try:
print("----- Unicorn Context Dumper -----")
print("You must be actively debugging before running this!")
print("If it fails, double check that you are actively debugging before running.")
print ("----- Unicorn Context Dumper -----")
print ("You must be actively debugging before running this!")
print (
"If it fails, double check that you are actively debugging before running."
)
# Create the output directory
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
"%Y%m%d_%H%M%S"
)
output_path = "UnicornContext_" + timestamp
if not os.path.exists(output_path):
os.makedirs(output_path)
print("Process context will be output to {}".format(output_path))
print ("Process context will be output to {}".format(output_path))
# Get the context
context = {
"arch": dump_arch_info(),
"regs": dump_regs(),
"segments": dump_process_memory(output_path),
}
# Write the index file
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), 'w')
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), "w")
index_file.write(json.dumps(context, indent=4))
index_file.close()
print("Done.")
index_file.close()
print ("Done.")
except Exception, e:
print("!!! ERROR:\n\t{}".format(repr(e)))
print ("!!! ERROR:\n\t{}".format(repr(e)))
if __name__ == "__main__":
main()
elif lldb.debugger:
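For reference, the dumper above produces an _index.json recording the architecture, register state, and one entry per memory segment, with each dumped segment's zlib-compressed bytes stored alongside it in a file named by MD5 hash. A minimal sketch of reading such a dump back (the directory name is illustrative; the keys follow the code above):

import json
import os
import zlib

dump_dir = "UnicornContext_20210101_000000"  # illustrative output directory
with open(os.path.join(dump_dir, "_index.json")) as f:
    context = json.load(f)

print(context["arch"], "with", len(context["segments"]), "segments")
for seg in context["segments"]:
    if not seg["content_file"]:
        continue  # an empty string means the segment could not be read
    with open(os.path.join(dump_dir, seg["content_file"]), "rb") as f:
        data = zlib.decompress(f.read())
    # data holds the bytes originally mapped at seg["start"] .. seg["end"]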


@ -59,45 +59,47 @@ MAX_SEG_SIZE = 128 * 1024 * 1024
# Name of the index file
INDEX_FILE_NAME = "_index.json"
#----------------------
#---- Helper Functions
# ----------------------
# ---- Helper Functions
def map_arch():
arch = pwndbg.arch.current # from PWNDBG
if 'x86_64' in arch or 'x86-64' in arch:
arch = pwndbg.arch.current # from PWNDBG
if "x86_64" in arch or "x86-64" in arch:
return "x64"
elif 'x86' in arch or 'i386' in arch:
elif "x86" in arch or "i386" in arch:
return "x86"
elif 'aarch64' in arch or 'arm64' in arch:
elif "aarch64" in arch or "arm64" in arch:
return "arm64le"
elif 'aarch64_be' in arch:
elif "aarch64_be" in arch:
return "arm64be"
elif 'arm' in arch:
cpsr = pwndbg.regs['cpsr']
# check endianess
if pwndbg.arch.endian == 'big':
elif "arm" in arch:
cpsr = pwndbg.regs["cpsr"]
# check endianess
if pwndbg.arch.endian == "big":
# check for THUMB mode
if (cpsr & (1 << 5)):
if cpsr & (1 << 5):
return "armbethumb"
else:
return "armbe"
else:
# check for THUMB mode
if (cpsr & (1 << 5)):
if cpsr & (1 << 5):
return "armlethumb"
else:
return "armle"
elif 'mips' in arch:
if pwndbg.arch.endian == 'little':
return 'mipsel'
elif "mips" in arch:
if pwndbg.arch.endian == "little":
return "mipsel"
else:
return 'mips'
return "mips"
else:
return ""
#-----------------------
#---- Dumping functions
# -----------------------
# ---- Dumping functions
def dump_arch_info():
arch_info = {}
@ -110,26 +112,26 @@ def dump_regs():
for reg in pwndbg.regs.all:
reg_val = pwndbg.regs[reg]
# current dumper script looks for register values to be hex strings
# reg_str = "0x{:08x}".format(reg_val)
# if "64" in get_arch():
# reg_str = "0x{:016x}".format(reg_val)
# reg_state[reg.strip().strip('$')] = reg_str
reg_state[reg.strip().strip('$')] = reg_val
# reg_str = "0x{:08x}".format(reg_val)
# if "64" in get_arch():
# reg_str = "0x{:016x}".format(reg_val)
# reg_state[reg.strip().strip('$')] = reg_str
reg_state[reg.strip().strip("$")] = reg_val
return reg_state
def dump_process_memory(output_dir):
# Segment information dictionary
final_segment_list = []
# PWNDBG:
vmmap = pwndbg.vmmap.get()
# Pointer to end of last dumped memory segment
segment_last_addr = 0x0;
segment_last_addr = 0x0
start = None
end = None
if not vmmap:
print("No address mapping information found")
@ -141,86 +143,107 @@ def dump_process_memory(output_dir):
continue
start = entry.start
end = entry.end
if (segment_last_addr > entry.start): # indicates overlap
if (segment_last_addr > entry.end): # indicates complete overlap, so we skip the segment entirely
if segment_last_addr > entry.start: # indicates overlap
if (
segment_last_addr > entry.end
): # indicates complete overlap, so we skip the segment entirely
continue
else:
start = segment_last_addr
seg_info = {'start': start, 'end': end, 'name': entry.objfile, 'permissions': {
"r": entry.read,
"w": entry.write,
"x": entry.execute
}, 'content_file': ''}
seg_info = {
"start": start,
"end": end,
"name": entry.objfile,
"permissions": {"r": entry.read, "w": entry.write, "x": entry.execute},
"content_file": "",
}
# "(deleted)" may or may not be valid, but don't push it.
if entry.read and not '(deleted)' in entry.objfile:
if entry.read and not "(deleted)" in entry.objfile:
try:
# Compress and dump the content to a file
seg_content = pwndbg.memory.read(start, end - start)
if(seg_content == None):
print("Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(entry.start, entry.objfile))
if seg_content == None:
print(
"Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(
entry.start, entry.objfile
)
)
else:
print("Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(entry.start, len(seg_content), entry.objfile, repr(seg_info['permissions'])))
print(
"Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(
entry.start,
len(seg_content),
entry.objfile,
repr(seg_info["permissions"]),
)
)
compressed_seg_content = zlib.compress(str(seg_content))
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
seg_info["content_file"] = md5_sum
# Write the compressed contents to disk
out_file = open(os.path.join(output_dir, md5_sum), 'wb')
out_file = open(os.path.join(output_dir, md5_sum), "wb")
out_file.write(compressed_seg_content)
out_file.close()
except Exception as e:
traceback.print_exc()
print("Exception reading segment ({}): {}".format(entry.objfile, sys.exc_info()[0]))
print(
"Exception reading segment ({}): {}".format(
entry.objfile, sys.exc_info()[0]
)
)
else:
print("Skipping segment {0}@0x{1:016x}".format(entry.objfile, entry.start))
segment_last_addr = end
# Add the segment to the list
final_segment_list.append(seg_info)
return final_segment_list
#----------
#---- Main
# ----------
# ---- Main
def main():
print("----- Unicorn Context Dumper -----")
print("You must be actively debugging before running this!")
print("If it fails, double check that you are actively debugging before running.")
try:
# Create the output directory
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
"%Y%m%d_%H%M%S"
)
output_path = "UnicornContext_" + timestamp
if not os.path.exists(output_path):
os.makedirs(output_path)
print("Process context will be output to {}".format(output_path))
# Get the context
context = {
"arch": dump_arch_info(),
"regs": dump_regs(),
"segments": dump_process_memory(output_path),
}
# Write the index file
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), 'w')
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), "w")
index_file.write(json.dumps(context, indent=4))
index_file.close()
print("Done.")
except Exception as e:
print("!!! ERROR:\n\t{}".format(repr(e)))
if __name__ == "__main__" and pwndbg_loaded:
main()


@ -17,6 +17,6 @@ You shouldn't need to compile simple_target.c since a X86_64 binary version is
pre-built and shipped in this sample folder. This file documents how the binary
was built in case you want to rebuild it or recompile it for any reason.
The pre-built binary (simple_target_x86_64.bin) was built using -g -O0 in gcc.
The pre-built binary (persistent_target_x86_64) was built using -g -O0 in gcc.
We then load the binary and execute the main function directly.


@ -22,48 +22,81 @@ from unicornafl import *
from unicornafl.x86_const import *
# Path to the file containing the binary to emulate
BINARY_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'compcov_target.bin')
BINARY_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "compcov_target.bin"
)
# Memory map for the code to be tested
CODE_ADDRESS = 0x00100000 # Arbitrary address where code to test will be loaded
CODE_SIZE_MAX = 0x00010000 # Max size for the code (64kb)
STACK_ADDRESS = 0x00200000 # Address of the stack (arbitrarily chosen)
STACK_SIZE = 0x00010000 # Size of the stack (arbitrarily chosen)
DATA_ADDRESS = 0x00300000 # Address where mutated data will be placed
DATA_SIZE_MAX = 0x00010000 # Maximum allowable size of mutated data
try:
# If Capstone is installed then we'll dump disassembly, otherwise just dump the binary.
from capstone import *
cs = Cs(CS_ARCH_X86, CS_MODE_64)
def unicorn_debug_instruction(uc, address, size, user_data):
mem = uc.mem_read(address, size)
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(bytes(mem), size):
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(
bytes(mem), size
):
print(" Instr: {:#016x}:\t{}\t{}".format(address, cs_mnemonic, cs_opstr))
except ImportError:
def unicorn_debug_instruction(uc, address, size, user_data):
print(" Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
def unicorn_debug_block(uc, address, size, user_data):
print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
if access == UC_MEM_WRITE:
print(" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
print(
" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
address, size, value
)
)
else:
print(" >>> Read: addr=0x{0:016x} size={1}".format(address, size))
def unicorn_debug_mem_invalid_access(uc, access, address, size, value, user_data):
if access == UC_MEM_WRITE_UNMAPPED:
print(" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
print(
" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
address, size, value
)
)
else:
print(" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size))
print(
" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size)
)
def main():
parser = argparse.ArgumentParser(description="Test harness for compcov_target.bin")
parser.add_argument('input_file', type=str, help="Path to the file containing the mutated input to load")
parser.add_argument('-t', '--trace', default=False, action="store_true", help="Enables debug tracing")
parser.add_argument(
"input_file",
type=str,
help="Path to the file containing the mutated input to load",
)
parser.add_argument(
"-t",
"--trace",
default=False,
action="store_true",
help="Enables debug tracing",
)
args = parser.parse_args()
# Instantiate a MIPS32 big endian Unicorn Engine instance
@ -73,13 +106,16 @@ def main():
uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction)
uc.hook_add(UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, unicorn_debug_mem_access)
uc.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID, unicorn_debug_mem_invalid_access)
uc.hook_add(
UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID,
unicorn_debug_mem_invalid_access,
)
#---------------------------------------------------
# ---------------------------------------------------
# Load the binary to emulate and map it into memory
print("Loading data input from {}".format(args.input_file))
binary_file = open(BINARY_FILE, 'rb')
binary_file = open(BINARY_FILE, "rb")
binary_code = binary_file.read()
binary_file.close()
@ -93,11 +129,11 @@ def main():
uc.mem_write(CODE_ADDRESS, binary_code)
# Set the program counter to the start of the code
start_address = CODE_ADDRESS # Address of entry point of main()
end_address = CODE_ADDRESS + 0x55 # Address of last instruction in main()
uc.reg_write(UC_X86_REG_RIP, start_address)
#-----------------
# -----------------
# Setup the stack
uc.mem_map(STACK_ADDRESS, STACK_SIZE)
@ -106,8 +142,7 @@ def main():
# Mapping a location to write our buffer to
uc.mem_map(DATA_ADDRESS, DATA_SIZE_MAX)
#-----------------------------------------------
# -----------------------------------------------
# Load the mutated input and map it into memory
def place_input_callback(uc, input, _, data):
@ -121,7 +156,7 @@ def main():
# Write the mutated command into the data buffer
uc.mem_write(DATA_ADDRESS, input)
#------------------------------------------------------------
# ------------------------------------------------------------
# Emulate the code, allowing it to process the mutated input
print("Starting the AFL fuzz")
@ -129,8 +164,9 @@ def main():
input_file=args.input_file,
place_input_callback=place_input_callback,
exits=[end_address],
persistent_iters=1
persistent_iters=1,
)
if __name__ == "__main__":
main()


@ -22,48 +22,81 @@ from unicornafl import *
from unicornafl.mips_const import *
# Path to the file containing the binary to emulate
BINARY_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'simple_target.bin')
BINARY_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "simple_target.bin"
)
# Memory map for the code to be tested
CODE_ADDRESS = 0x00100000 # Arbitrary address where code to test will be loaded
CODE_SIZE_MAX = 0x00010000 # Max size for the code (64kb)
STACK_ADDRESS = 0x00200000 # Address of the stack (arbitrarily chosen)
STACK_SIZE = 0x00010000 # Size of the stack (arbitrarily chosen)
DATA_ADDRESS = 0x00300000 # Address where mutated data will be placed
DATA_SIZE_MAX = 0x00010000 # Maximum allowable size of mutated data
try:
# If Capstone is installed then we'll dump disassembly, otherwise just dump the binary.
from capstone import *
cs = Cs(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN)
def unicorn_debug_instruction(uc, address, size, user_data):
mem = uc.mem_read(address, size)
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(bytes(mem), size):
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(
bytes(mem), size
):
print(" Instr: {:#016x}:\t{}\t{}".format(address, cs_mnemonic, cs_opstr))
except ImportError:
def unicorn_debug_instruction(uc, address, size, user_data):
print(" Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
def unicorn_debug_block(uc, address, size, user_data):
print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
if access == UC_MEM_WRITE:
print(" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
print(
" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
address, size, value
)
)
else:
print(" >>> Read: addr=0x{0:016x} size={1}".format(address, size))
def unicorn_debug_mem_invalid_access(uc, access, address, size, value, user_data):
if access == UC_MEM_WRITE_UNMAPPED:
print(" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
print(
" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
address, size, value
)
)
else:
print(" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size))
print(
" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size)
)
def main():
parser = argparse.ArgumentParser(description="Test harness for simple_target.bin")
parser.add_argument('input_file', type=str, help="Path to the file containing the mutated input to load")
parser.add_argument('-t', '--trace', default=False, action="store_true", help="Enables debug tracing")
parser.add_argument(
"input_file",
type=str,
help="Path to the file containing the mutated input to load",
)
parser.add_argument(
"-t",
"--trace",
default=False,
action="store_true",
help="Enables debug tracing",
)
args = parser.parse_args()
# Instantiate a MIPS32 big endian Unicorn Engine instance
@ -73,13 +106,16 @@ def main():
uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction)
uc.hook_add(UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, unicorn_debug_mem_access)
uc.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID, unicorn_debug_mem_invalid_access)
uc.hook_add(
UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID,
unicorn_debug_mem_invalid_access,
)
#---------------------------------------------------
# ---------------------------------------------------
# Load the binary to emulate and map it into memory
print("Loading data input from {}".format(args.input_file))
binary_file = open(BINARY_FILE, 'rb')
binary_file = open(BINARY_FILE, "rb")
binary_code = binary_file.read()
binary_file.close()
@ -93,11 +129,11 @@ def main():
uc.mem_write(CODE_ADDRESS, binary_code)
# Set the program counter to the start of the code
start_address = CODE_ADDRESS # Address of entry point of main()
end_address = CODE_ADDRESS + 0xf4 # Address of last instruction in main()
start_address = CODE_ADDRESS # Address of entry point of main()
end_address = CODE_ADDRESS + 0xF4 # Address of last instruction in main()
uc.reg_write(UC_MIPS_REG_PC, start_address)
#-----------------
# -----------------
# Setup the stack
uc.mem_map(STACK_ADDRESS, STACK_SIZE)
@ -106,14 +142,14 @@ def main():
# reserve some space for data
uc.mem_map(DATA_ADDRESS, DATA_SIZE_MAX)
#-----------------------------------------------------
# -----------------------------------------------------
# Set up a callback to place input data (do little work here, it's called for every single iteration)
# We did not pass in any data and don't use persistent mode, so we can ignore these params.
# Be sure to check out the docstrings for the uc.afl_* functions.
def place_input_callback(uc, input, persistent_round, data):
# Apply constraints to the mutated input
if len(input) > DATA_SIZE_MAX:
#print("Test input is too long (> {} bytes)")
# print("Test input is too long (> {} bytes)")
return False
# Write the mutated command into the data buffer
@ -122,5 +158,6 @@ def main():
# Start the fuzzer.
uc.afl_fuzz(args.input_file, place_input_callback, [end_address])
if __name__ == "__main__":
main()


@ -25,50 +25,79 @@ from unicornafl import *
from unicornafl.mips_const import *
# Path to the file containing the binary to emulate
BINARY_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'simple_target.bin')
BINARY_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "simple_target.bin"
)
# Memory map for the code to be tested
CODE_ADDRESS = 0x00100000 # Arbitrary address where code to test will be loaded
CODE_SIZE_MAX = 0x00010000 # Max size for the code (64kb)
STACK_ADDRESS = 0x00200000 # Address of the stack (arbitrarily chosen)
STACK_SIZE = 0x00010000 # Size of the stack (arbitrarily chosen)
DATA_ADDRESS = 0x00300000 # Address where mutated data will be placed
DATA_SIZE_MAX = 0x00010000 # Maximum allowable size of mutated data
try:
# If Capstone is installed then we'll dump disassembly, otherwise just dump the binary.
from capstone import *
cs = Cs(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN)
def unicorn_debug_instruction(uc, address, size, user_data):
mem = uc.mem_read(address, size)
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(bytes(mem), size):
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(
bytes(mem), size
):
print(" Instr: {:#016x}:\t{}\t{}".format(address, cs_mnemonic, cs_opstr))
except ImportError:
def unicorn_debug_instruction(uc, address, size, user_data):
print(" Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
def unicorn_debug_block(uc, address, size, user_data):
print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
if access == UC_MEM_WRITE:
print(" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
print(
" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
address, size, value
)
)
else:
print(" >>> Read: addr=0x{0:016x} size={1}".format(address, size))
def unicorn_debug_mem_invalid_access(uc, access, address, size, value, user_data):
if access == UC_MEM_WRITE_UNMAPPED:
print(" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
print(
" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
address, size, value
)
)
else:
print(" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size))
print(
" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size)
)
def force_crash(uc_error):
# This function should be called to indicate to AFL that a crash occurred during emulation.
# Pass in the exception received from Uc.emu_start()
mem_errors = [
UC_ERR_READ_UNMAPPED, UC_ERR_READ_PROT, UC_ERR_READ_UNALIGNED,
UC_ERR_WRITE_UNMAPPED, UC_ERR_WRITE_PROT, UC_ERR_WRITE_UNALIGNED,
UC_ERR_FETCH_UNMAPPED, UC_ERR_FETCH_PROT, UC_ERR_FETCH_UNALIGNED,
UC_ERR_READ_UNMAPPED,
UC_ERR_READ_PROT,
UC_ERR_READ_UNALIGNED,
UC_ERR_WRITE_UNMAPPED,
UC_ERR_WRITE_PROT,
UC_ERR_WRITE_UNALIGNED,
UC_ERR_FETCH_UNMAPPED,
UC_ERR_FETCH_PROT,
UC_ERR_FETCH_UNALIGNED,
]
if uc_error.errno in mem_errors:
# Memory error - throw SIGSEGV
@ -80,11 +109,22 @@ def force_crash(uc_error):
# Not sure what happened - throw SIGABRT
os.kill(os.getpid(), signal.SIGABRT)
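force_crash() converts a Unicorn error into a signal that afl-fuzz records as a crash. A minimal sketch of how a harness would typically wrap emulation with it (the try/except shown here is illustrative; force_crash, uc, and the addresses are the ones from this harness):

try:
    uc.emu_start(start_address, end_address)
except UcError as e:
    print("Execution failed with error: {}".format(e))
    force_crash(e)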
def main():
parser = argparse.ArgumentParser(description="Test harness for simple_target.bin")
parser.add_argument('input_file', type=str, help="Path to the file containing the mutated input to load")
parser.add_argument('-d', '--debug', default=False, action="store_true", help="Enables debug tracing")
parser.add_argument(
"input_file",
type=str,
help="Path to the file containing the mutated input to load",
)
parser.add_argument(
"-d",
"--debug",
default=False,
action="store_true",
help="Enables debug tracing",
)
args = parser.parse_args()
# Instantiate a MIPS32 big endian Unicorn Engine instance
@ -94,13 +134,16 @@ def main():
uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction)
uc.hook_add(UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, unicorn_debug_mem_access)
uc.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID, unicorn_debug_mem_invalid_access)
uc.hook_add(
UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID,
unicorn_debug_mem_invalid_access,
)
#---------------------------------------------------
# ---------------------------------------------------
# Load the binary to emulate and map it into memory
print("Loading data input from {}".format(args.input_file))
binary_file = open(BINARY_FILE, 'rb')
binary_file = open(BINARY_FILE, "rb")
binary_code = binary_file.read()
binary_file.close()
@ -114,11 +157,11 @@ def main():
uc.mem_write(CODE_ADDRESS, binary_code)
# Set the program counter to the start of the code
start_address = CODE_ADDRESS # Address of entry point of main()
end_address = CODE_ADDRESS + 0xf4 # Address of last instruction in main()
start_address = CODE_ADDRESS # Address of entry point of main()
end_address = CODE_ADDRESS + 0xF4 # Address of last instruction in main()
uc.reg_write(UC_MIPS_REG_PC, start_address)
#-----------------
# -----------------
# Setup the stack
uc.mem_map(STACK_ADDRESS, STACK_SIZE)
@ -127,10 +170,10 @@ def main():
# reserve some space for data
uc.mem_map(DATA_ADDRESS, DATA_SIZE_MAX)
#-----------------------------------------------------
# -----------------------------------------------------
# Kick off AFL's fork server
# THIS MUST BE DONE BEFORE LOADING USER DATA!
# If this isn't done every single run, the AFL fork server
# will not be started appropriately and you'll get erratic results!
print("Starting the AFL forkserver")
@ -142,12 +185,12 @@ def main():
else:
out = lambda x, y: print(x.format(y))
#-----------------------------------------------
# -----------------------------------------------
# Load the mutated input and map it into memory
# Load the mutated input from disk
out("Loading data input from {}", args.input_file)
input_file = open(args.input_file, 'rb')
input_file = open(args.input_file, "rb")
input = input_file.read()
input_file.close()
@ -159,7 +202,7 @@ def main():
# Write the mutated command into the data buffer
uc.mem_write(DATA_ADDRESS, input)
#------------------------------------------------------------
# ------------------------------------------------------------
# Emulate the code, allowing it to process the mutated input
out("Executing until a crash or execution reaches 0x{0:016x}", end_address)
@ -175,5 +218,6 @@ def main():
# UC_AFL_RET_FINISHED = 3
out("Done. AFL Mode is {}", afl_mode)
if __name__ == "__main__":
main()


@ -256,17 +256,17 @@ def main():
input_len = len(input)
# global input_len
if input_len > INPUT_MAX:
#print("Test input is too long (> {} bytes)")
# print("Test input is too long (> {} bytes)")
return False
# print(f"Placing input: {input} in round {persistent_round}")
# Make sure the string is always 0-terminated (as it would be "in the wild")
input[-1] = b'\0'
input[-1] = b"\0"
# Write the mutated command into the data buffer
uc.mem_write(INPUT_ADDRESS, input)
#uc.reg_write(UC_X86_REG_RIP, main_offset)
# uc.reg_write(UC_X86_REG_RIP, main_offset)
print(f"Starting to fuzz. Running from addr {main_offset} to one of {main_ends}")
# Start the fuzzer.


@ -1,5 +1,6 @@
PREFIX ?= /usr/local
BIN_PATH = $(PREFIX)/bin
HELPER_PATH = $(PREFIX)/lib/afl
DOC_PATH = $(PREFIX)/share/doc/afl
PROGRAMS = afl-network-client afl-network-server
@ -31,7 +32,7 @@ afl-network-client: afl-network-client.c
$(CC) $(CFLAGS) -I../../include -o afl-network-client afl-network-client.c $(LDFLAGS)
afl-network-server: afl-network-server.c
$(CC) $(CFLAGS) -I../../include -o afl-network-server afl-network-server.c ../../src/afl-forkserver.c ../../src/afl-sharedmem.c ../../src/afl-common.c -DBIN_PATH=\"$(BIN_PATH)\" $(LDFLAGS)
$(CC) $(CFLAGS) -I../../include -o afl-network-server afl-network-server.c ../../src/afl-forkserver.c ../../src/afl-sharedmem.c ../../src/afl-common.c -DAFL_PATH=\"$(HELPER_PATH)\" -DBIN_PATH=\"$(BIN_PATH)\" $(LDFLAGS)
clean:
rm -f $(PROGRAMS) *~ core


@ -11,6 +11,7 @@ import idc
# See https://www.hex-rays.com/products/ida/support/ida74_idapython_no_bc695_porting_guide.shtml
from os.path import expanduser
home = expanduser("~")
patchpoints = set()
@ -18,7 +19,7 @@ patchpoints = set()
max_offset = 0
for seg_ea in idautils.Segments():
name = idc.get_segm_name(seg_ea)
#print("Segment: " + name)
# print("Segment: " + name)
if name != "__text" and name != ".text":
continue
@ -26,7 +27,7 @@ for seg_ea in idautils.Segments():
end = idc.get_segm_end(seg_ea)
first = 0
subtract_addr = 0
#print("Start: " + hex(start) + " End: " + hex(end))
# print("Start: " + hex(start) + " End: " + hex(end))
for func_ea in idautils.Functions(start, end):
f = idaapi.get_func(func_ea)
if not f:
@ -37,10 +38,10 @@ for seg_ea in idautils.Segments():
if block.start_ea >= 0x1000:
subtract_addr = 0x1000
first = 1
max_offset = max(max_offset, block.start_ea)
patchpoints.add(block.start_ea - subtract_addr)
#else:
# else:
# print("Warning: broken CFG?")
# Round up max_offset to page size
@ -52,11 +53,11 @@ if rem != 0:
print("Writing to " + home + "/Desktop/patches.txt")
with open(home + "/Desktop/patches.txt", "w") as f:
f.write(ida_nalt.get_root_filename() + ':' + hex(size) + '\n')
f.write('\n'.join(map(hex, sorted(patchpoints))))
f.write('\n')
f.write(ida_nalt.get_root_filename() + ":" + hex(size) + "\n")
f.write("\n".join(map(hex, sorted(patchpoints))))
f.write("\n")
print("Done, found {} patchpoints".format(len(patchpoints)))
# For headless script running remove the comment from the next line
#ida_pro.qexit()
# ida_pro.qexit()
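The script writes patches.txt as a header line of the form <binary name>:<hex size>, followed by one hex basic-block offset per line. A minimal sketch of parsing that layout back (the path is illustrative):

with open("patches.txt") as f:
    binary_name, size_hex = f.readline().rstrip("\n").rsplit(":", 1)
    patchpoints = [int(line, 16) for line in f if line.strip()]

print(binary_name, int(size_hex, 16), len(patchpoints))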


@ -12,12 +12,13 @@ import random, re, io
# The XmlMutatorMin class #
###########################
class XmlMutatorMin:
"""
Optionals parameters:
seed Seed used by the PRNG (default: "RANDOM")
verbose Verbosity (default: False)
Optionals parameters:
seed Seed used by the PRNG (default: "RANDOM")
verbose Verbosity (default: False)
"""
def __init__(self, seed="RANDOM", verbose=False):
@ -41,7 +42,12 @@ class XmlMutatorMin:
self.tree = None
# High-level mutators (no database needed)
hl_mutators_delete = ["del_node_and_children", "del_node_but_children", "del_attribute", "del_content"] # Delete items
hl_mutators_delete = [
"del_node_and_children",
"del_node_but_children",
"del_attribute",
"del_content",
] # Delete items
hl_mutators_fuzz = ["fuzz_attribute"] # Randomly change attribute values
# Exposed mutators
@ -74,7 +80,9 @@ class XmlMutatorMin:
""" Serialize a XML document. Basic wrapper around lxml.tostring() """
return ET.tostring(tree, with_tail=False, xml_declaration=True, encoding=tree.docinfo.encoding)
return ET.tostring(
tree, with_tail=False, xml_declaration=True, encoding=tree.docinfo.encoding
)
def __ver(self, version):
@ -161,7 +169,7 @@ class XmlMutatorMin:
# Randomly pick one the function calls
(func, args) = random.choice(l)
# Split by "," and randomly pick one of the arguments
value = random.choice(args.split(','))
value = random.choice(args.split(","))
# Remove superfluous characters
unclean_value = value
value = value.strip(" ").strip("'")
@ -170,49 +178,49 @@ class XmlMutatorMin:
value = attrib_value
# For each type, define some possible replacement values
choices_number = ( \
"0", \
"11111", \
"-128", \
"2", \
"-1", \
"1/3", \
"42/0", \
"1094861636 idiv 1.0", \
"-1123329771506872 idiv 3.8", \
"17=$numericRTF", \
str(3 + random.randrange(0, 100)), \
)
choices_number = (
"0",
"11111",
"-128",
"2",
"-1",
"1/3",
"42/0",
"1094861636 idiv 1.0",
"-1123329771506872 idiv 3.8",
"17=$numericRTF",
str(3 + random.randrange(0, 100)),
)
choices_letter = ( \
"P" * (25 * random.randrange(1, 100)), \
"%s%s%s%s%s%s", \
"foobar", \
)
choices_letter = (
"P" * (25 * random.randrange(1, 100)),
"%s%s%s%s%s%s",
"foobar",
)
choices_alnum = ( \
"Abc123", \
"020F0302020204030204", \
"020F0302020204030204" * (random.randrange(5, 20)), \
)
choices_alnum = (
"Abc123",
"020F0302020204030204",
"020F0302020204030204" * (random.randrange(5, 20)),
)
# Fuzz the value
if random.choice((True,False)) and value == "":
if random.choice((True, False)) and value == "":
# Empty
new_value = value
elif random.choice((True,False)) and value.isdigit():
elif random.choice((True, False)) and value.isdigit():
# Numbers
new_value = random.choice(choices_number)
elif random.choice((True,False)) and value.isalpha():
elif random.choice((True, False)) and value.isalpha():
# Letters
new_value = random.choice(choices_letter)
elif random.choice((True,False)) and value.isalnum():
elif random.choice((True, False)) and value.isalnum():
# Alphanumeric
new_value = random.choice(choices_alnum)
@ -232,22 +240,25 @@ class XmlMutatorMin:
# Log something
if self.verbose:
print("Fuzzing attribute #%i '%s' of tag #%i '%s'" % (rand_attrib_id, rand_attrib, rand_elem_id, rand_elem.tag))
print(
"Fuzzing attribute #%i '%s' of tag #%i '%s'"
% (rand_attrib_id, rand_attrib, rand_elem_id, rand_elem.tag)
)
# Modify the attribute
rand_elem.set(rand_attrib, new_value.decode("utf-8"))
def __del_node_and_children(self):
""" High-level minimizing mutator
Delete a random node and its children (i.e. delete a random tree) """
"""High-level minimizing mutator
Delete a random node and its children (i.e. delete a random tree)"""
self.__del_node(True)
def __del_node_but_children(self):
""" High-level minimizing mutator
Delete a random node but its children (i.e. link them to the parent of the deleted node) """
"""High-level minimizing mutator
Delete a random node but its children (i.e. link them to the parent of the deleted node)"""
self.__del_node(False)
@ -270,7 +281,10 @@ class XmlMutatorMin:
# Log something
if self.verbose:
but_or_and = "and" if delete_children else "but"
print("Deleting tag #%i '%s' %s its children" % (rand_elem_id, rand_elem.tag, but_or_and))
print(
"Deleting tag #%i '%s' %s its children"
% (rand_elem_id, rand_elem.tag, but_or_and)
)
if delete_children is False:
# Link children of the random (soon to be deleted) node to its parent
@ -282,8 +296,8 @@ class XmlMutatorMin:
def __del_content(self):
""" High-level minimizing mutator
Delete the attributes and children of a random node """
"""High-level minimizing mutator
Delete the attributes and children of a random node"""
# Select a node to modify
(rand_elem_id, rand_elem) = self.__pick_element()
@ -297,8 +311,8 @@ class XmlMutatorMin:
def __del_attribute(self):
""" High-level minimizing mutator
Delete a random attribute from a random node """
"""High-level minimizing mutator
Delete a random attribute from a random node"""
# Select a node to modify
(rand_elem_id, rand_elem) = self.__pick_element()
@ -318,7 +332,10 @@ class XmlMutatorMin:
# Log something
if self.verbose:
print("Deleting attribute #%i '%s' of tag #%i '%s'" % (rand_attrib_id, rand_attrib, rand_elem_id, rand_elem.tag))
print(
"Deleting attribute #%i '%s' of tag #%i '%s'"
% (rand_attrib_id, rand_attrib, rand_elem_id, rand_elem.tag)
)
# Delete the attribute
rand_elem.attrib.pop(rand_attrib)
@ -329,4 +346,3 @@ class XmlMutatorMin:
# High-level mutation
self.__exec_among(self, self.hl_mutators_all, min, max)
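The class above is normally driven through the AFL wrapper module shown further down; a minimal standalone sketch of instantiating it (only the constructor arguments and init_from_string() appear in this diff, and the sample XML string is illustrative):

m = XmlMutatorMin(seed=1234, verbose=True)
m.init_from_string("<foo a='1'><bar>text</bar></foo>")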


@ -1,6 +1,6 @@
#!/usr/bin/env python
# encoding: utf-8
'''
"""
Module containing functions shared between multiple AFL modules
@author: Christian Holler (:decoder)
@ -12,7 +12,7 @@ License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
@contact: choller@mozilla.com
'''
"""
from __future__ import print_function
import random
@ -23,18 +23,18 @@ import re
def randel(l):
if not l:
return None
return l[random.randint(0, len(l)-1)]
return l[random.randint(0, len(l) - 1)]
def randel_pop(l):
if not l:
return None
return l.pop(random.randint(0, len(l)-1))
return l.pop(random.randint(0, len(l) - 1))
def write_exc_example(data, exc):
exc_name = re.sub(r'[^a-zA-Z0-9]', '_', repr(exc))
exc_name = re.sub(r"[^a-zA-Z0-9]", "_", repr(exc))
if not os.path.exists(exc_name):
with open(exc_name, 'w') as f:
with open(exc_name, "w") as f:
f.write(data)


@ -1,6 +1,6 @@
#!/usr/bin/env python
# encoding: utf-8
'''
"""
Example Python Module for AFLFuzz
@author: Christian Holler (:decoder)
@ -12,7 +12,7 @@ License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
@contact: choller@mozilla.com
'''
"""
import random
@ -26,12 +26,12 @@ COMMANDS = [
def init(seed):
'''
"""
Called once when AFLFuzz starts up. Used to seed our RNG.
@type seed: int
@param seed: A 32-bit random value
'''
"""
random.seed(seed)
@ -40,7 +40,7 @@ def deinit():
def fuzz(buf, add_buf, max_size):
'''
"""
Called per fuzzing iteration.
@type buf: bytearray
@ -55,13 +55,14 @@ def fuzz(buf, add_buf, max_size):
@rtype: bytearray
@return: A new bytearray containing the mutated data
'''
"""
ret = bytearray(100)
ret[:3] = random.choice(COMMANDS)
return ret
# Uncomment and implement the following methods if you want to use a custom
# trimming algorithm. See also the documentation for a better API description.

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python
# encoding: utf-8
'''
"""
Simple Chunk Cross-Over Replacement Module for AFLFuzz
@author: Christian Holler (:decoder)
@ -12,24 +12,24 @@ License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
@contact: choller@mozilla.com
'''
"""
import random
def init(seed):
'''
"""
Called once when AFLFuzz starts up. Used to seed our RNG.
@type seed: int
@param seed: A 32-bit random value
'''
"""
# Seed our RNG
random.seed(seed)
def fuzz(buf, add_buf, max_size):
'''
"""
Called per fuzzing iteration.
@type buf: bytearray
@ -44,7 +44,7 @@ def fuzz(buf, add_buf, max_size):
@rtype: bytearray
@return: A new bytearray containing the mutated data
'''
"""
# Make a copy of our input buffer for returning
ret = bytearray(buf)
@ -58,7 +58,9 @@ def fuzz(buf, add_buf, max_size):
rand_dst_idx = random.randint(0, len(buf))
# Make the chunk replacement
ret[rand_dst_idx:rand_dst_idx + fragment_len] = add_buf[rand_src_idx:rand_src_idx + fragment_len]
ret[rand_dst_idx : rand_dst_idx + fragment_len] = add_buf[
rand_src_idx : rand_src_idx + fragment_len
]
# Return data
return ret
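The slice assignment above is what performs the cross-over: fragment_len bytes taken from add_buf overwrite the same number of bytes in ret. A tiny standalone illustration with made-up values:

ret = bytearray(b"AAAAAAAA")
add_buf = bytearray(b"BBBB")
fragment_len = 2
rand_src_idx = 1
rand_dst_idx = 3
ret[rand_dst_idx : rand_dst_idx + fragment_len] = add_buf[rand_src_idx : rand_src_idx + fragment_len]
assert ret == bytearray(b"AAABBAAA")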


@ -27,7 +27,7 @@ def log(text):
def init(seed):
"""
Called once when AFL starts up. Seed is used to identify the AFL instance in log files
"""
global __mutator__
@ -72,7 +72,10 @@ def fuzz(buf, add_buf, max_size):
if via_buffer:
try:
__mutator__.init_from_string(buf_str)
log("fuzz(): Mutator successfully initialized with AFL buffer (%d bytes)" % len(buf_str))
log(
"fuzz(): Mutator successfully initialized with AFL buffer (%d bytes)"
% len(buf_str)
)
except Exception:
via_buffer = False
log("fuzz(): Can't initialize mutator with AFL buffer")
@ -104,7 +107,7 @@ def fuzz(buf, add_buf, max_size):
# Main (for debug)
if __name__ == '__main__':
if __name__ == "__main__":
__log__ = True
__log_file__ = "/dev/stdout"
@ -112,7 +115,9 @@ if __name__ == '__main__':
init(__seed__)
in_1 = bytearray("<foo ddd='eeee'>ffff<a b='c' d='456' eee='ffffff'>zzzzzzzzzzzz</a><b yyy='YYY' zzz='ZZZ'></b></foo>")
in_1 = bytearray(
"<foo ddd='eeee'>ffff<a b='c' d='456' eee='ffffff'>zzzzzzzzzzzz</a><b yyy='YYY' zzz='ZZZ'></b></foo>"
)
in_2 = bytearray("<abc abc123='456' abcCBA='ppppppppppppppppppppppppppppp'/>")
out = fuzz(in_1, in_2)
print(out)


@ -70,7 +70,7 @@ int main(int argc, char **argv) {
len = __AFL_FUZZ_TESTCASE_LEN; // do not use the macro directly in a call!
fprintf(stderr, "input: %zd \"%s\"\n", len, buf);
// fprintf(stderr, "input: %zd \"%s\"\n", len, buf);
/* do we have enough data? */
if (len < 8) continue;