Mirror of https://github.com/AFLplusplus/AFLplusplus.git (synced 2025-06-27 07:51:54 +00:00)

Compare commits: 127 commits (SHA1):

bd0a23de73 4619a1395b 0c38850f95 07884e0054 bdadbb7207 e389eb9842 5cf0655071 f81ef4abf4
6036cf8437 1cad645400 36846836ed 79f1a44a01 c2127e3ff7 2ad495ad0a 8e051fd075 af628b16d1
c219502f0f a5da9ce42c 79e02c2a9b 3a461944ec 78d96c4dc8 ee0ca07f3c 7ae7b0f373 e2b4bc9310
6c9777de13 2f7e57f6aa 5c239451cf 35ca51c5a8 047f3436e9 5d181950eb 48a1a29baa c05d392cd9
cc7c651dc9 e6ef2ee338 a090b2013f 564f491566 2daeeab844 4ab90e739f 745bc083d1 7674dac1a1
fb2a6b6941 70fe872940 a252943236 8c133b607c 2785c8b197 a81b5aa921 8ad78f5b65 ac9cfd89da
c67c4ce757 974aab6cf6 b957218a3a f629f4e341 871c3c91ec 100aac4dd3 d941da33ae 62767a42dc
89cf94f0e6 17211253b2 6998489b26 4290cb5877 801f2449ec aaf5fcd98a 5edfb7ba85 a5cb522f01
3195119dad d6fe6b9537 c0f9fba6d6 1a713ff420 89af2ef7a9 907c5d4276 5dd35f5281 857229654e
4c47b242eb 938512a6b9 7444cfa450 f091b8d692 7d97ffb1e8 80bdbf7be0 686719cdca 6caec2169c
5212481352 d999725de2 145c673a80 c5017945f7 5c4c49d9ca cebde1f9e6 0298ae82b0 512f53984c
e3a5c31307 dd2fd80274 ffc1fc655f fe477e96ae 98559ea8b0 f31d8b8401 389e348826 98fd50f78f
95561ec5a7 fe9da70705 95c77c8486 e45333bcf9 c906c042be 9bd1e19d7f 6ce9230ed6 1d60c39191
70651d60bd 385312c658 87a607c7d0 1ba5d1008e 129a5adaf1 d827bc4580 64e46dcefc c0b3127b9d
7cfa690d1c 22a3c7f7d0 16ffbb37f5 ea05f3f4cd 91f2f057e4 d44cf1344d 756206e4d7 2ff6e5023f
223bd70f1f dd3f4bb41c f3e783d343 2cd4624779 e11665564b 93cebd6c7f a124540e50
@@ -33,13 +33,13 @@ if CLANG_FORMAT_BIN is None:
 o, _ = p.communicate()
 o = str(o, "utf-8")
 o = re.sub(r".*ersion ", "", o)
-#o = o[len("clang-format version "):].strip()
-o = o[:o.find(".")]
+# o = o[len("clang-format version "):].strip()
+o = o[: o.find(".")]
 o = int(o)
 except:
-print ("clang-format-11 is needed. Aborted.")
+print("clang-format-11 is needed. Aborted.")
 exit(1)
-#if o < 7:
+# if o < 7:
 #  if subprocess.call(['which', 'clang-format-7'], stdout=subprocess.PIPE) == 0:
 #    CLANG_FORMAT_BIN = 'clang-format-7'
 #  elif subprocess.call(['which', 'clang-format-8'], stdout=subprocess.PIPE) == 0:
@@ -52,8 +52,8 @@ if CLANG_FORMAT_BIN is None:
 #    print ("clang-format 7 or above is needed. Aborted.")
 #    exit(1)
 else:
-CLANG_FORMAT_BIN = 'clang-format-11'
+CLANG_FORMAT_BIN = "clang-format-11"
 
 COLUMN_LIMIT = 80
 for line in fmt.split("\n"):
 line = line.split(":")
@@ -69,26 +69,47 @@ def custom_format(filename):
 in_define = False
 last_line = None
 out = ""
 
 for line in src.split("\n"):
 if line.lstrip().startswith("#"):
-if line[line.find("#")+1:].lstrip().startswith("define"):
+if line[line.find("#") + 1 :].lstrip().startswith("define"):
 in_define = True
 
-if "/*" in line and not line.strip().startswith("/*") and line.endswith("*/") and len(line) < (COLUMN_LIMIT-2):
+if (
+    "/*" in line
+    and not line.strip().startswith("/*")
+    and line.endswith("*/")
+    and len(line) < (COLUMN_LIMIT - 2)
+):
 cmt_start = line.rfind("/*")
-line = line[:cmt_start] + " " * (COLUMN_LIMIT-2 - len(line)) + line[cmt_start:]
+line = (
+    line[:cmt_start]
+    + " " * (COLUMN_LIMIT - 2 - len(line))
+    + line[cmt_start:]
+)
 
 define_padding = 0
 if last_line is not None and in_define and last_line.endswith("\\"):
 last_line = last_line[:-1]
-define_padding = max(0, len(last_line[last_line.rfind("\n")+1:]))
+define_padding = max(0, len(last_line[last_line.rfind("\n") + 1 :]))
 
-if last_line is not None and last_line.strip().endswith("{") and line.strip() != "":
+if (
+    last_line is not None
+    and last_line.strip().endswith("{")
+    and line.strip() != ""
+):
 line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
-elif last_line is not None and last_line.strip().startswith("}") and line.strip() != "":
+elif (
+    last_line is not None
+    and last_line.strip().startswith("}")
+    and line.strip() != ""
+):
 line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
-elif line.strip().startswith("}") and last_line is not None and last_line.strip() != "":
+elif (
+    line.strip().startswith("}")
+    and last_line is not None
+    and last_line.strip() != ""
+):
 line = (" " * define_padding + "\\" if in_define else "") + "\n" + line
 
 if not line.endswith("\\"):
@@ -97,14 +118,15 @@ def custom_format(filename):
 out += line + "\n"
 last_line = line
 
-return (out)
+return out
+
 
 args = sys.argv[1:]
 if len(args) == 0:
-print ("Usage: ./format.py [-i] <filename>")
-print ()
-print (" The -i option, if specified, let the script to modify in-place")
-print (" the source files. By default the results are written to stdout.")
+print("Usage: ./format.py [-i] <filename>")
+print()
+print(" The -i option, if specified, let the script to modify in-place")
+print(" the source files. By default the results are written to stdout.")
 print()
 exit(1)
 
@@ -120,4 +142,3 @@ for filename in args:
 f.write(code)
 else:
 print(code)
-
27  .github/workflows/build_aflplusplus_docker.yaml (new file, vendored)

@@ -0,0 +1,27 @@
name: Publish Docker Images
on:
  push:
    branches: [ stable ]
    paths:
      - Dockerfile
  pull_request:
    branches: [ stable ]
    paths:
      - Dockerfile
jobs:
  push_to_registry:
    name: Push Docker images to Dockerhub
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - name: Login to Dockerhub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}
      - name: Publish aflpp to Registry
        uses: docker/build-push-action@v2
        with:
          context: .
          push: true
          tags: aflplusplus/aflplusplus:latest
10  Dockerfile

@@ -11,6 +11,8 @@ LABEL "about"="AFLplusplus docker image"
 
 ARG DEBIAN_FRONTEND=noninteractive
 
+env NO_ARCH_OPT 1
+
 RUN apt-get update && \
 apt-get -y install --no-install-suggests --no-install-recommends \
 automake \
@@ -48,16 +50,16 @@ RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 0
 
 ENV LLVM_CONFIG=llvm-config-12
 ENV AFL_SKIP_CPUFREQ=1
+ENV AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES=1
 
-RUN git clone https://github.com/vanhauser-thc/afl-cov /afl-cov
+RUN git clone --depth=1 https://github.com/vanhauser-thc/afl-cov /afl-cov
 RUN cd /afl-cov && make install && cd ..
 
 COPY . /AFLplusplus
 WORKDIR /AFLplusplus
 
-RUN export REAL_CXX=g++-10 && export CC=gcc-10 && \
-export CXX=g++-10 && make clean && \
-make distrib CFLAGS="-O3 -funroll-loops -D_FORTIFY_SOURCE=2" && make install && make clean
+RUN export CC=gcc-10 && export CXX=g++-10 && make clean && \
+make distrib && make install && make clean
 
 RUN echo 'alias joe="jupp --wordwrap"' >> ~/.bashrc
 RUN echo 'export PS1="[afl++]$PS1"' >> ~/.bashrc
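Not part of the diff, but as a usage sketch for the image built above: the workflow publishes to Docker Hub, so the image can be fetched with `docker pull aflplusplus/aflplusplus:latest`; it can also be rebuilt locally from the repository root with `docker build -t aflplusplus/aflplusplus .` and entered with `docker run -ti aflplusplus/aflplusplus`.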
21  GNUmakefile

@@ -57,8 +57,6 @@ ifdef MSAN_BUILD
 override LDFLAGS += -fsanitize=memory
 endif
 
-
-
 ifeq "$(findstring android, $(shell $(CC) --version 2>/dev/null))" ""
 ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -flto=full -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
 CFLAGS_FLTO ?= -flto=full
@@ -77,17 +75,17 @@ ifeq "$(shell echo 'int main() {return 0; }' | $(CC) -fno-move-loop-invariants -
 SPECIAL_PERFORMANCE += -fno-move-loop-invariants -fdisable-tree-cunrolli
 endif
 
-ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
-ifndef SOURCE_DATE_EPOCH
-HAVE_MARCHNATIVE = 1
-CFLAGS_OPT += -march=native
-endif
-endif
+#ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
+# ifndef SOURCE_DATE_EPOCH
+#  HAVE_MARCHNATIVE = 1
+#  CFLAGS_OPT += -march=native
+# endif
+#endif
 
 ifneq "$(shell uname)" "Darwin"
-ifeq "$(HAVE_MARCHNATIVE)" "1"
-SPECIAL_PERFORMANCE += -march=native
-endif
+#ifeq "$(HAVE_MARCHNATIVE)" "1"
+#  SPECIAL_PERFORMANCE += -march=native
+#endif
 # OS X does not like _FORTIFY_SOURCE=2
 ifndef DEBUG
 CFLAGS_OPT += -D_FORTIFY_SOURCE=2
@@ -366,6 +364,7 @@ help:
 @echo NO_PYTHON - disable python support
 @echo NO_SPLICING - disables splicing mutation in afl-fuzz, not recommended for normal fuzzing
 @echo AFL_NO_X86 - if compiling on non-intel/amd platforms
+@echo NO_ARCH_OPT - builds afl++ without machine architecture optimizations
 @echo "LLVM_CONFIG - if your distro doesn't use the standard name for llvm-config (e.g. Debian)"
 @echo "=========================================="
 @echo e.g.: make ASAN_BUILD=1
@@ -43,7 +43,8 @@ endif
 LLVMVER = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/git//' | sed 's/svn//' )
 LLVM_MAJOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/\..*//' )
 LLVM_MINOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/.*\.//' | sed 's/git//' | sed 's/svn//' | sed 's/ .*//' )
-LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^3\.[0-3]|^1[3-9]' && echo 1 || echo 0 )
+LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^3\.[0-3]|^[0-2]\.' && echo 1 || echo 0 )
+LLVM_TOO_NEW = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[3-9]' && echo 1 || echo 0 )
 LLVM_NEW_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[0-9]' && echo 1 || echo 0 )
 LLVM_10_OK = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[1-9]|^10\.[1-9]|^10\.0.[1-9]' && echo 1 || echo 0 )
 LLVM_HAVE_LTO = $(shell $(LLVM_CONFIG) --version 2>/dev/null | egrep -q '^1[1-9]' && echo 1 || echo 0 )
@@ -58,7 +59,11 @@ ifeq "$(LLVMVER)" ""
 endif
 
 ifeq "$(LLVM_UNSUPPORTED)" "1"
-$(warning llvm_mode only supports llvm versions 3.4 up to 12)
+$(error llvm_mode only supports llvm from version 3.4 onwards)
+endif
+
+ifeq "$(LLVM_TOO_NEW)" "1"
+$(warning you are using an in-development llvm version - this might break llvm_mode!)
 endif
 
 LLVM_TOO_OLD=1
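Net effect of these two hunks: the unsupported-version check is narrowed to LLVM releases older than 3.4 (the `^[0-2]\.` alternative) and upgraded from a warning to a hard `$(error ...)`, while a separate `LLVM_TOO_NEW` check (`^1[3-9]`) only warns when an in-development LLVM 13 or newer is detected instead of refusing it outright.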
39  README.md

@@ -2,11 +2,9 @@
 
 <img align="right" src="https://raw.githubusercontent.com/andreafioraldi/AFLplusplus-website/master/static/logo_256x256.png" alt="AFL++ Logo">
 
-
-
-Release Version: [3.00c](https://github.com/AFLplusplus/AFLplusplus/releases)
+Release Version: [3.10c](https://github.com/AFLplusplus/AFLplusplus/releases)
 
-Github Version: 3.01a
+Github Version: 3.11a
 
 Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus)
 
@@ -23,11 +21,18 @@
 mutations, more and better instrumentation, custom module support, etc.
 
 If you want to use afl++ for your academic work, check the [papers page](https://aflplus.plus/papers/)
-on the website.
+on the website. To cite our work, look at the [Cite](#cite) section.
+For comparisons use the fuzzbench `aflplusplus` setup, or use `afl-clang-fast`
+with `AFL_LLVM_CMPLOG=1`.
 
-## Major changes in afl++ 3.0
+## Major changes in afl++ 3.00 + 3.10
 
-With afl++ 3.0 we introduced changes that break some previous afl and afl++
+With afl++ 3.10 we introduced the following changes from previous behaviours:
+* The '+' feature of the '-t' option now means to auto-calculate the timeout
+with the value given being the maximum timeout. The original meaning of
+"skipping timeouts instead of abort" is now inherent to the -t option.
+
+With afl++ 3.00 we introduced changes that break some previous afl and afl++
 behaviours and defaults:
 
 * There are no llvm_mode and gcc_plugin subdirectories anymore and there is
@@ -219,6 +224,7 @@ These build options exist:
 * NO_PYTHON - disable python support
 * NO_SPLICING - disables splicing mutation in afl-fuzz, not recommended for normal fuzzing
 * AFL_NO_X86 - if compiling on non-intel/amd platforms
+* NO_ARCH_OPT - builds afl++ without machine architecture optimizations
 * LLVM_CONFIG - if your distro doesn't use the standard name for llvm-config (e.g. Debian)
 
 e.g.: make ASAN_BUILD=1
@@ -752,6 +758,10 @@ campaigns as these are much shorter runnings.
 * for CMPLOG targets, 60% for `-l 2`, 40% for `-l 3`
 
 4. Do *not* run any `-M` modes, just running `-S` modes is better for CI fuzzing.
+`-M` enables deterministic fuzzing, old queue handling etc. which is good for
+a fuzzing campaign but not good for short CI runs.
+
+How this can look like can e.g. be seen at afl++'s setup in Google's [oss-fuzz](https://github.com/google/oss-fuzz/blob/4bb61df7905c6005000f5766e966e6fe30ab4559/infra/base-images/base-builder/compile_afl#L69).
 
 ## Fuzzing binary-only targets
 
@@ -789,8 +799,7 @@ If [afl-dyninst](https://github.com/vanhauser-thc/afl-dyninst) works for
 your binary, then you can use afl-fuzz normally and it will have twice
 the speed compared to qemu_mode (but slower than persistent mode).
 Note that several other binary rewriters exist, all with their advantages and
-caveats. As rewriting a binary is much faster than Qemu this is a highly
-recommended approach!
+caveats.
 
 ### Unicorn
 
@@ -1166,8 +1175,18 @@ Thank you!
 
 ## Cite
 
+If you use AFLpluplus to compare to your work, please use either `afl-clang-lto`
+or `afl-clang-fast` with `AFL_LLVM_CMPLOG=1` for building targets and
+`afl-fuzz` with the command line option `-l 2` for fuzzing.
+The most effective setup is the `aflplusplus` default configuration on Google's [fuzzbench](https://github.com/google/fuzzbench/tree/master/fuzzers/aflplusplus).
+
 If you use AFLplusplus in scientific work, consider citing [our paper](https://www.usenix.org/conference/woot20/presentation/fioraldi) presented at WOOT'20:
-```
+
++ Andrea Fioraldi, Dominik Maier, Heiko Eißfeldt, and Marc Heuse. “AFL++: Combining incremental steps of fuzzing research”. In 14th USENIX Workshop on Offensive Technologies (WOOT 20). USENIX Association, Aug. 2020.
+
+Bibtex:
+
+```bibtex
 @inproceedings {AFLplusplus-Woot20,
 author = {Andrea Fioraldi and Dominik Maier and Heiko Ei{\ss}feldt and Marc Heuse},
 title = {{AFL++}: Combining Incremental Steps of Fuzzing Research},
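A concrete invocation sketch of the comparison setup recommended in the README text above (hedged; paths and binary names are placeholders): build the target once with `afl-clang-fast`, build it a second time with `AFL_LLVM_CMPLOG=1` set in the environment, and then fuzz with something like `afl-fuzz -l 2 -c ./target.cmplog -i seeds -o out -- ./target @@`, where `-c` points afl-fuzz at the CmpLog build as described in the afl++ documentation.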
5  TODO.md

@@ -6,16 +6,13 @@
 - CPU affinity for many cores? There seems to be an issue > 96 cores
 - afl-plot to support multiple plot_data
 - afl_custom_fuzz_splice_optin()
+- afl_custom_splice()
 - intel-pt tracer
 
 ## Further down the road
 
 afl-fuzz:
 - setting min_len/max_len/start_offset/end_offset limits for mutation output
-- add __sanitizer_cov_trace_cmp* support via shmem
-
-llvm_mode:
-- add __sanitizer_cov_trace_cmp* support
 
 qemu_mode:
 - non colliding instrumentation
4  afl-cmin

@@ -411,8 +411,8 @@ BEGIN {
 retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -- \""target_bin"\" "prog_args_string)
 } else {
 print " Processing "in_count" files (forkserver mode)..."
-# print AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -- \""target_bin"\" "prog_args_string" </dev/null"
-retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -- \""target_bin"\" "prog_args_string" </dev/null")
+# print AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -A \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null"
+retval = system( AFL_CMIN_CRASHES_ONLY"\""showmap"\" -m "mem_limit" -t "timeout" -o \""trace_dir"\" -Z "extra_par" -i \""in_dir"\" -A \""stdin_file"\" -- \""target_bin"\" "prog_args_string" </dev/null")
 }
 
 if (retval && !AFL_CMIN_CRASHES_ONLY) {
11  afl-plot

@@ -99,7 +99,7 @@ if [ ! -d "$outputdir" ]; then
 
 fi
 
-rm -f "$outputdir/high_freq.png" "$outputdir/low_freq.png" "$outputdir/exec_speed.png"
+rm -f "$outputdir/high_freq.png" "$outputdir/low_freq.png" "$outputdir/exec_speed.png" "$outputdir/edges.png"
 mv -f "$outputdir/index.html" "$outputdir/index.html.orig" 2>/dev/null
 
 echo "[*] Generating plots..."
@@ -152,6 +152,12 @@ set ytics auto
 plot '$inputdir/plot_data' using 1:11 with filledcurve x1 title '' linecolor rgb '#0090ff' fillstyle transparent solid 0.2 noborder, \\
 '$inputdir/plot_data' using 1:11 with lines title ' execs/sec' linecolor rgb '#0090ff' linewidth 3 smooth bezier;
 
+set terminal png truecolor enhanced size 1000,300 butt
+set output '$outputdir/edges.png'
+
+set ytics auto
+plot '$inputdir/plot_data' using 1:13 with lines title ' edges' linecolor rgb '#0090ff' linewidth 3
+
 _EOF_
 
 ) | gnuplot
@@ -172,6 +178,7 @@ cat >"$outputdir/index.html" <<_EOF_
 <tr><td><b>Generated on:</b></td><td>`date`</td></tr>
 </table>
 <p>
+<img src="edges.png" width=1000 height=300>
 <img src="high_freq.png" width=1000 height=300><p>
 <img src="low_freq.png" width=1000 height=200><p>
 <img src="exec_speed.png" width=1000 height=200>
@@ -183,7 +190,7 @@ _EOF_
 # sensitive, this seems like a reasonable trade-off.
 
 chmod 755 "$outputdir"
-chmod 644 "$outputdir/high_freq.png" "$outputdir/low_freq.png" "$outputdir/exec_speed.png" "$outputdir/index.html"
+chmod 644 "$outputdir/high_freq.png" "$outputdir/low_freq.png" "$outputdir/exec_speed.png" "$outputdir/edges.png" "$outputdir/index.html"
 
 echo "[+] All done - enjoy your charts!"
 
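Usage sketch (not part of the diff): afl-plot is normally run as `afl-plot <fuzzer state directory> <graph output directory>`; with this change the generated index.html gains an additional edges.png chart, plotted from column 13 of plot_data alongside the existing coverage and execution-speed graphs.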
@@ -49,6 +49,12 @@ if [ "$PLATFORM" = "FreeBSD" ] ; then
 sysctl kern.elf64.aslr.enable=0
 } > /dev/null
 echo Settings applied.
+cat <<EOF
+In order to suppress core file generation during fuzzing it is recommended to set
+me:\\
+:coredumpsize=0:
+in the ~/.login_conf file for the user used for fuzzing.
+EOF
 echo It is recommended to boot the kernel with lots of security off - if you are running a machine that is in a secured network - so set this:
 echo ' sysctl hw.ibrs_disable=1'
 echo 'Setting kern.pmap.pg_ps_enabled=0 into /boot/loader.conf might be helpful too.'
@@ -60,8 +66,14 @@ if [ "$PLATFORM" = "OpenBSD" ] ; then
 DONE=1
 fi
 if [ "$PLATFORM" = "DragonFly" ] ; then
-echo
-echo 'System security features cannot be disabled on DragonFly.'
+#/sbin/sysctl kern.corefile=/dev/null
+#echo Settings applied.
+cat <<EOF
+In order to suppress core file generation during fuzzing it is recommended to set
+me:\\
+:coredumpsize=0:
+in the ~/.login_conf file for the user used for fuzzing.
+EOF
 DONE=1
 fi
 if [ "$PLATFORM" = "NetBSD" ] ; then
@@ -88,7 +100,7 @@ fi
 if [ "$PLATFORM" = "Haiku" ] ; then
 SETTINGS=~/config/settings/system/debug_server/settings
 [ -r ${SETTINGS} ] && grep -qE "default_action\s+kill" ${SETTINGS} && { echo "Nothing to do"; } || { \
-echo We change the debug_server default_action from user to silenty kill; \
+echo We change the debug_server default_action from user to silently kill; \
 [ ! -r ${SETTINGS} ] && echo "default_action kill" >${SETTINGS} || { mv ${SETTINGS} s.tmp; sed -e "s/default_action\s\s*user/default_action kill/" s.tmp > ${SETTINGS}; rm s.tmp; }; \
 echo Settings applied.; \
 }
@@ -39,7 +39,7 @@
 #include "libhfcommon/util.h"
 
 #define PROG_NAME "honggfuzz"
-#define PROG_VERSION "2.3"
+#define PROG_VERSION "2.4"
 
 /* Name of the template which will be replaced with the proper name of the file */
 #define _HF_FILE_PLACEHOLDER "___FILE___"
@@ -208,6 +208,7 @@ typedef struct {
 const char* crashDir;
 const char* covDirNew;
 bool saveUnique;
+bool saveSmaller;
 size_t dynfileqMaxSz;
 size_t dynfileqCnt;
 dynfile_t* dynfileqCurrent;
@@ -279,9 +280,9 @@ typedef struct {
 cmpfeedback_t* cmpFeedbackMap;
 int cmpFeedbackFd;
 bool cmpFeedback;
-const char* blacklistFile;
-uint64_t* blacklist;
-size_t blacklistCnt;
+const char* blocklistFile;
+uint64_t* blocklist;
+size_t blocklistCnt;
 bool skipFeedbackOnTimeout;
 uint64_t maxCov[4];
 dynFileMethod_t dynFileMethod;
@@ -77,11 +77,11 @@ static inline uint64_t util_rndGet(uint64_t min, uint64_t max) {
 }
 static inline uint64_t util_rnd64() { return rand_below(afl_struct, 1 << 30); }
 
-static inline size_t input_getRandomInputAsBuf(run_t *run, const uint8_t **buf) {
-*buf = queue_input;
+static inline const uint8_t* input_getRandomInputAsBuf(run_t* run, size_t* len) {
+*len = queue_input_size;
 run->dynfile->data = queue_input;
 run->dynfile->size = queue_input_size;
-return queue_input_size;
+return queue_input;
 }
 static inline void input_setSize(run_t* run, size_t sz) {
 run->dynfile->size = sz;
@@ -1 +0,0 @@
-.
3  custom_mutators/honggfuzz/libhfcommon/common.h (new file)

@@ -0,0 +1,3 @@
#ifndef LOG_E
#define LOG_E LOG_F
#endif
File diff suppressed because it is too large
10  custom_mutators/rust/.gitignore (new file, vendored)

@@ -0,0 +1,10 @@
# Generated by Cargo
# will have compiled files and executables
/target/

# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock

# These are backup files generated by rustfmt
**/*.rs.bk
8  custom_mutators/rust/Cargo.toml (new file)

@@ -0,0 +1,8 @@
[workspace]
members = [
  "custom_mutator-sys",
  "custom_mutator",
  "example",
  # Lain needs a nightly toolchain
  # "example_lain",
]
11  custom_mutators/rust/README.md (new file)

@@ -0,0 +1,11 @@
# Rust Custom Mutators

Bindings to create custom mutators in Rust.

These bindings are documented with rustdoc. To view the documentation run
```cargo doc -p custom_mutator --open```.

A minimal example can be found in `example`. Build it using `cargo build --example example_mutator`.

An example using [lain](https://github.com/microsoft/lain) for structured fuzzing can be found in `example_lain`.
Since lain requires a nightly rust toolchain, you need to set one up before you can play with it.
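To illustrate what such a mutator looks like, here is a minimal sketch based on the `CustomMutator` trait and `export_mutator!` macro defined in `custom_mutators/rust/custom_mutator/src/lib.rs` further down this page. The struct name and the byte-flipping mutation are invented for illustration only, and the crate must be built as a library with `crate-type = ["cdylib"]`:

```rust
use custom_mutator::{export_mutator, CustomMutator};

// Hypothetical example mutator: flips the first byte of every input.
struct FlipFirstByteMutator;

impl CustomMutator for FlipFirstByteMutator {
    type Error = ();

    fn init(_seed: u32) -> Result<Self, Self::Error> {
        Ok(Self)
    }

    fn fuzz<'b, 's: 'b>(
        &'s mut self,
        buffer: &'b mut [u8],
        _add_buff: Option<&[u8]>,
        max_size: usize,
    ) -> Result<Option<&'b [u8]>, Self::Error> {
        if let Some(first) = buffer.first_mut() {
            *first ^= 0xff; // trivial in-place mutation
        }
        // Respect the maximum size afl-fuzz allows for this mutation.
        let len = buffer.len().min(max_size);
        Ok(Some(&buffer[..len]))
    }
}

// Exports the afl_custom_* C symbols that afl-fuzz expects from a custom mutator.
export_mutator!(FlipFirstByteMutator);
```

Built with `cargo build --release`, the resulting shared library in `target/release/` can then be handed to afl-fuzz through the `AFL_CUSTOM_MUTATOR_LIBRARY` environment variable.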
12  custom_mutators/rust/custom_mutator-sys/Cargo.toml (new file)

@@ -0,0 +1,12 @@
[package]
name = "custom_mutator-sys"
version = "0.1.0"
authors = ["Julius Hohnerlein <julihoh@users.noreply.github.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]

[build-dependencies]
bindgen = "0.56"
42  custom_mutators/rust/custom_mutator-sys/build.rs (new file)

@@ -0,0 +1,42 @@
extern crate bindgen;

use std::env;
use std::path::PathBuf;

// this code is largely taken straight from the handbook: https://github.com/fitzgen/bindgen-tutorial-bzip2-sys
fn main() {
    // Tell cargo to invalidate the built crate whenever the wrapper changes
    println!("cargo:rerun-if-changed=wrapper.h");

    // The bindgen::Builder is the main entry point
    // to bindgen, and lets you build up options for
    // the resulting bindings.
    let bindings = bindgen::Builder::default()
        // The input header we would like to generate
        // bindings for.
        .header("wrapper.h")
        .whitelist_type("afl_state_t")
        .blacklist_type(r"u\d+")
        .opaque_type(r"_.*")
        .opaque_type("FILE")
        .opaque_type("in_addr(_t)?")
        .opaque_type("in_port(_t)?")
        .opaque_type("sa_family(_t)?")
        .opaque_type("sockaddr_in(_t)?")
        .opaque_type("time_t")
        .rustfmt_bindings(true)
        .size_t_is_usize(true)
        // Tell cargo to invalidate the built crate whenever any of the
        // included header files changed.
        .parse_callbacks(Box::new(bindgen::CargoCallbacks))
        // Finish the builder and generate the bindings.
        .generate()
        // Unwrap the Result and panic on failure.
        .expect("Unable to generate bindings");

    // Write the bindings to the $OUT_DIR/bindings.rs file.
    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
    bindings
        .write_to_file(out_path.join("bindings.rs"))
        .expect("Couldn't write bindings!");
}
5  custom_mutators/rust/custom_mutator-sys/src/lib.rs (new file)

@@ -0,0 +1,5 @@
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]

include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
4  custom_mutators/rust/custom_mutator-sys/wrapper.h (new file)

@@ -0,0 +1,4 @@
#include "../../../include/afl-fuzz.h"
#include "../../../include/common.h"
#include "../../../include/config.h"
#include "../../../include/debug.h"
13  custom_mutators/rust/custom_mutator/Cargo.toml (new file)

@@ -0,0 +1,13 @@
[package]
name = "custom_mutator"
version = "0.1.0"
authors = ["Julius Hohnerlein <julihoh@users.noreply.github.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
afl_internals = ["custom_mutator-sys"]

[dependencies]
custom_mutator-sys = { path = "../custom_mutator-sys", optional=true }
634  custom_mutators/rust/custom_mutator/src/lib.rs (new file)
@ -0,0 +1,634 @@
|
|||||||
|
//! Somewhat safe and somewhat ergonomic bindings for creating [AFL++](https://github.com/AFLplusplus/AFLplusplus) [custom mutators](https://github.com/AFLplusplus/AFLplusplus/blob/stable/docs/custom_mutators.md) in Rust.
|
||||||
|
//!
|
||||||
|
//! # Usage
|
||||||
|
//! AFL++ custom mutators are expected to be dynamic libraries which expose a set of symbols.
|
||||||
|
//! Check out [`CustomMutator`] to see which functions of the API are supported.
|
||||||
|
//! Then use [`export_mutator`] to export the correct symbols for your mutator.
|
||||||
|
//! In order to use the mutator, your crate needs to be a library crate and have a `crate-type` of `cdylib`.
|
||||||
|
//! Putting
|
||||||
|
//! ```yaml
|
||||||
|
//! [lib]
|
||||||
|
//! crate-type = ["cdylib"]
|
||||||
|
//! ```
|
||||||
|
//! into your `Cargo.toml` should do the trick.
|
||||||
|
//! The final executable can be found in `target/(debug|release)/your_crate_name.so`.
|
||||||
|
//! # Example
|
||||||
|
//! See [`export_mutator`] for an example.
|
||||||
|
//!
|
||||||
|
//! # On `panic`s
|
||||||
|
//! This binding is panic-safe in that it will prevent panics from unwinding into AFL++. Any panic will `abort` at the boundary between the custom mutator and AFL++.
|
||||||
|
//!
|
||||||
|
//! # Access to AFL++ internals
|
||||||
|
//! This crate has an optional feature "afl_internals", which gives access to AFL++'s internal state.
|
||||||
|
//! The state is passed to [`CustomMutator::init`], when the feature is activated.
|
||||||
|
//!
|
||||||
|
//! _This is completely unsafe and uses automatically generated types extracted from the AFL++ source._
|
||||||
|
use std::{ffi::CStr, fmt::Debug};
|
||||||
|
|
||||||
|
#[cfg(feature = "afl_internals")]
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub use custom_mutator_sys::afl_state;
|
||||||
|
|
||||||
|
#[allow(unused_variables)]
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub trait RawCustomMutator {
|
||||||
|
#[cfg(feature = "afl_internals")]
|
||||||
|
fn init(afl: &'static afl_state, seed: c_uint) -> Self
|
||||||
|
where
|
||||||
|
Self: Sized;
|
||||||
|
#[cfg(not(feature = "afl_internals"))]
|
||||||
|
fn init(seed: u32) -> Self
|
||||||
|
where
|
||||||
|
Self: Sized;
|
||||||
|
|
||||||
|
fn fuzz<'b, 's: 'b>(
|
||||||
|
&'s mut self,
|
||||||
|
buffer: &'b mut [u8],
|
||||||
|
add_buff: Option<&[u8]>,
|
||||||
|
max_size: usize,
|
||||||
|
) -> Option<&'b [u8]>;
|
||||||
|
|
||||||
|
fn fuzz_count(&mut self, buffer: &[u8]) -> u32 {
|
||||||
|
1
|
||||||
|
}
|
||||||
|
|
||||||
|
fn queue_new_entry(&mut self, filename_new_queue: &CStr, _filename_orig_queue: Option<&CStr>) {}
|
||||||
|
|
||||||
|
fn queue_get(&mut self, filename: &CStr) -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
fn describe(&mut self, max_description: usize) -> Option<&CStr> {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
fn introspection(&mut self) -> Option<&CStr> {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/*fn post_process(&self, buffer: &[u8], unsigned char **out_buf)-> usize;
|
||||||
|
int afl_custom_init_trim(&self, buffer: &[u8]);
|
||||||
|
size_t afl_custom_trim(&self, unsigned char **out_buf);
|
||||||
|
int afl_custom_post_trim(&self, unsigned char success);
|
||||||
|
size_t afl_custom_havoc_mutation(&self, buffer: &[u8], unsigned char **out_buf, size_t max_size);
|
||||||
|
unsigned char afl_custom_havoc_mutation_probability(&self);*/
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wrappers for the custom mutator which provide the bridging between the C API and CustomMutator.
|
||||||
|
/// These wrappers are not intended to be used directly, rather export_mutator will use them to publish the custom mutator C API.
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub mod wrappers {
|
||||||
|
#[cfg(feature = "afl_internals")]
|
||||||
|
use custom_mutator_sys::afl_state;
|
||||||
|
|
||||||
|
use core::slice;
|
||||||
|
use std::{
|
||||||
|
any::Any,
|
||||||
|
convert::TryInto,
|
||||||
|
ffi::{c_void, CStr},
|
||||||
|
mem::ManuallyDrop,
|
||||||
|
os::raw::c_char,
|
||||||
|
panic::catch_unwind,
|
||||||
|
process::abort,
|
||||||
|
ptr::null,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::RawCustomMutator;
|
||||||
|
|
||||||
|
/// A structure to be used as the data pointer for our custom mutator. This was used as additional storage and is kept for now in case its needed later.
|
||||||
|
/// Also has some convenience functions for FFI conversions (from and to ptr) and tries to make misuse hard (see [`FFIContext::from`]).
|
||||||
|
struct FFIContext<M: RawCustomMutator> {
|
||||||
|
mutator: M,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<M: RawCustomMutator> FFIContext<M> {
|
||||||
|
fn from(ptr: *mut c_void) -> ManuallyDrop<Box<Self>> {
|
||||||
|
assert!(!ptr.is_null());
|
||||||
|
ManuallyDrop::new(unsafe { Box::from_raw(ptr as *mut Self) })
|
||||||
|
}
|
||||||
|
|
||||||
|
fn into_ptr(self: Box<Self>) -> *const c_void {
|
||||||
|
Box::into_raw(self) as *const c_void
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "afl_internals")]
|
||||||
|
fn new(afl: &'static afl_state, seed: u32) -> Box<Self> {
|
||||||
|
Box::new(Self {
|
||||||
|
mutator: M::init(afl, seed),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
#[cfg(not(feature = "afl_internals"))]
|
||||||
|
fn new(seed: u32) -> Box<Self> {
|
||||||
|
Box::new(Self {
|
||||||
|
mutator: M::init(seed),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// panic handler called for every panic
|
||||||
|
fn panic_handler(method: &str, panic_info: Box<dyn Any + Send + 'static>) -> ! {
|
||||||
|
use std::ops::Deref;
|
||||||
|
let cause = panic_info
|
||||||
|
.downcast_ref::<String>()
|
||||||
|
.map(String::deref)
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
panic_info
|
||||||
|
.downcast_ref::<&str>()
|
||||||
|
.copied()
|
||||||
|
.unwrap_or("<cause unknown>")
|
||||||
|
});
|
||||||
|
eprintln!("A panic occurred at {}: {}", method, cause);
|
||||||
|
abort()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
#[cfg(not(feature = "afl_internals"))]
|
||||||
|
pub fn afl_custom_init_<M: RawCustomMutator>(seed: u32) -> *const c_void {
|
||||||
|
match catch_unwind(|| FFIContext::<M>::new(seed).into_ptr()) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_init", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
#[cfg(feature = "afl_internals")]
|
||||||
|
pub fn afl_custom_init_<M: RawCustomMutator>(
|
||||||
|
afl: Option<&'static afl_state>,
|
||||||
|
seed: u32,
|
||||||
|
) -> *const c_void {
|
||||||
|
match catch_unwind(|| {
|
||||||
|
let afl = afl.expect("mutator func called with NULL afl");
|
||||||
|
FFIContext::<M>::new(afl, seed).into_ptr()
|
||||||
|
}) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_init", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
pub unsafe fn afl_custom_fuzz_<M: RawCustomMutator>(
|
||||||
|
data: *mut c_void,
|
||||||
|
buf: *mut u8,
|
||||||
|
buf_size: usize,
|
||||||
|
out_buf: *mut *const u8,
|
||||||
|
add_buf: *mut u8,
|
||||||
|
add_buf_size: usize,
|
||||||
|
max_size: usize,
|
||||||
|
) -> usize {
|
||||||
|
match catch_unwind(|| {
|
||||||
|
let mut context = FFIContext::<M>::from(data);
|
||||||
|
if buf.is_null() {
|
||||||
|
panic!("null buf passed to afl_custom_fuzz")
|
||||||
|
}
|
||||||
|
if out_buf.is_null() {
|
||||||
|
panic!("null out_buf passed to afl_custom_fuzz")
|
||||||
|
}
|
||||||
|
let buff_slice = slice::from_raw_parts_mut(buf, buf_size);
|
||||||
|
let add_buff_slice = if add_buf.is_null() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(slice::from_raw_parts(add_buf, add_buf_size))
|
||||||
|
};
|
||||||
|
match context
|
||||||
|
.mutator
|
||||||
|
.fuzz(buff_slice, add_buff_slice, max_size.try_into().unwrap())
|
||||||
|
{
|
||||||
|
Some(buffer) => {
|
||||||
|
*out_buf = buffer.as_ptr();
|
||||||
|
buffer.len().try_into().unwrap()
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
// return the input buffer with 0-length to let AFL skip this mutation attempt
|
||||||
|
*out_buf = buf;
|
||||||
|
0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_fuzz", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
pub unsafe fn afl_custom_fuzz_count_<M: RawCustomMutator>(
|
||||||
|
data: *mut c_void,
|
||||||
|
buf: *const u8,
|
||||||
|
buf_size: usize,
|
||||||
|
) -> u32 {
|
||||||
|
match catch_unwind(|| {
|
||||||
|
let mut context = FFIContext::<M>::from(data);
|
||||||
|
if buf.is_null() {
|
||||||
|
panic!("null buf passed to afl_custom_fuzz")
|
||||||
|
}
|
||||||
|
let buf_slice = slice::from_raw_parts(buf, buf_size);
|
||||||
|
// see https://doc.rust-lang.org/nomicon/borrow-splitting.html
|
||||||
|
let ctx = &mut **context;
|
||||||
|
let mutator = &mut ctx.mutator;
|
||||||
|
mutator.fuzz_count(buf_slice)
|
||||||
|
}) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_fuzz_count", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
pub fn afl_custom_queue_new_entry_<M: RawCustomMutator>(
|
||||||
|
data: *mut c_void,
|
||||||
|
filename_new_queue: *const c_char,
|
||||||
|
filename_orig_queue: *const c_char,
|
||||||
|
) {
|
||||||
|
match catch_unwind(|| {
|
||||||
|
let mut context = FFIContext::<M>::from(data);
|
||||||
|
if filename_new_queue.is_null() {
|
||||||
|
panic!("received null filename_new_queue in afl_custom_queue_new_entry");
|
||||||
|
}
|
||||||
|
let filename_new_queue = unsafe { CStr::from_ptr(filename_new_queue) };
|
||||||
|
let filename_orig_queue = if !filename_orig_queue.is_null() {
|
||||||
|
Some(unsafe { CStr::from_ptr(filename_orig_queue) })
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
context
|
||||||
|
.mutator
|
||||||
|
.queue_new_entry(filename_new_queue, filename_orig_queue);
|
||||||
|
}) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_queue_new_entry", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
pub unsafe fn afl_custom_deinit_<M: RawCustomMutator>(data: *mut c_void) {
|
||||||
|
match catch_unwind(|| {
|
||||||
|
// drop the context
|
||||||
|
ManuallyDrop::into_inner(FFIContext::<M>::from(data));
|
||||||
|
}) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_deinit", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
pub fn afl_custom_introspection_<M: RawCustomMutator>(data: *mut c_void) -> *const c_char {
|
||||||
|
match catch_unwind(|| {
|
||||||
|
let mut context = FFIContext::<M>::from(data);
|
||||||
|
if let Some(res) = context.mutator.introspection() {
|
||||||
|
res.as_ptr()
|
||||||
|
} else {
|
||||||
|
null()
|
||||||
|
}
|
||||||
|
}) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_introspection", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
pub fn afl_custom_describe_<M: RawCustomMutator>(
|
||||||
|
data: *mut c_void,
|
||||||
|
max_description_len: usize,
|
||||||
|
) -> *const c_char {
|
||||||
|
match catch_unwind(|| {
|
||||||
|
let mut context = FFIContext::<M>::from(data);
|
||||||
|
if let Some(res) = context.mutator.describe(max_description_len) {
|
||||||
|
res.as_ptr()
|
||||||
|
} else {
|
||||||
|
null()
|
||||||
|
}
|
||||||
|
}) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_describe", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internal function used in the macro
|
||||||
|
pub fn afl_custom_queue_get_<M: RawCustomMutator>(
|
||||||
|
data: *mut c_void,
|
||||||
|
filename: *const c_char,
|
||||||
|
) -> u8 {
|
||||||
|
match catch_unwind(|| {
|
||||||
|
let mut context = FFIContext::<M>::from(data);
|
||||||
|
assert!(!filename.is_null());
|
||||||
|
|
||||||
|
context
|
||||||
|
.mutator
|
||||||
|
.queue_get(unsafe { CStr::from_ptr(filename) }) as u8
|
||||||
|
}) {
|
||||||
|
Ok(ret) => ret,
|
||||||
|
Err(err) => panic_handler("afl_custom_queue_get", err),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// exports the given Mutator as a custom mutator as the C interface that AFL++ expects.
|
||||||
|
/// It is not possible to call this macro multiple times, because it would define the custom mutator symbols multiple times.
|
||||||
|
/// # Example
|
||||||
|
/// ```
|
||||||
|
/// # #[macro_use] extern crate custom_mutator;
|
||||||
|
/// # #[cfg(feature = "afl_internals")]
|
||||||
|
/// # use custom_mutator::afl_state;
|
||||||
|
/// # use custom_mutator::CustomMutator;
|
||||||
|
/// struct MyMutator;
|
||||||
|
/// impl CustomMutator for MyMutator {
|
||||||
|
/// /// ...
|
||||||
|
/// # type Error = ();
|
||||||
|
/// # #[cfg(feature = "afl_internals")]
|
||||||
|
/// # fn init(_afl_state: &afl_state, _seed: u32) -> Result<Self,()> {unimplemented!()}
|
||||||
|
/// # #[cfg(not(feature = "afl_internals"))]
|
||||||
|
/// # fn init(_seed: u32) -> Result<Self, Self::Error> {unimplemented!()}
|
||||||
|
/// # fn fuzz<'b,'s:'b>(&'s mut self, _buffer: &'b mut [u8], _add_buff: Option<&[u8]>, _max_size: usize) -> Result<Option<&'b [u8]>, Self::Error> {unimplemented!()}
|
||||||
|
/// }
|
||||||
|
/// export_mutator!(MyMutator);
|
||||||
|
/// ```
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! export_mutator {
|
||||||
|
($mutator_type:ty) => {
|
||||||
|
#[cfg(feature = "afl_internals")]
|
||||||
|
#[no_mangle]
|
||||||
|
pub extern "C" fn afl_custom_init(
|
||||||
|
afl: ::std::option::Option<&'static $crate::afl_state>,
|
||||||
|
seed: ::std::os::raw::c_uint,
|
||||||
|
) -> *const ::std::os::raw::c_void {
|
||||||
|
$crate::wrappers::afl_custom_init_::<$mutator_type>(afl, seed as u32)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(feature = "afl_internals"))]
|
||||||
|
#[no_mangle]
|
||||||
|
pub extern "C" fn afl_custom_init(
|
||||||
|
_afl: *const ::std::os::raw::c_void,
|
||||||
|
seed: ::std::os::raw::c_uint,
|
||||||
|
) -> *const ::std::os::raw::c_void {
|
||||||
|
$crate::wrappers::afl_custom_init_::<$mutator_type>(seed as u32)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[no_mangle]
|
||||||
|
pub extern "C" fn afl_custom_fuzz_count(
|
||||||
|
data: *mut ::std::os::raw::c_void,
|
||||||
|
buf: *const u8,
|
||||||
|
buf_size: usize,
|
||||||
|
) -> u32 {
|
||||||
|
unsafe {
|
||||||
|
$crate::wrappers::afl_custom_fuzz_count_::<$mutator_type>(data, buf, buf_size)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[no_mangle]
|
||||||
|
pub extern "C" fn afl_custom_fuzz(
|
||||||
|
data: *mut ::std::os::raw::c_void,
|
||||||
|
buf: *mut u8,
|
||||||
|
buf_size: usize,
|
||||||
|
out_buf: *mut *const u8,
|
||||||
|
add_buf: *mut u8,
|
||||||
|
add_buf_size: usize,
|
||||||
|
max_size: usize,
|
||||||
|
        ) -> usize {
            unsafe {
                $crate::wrappers::afl_custom_fuzz_::<$mutator_type>(
                    data,
                    buf,
                    buf_size,
                    out_buf,
                    add_buf,
                    add_buf_size,
                    max_size,
                )
            }
        }

        #[no_mangle]
        pub extern "C" fn afl_custom_queue_new_entry(
            data: *mut ::std::os::raw::c_void,
            filename_new_queue: *const ::std::os::raw::c_char,
            filename_orig_queue: *const ::std::os::raw::c_char,
        ) {
            $crate::wrappers::afl_custom_queue_new_entry_::<$mutator_type>(
                data,
                filename_new_queue,
                filename_orig_queue,
            )
        }

        #[no_mangle]
        pub extern "C" fn afl_custom_queue_get(
            data: *mut ::std::os::raw::c_void,
            filename: *const ::std::os::raw::c_char,
        ) -> u8 {
            $crate::wrappers::afl_custom_queue_get_::<$mutator_type>(data, filename)
        }

        #[no_mangle]
        pub extern "C" fn afl_custom_introspection(
            data: *mut ::std::os::raw::c_void,
        ) -> *const ::std::os::raw::c_char {
            $crate::wrappers::afl_custom_introspection_::<$mutator_type>(data)
        }

        #[no_mangle]
        pub extern "C" fn afl_custom_describe(
            data: *mut ::std::os::raw::c_void,
            max_description_len: usize,
        ) -> *const ::std::os::raw::c_char {
            $crate::wrappers::afl_custom_describe_::<$mutator_type>(data, max_description_len)
        }

        #[no_mangle]
        pub extern "C" fn afl_custom_deinit(data: *mut ::std::os::raw::c_void) {
            unsafe { $crate::wrappers::afl_custom_deinit_::<$mutator_type>(data) }
        }
    };
}

#[cfg(test)]
/// this sanity test is supposed to just find out whether an empty mutator being exported by the macro compiles
mod sanity_test {
    #[cfg(feature = "afl_internals")]
    use super::afl_state;

    use super::{export_mutator, RawCustomMutator};

    struct ExampleMutator;

    impl RawCustomMutator for ExampleMutator {
        #[cfg(feature = "afl_internals")]
        fn init(_afl: &afl_state, _seed: u32) -> Self {
            unimplemented!()
        }

        #[cfg(not(feature = "afl_internals"))]
        fn init(_seed: u32) -> Self {
            unimplemented!()
        }

        fn fuzz<'b, 's: 'b>(
            &'s mut self,
            _buffer: &'b mut [u8],
            _add_buff: Option<&[u8]>,
            _max_size: usize,
        ) -> Option<&'b [u8]> {
            unimplemented!()
        }
    }

    export_mutator!(ExampleMutator);
}

#[allow(unused_variables)]
/// A custom mutator.
/// [`CustomMutator::handle_error`] will be called in case any method returns an [`Result::Err`].
pub trait CustomMutator {
    /// The error type. All methods must return the same error type.
    type Error: Debug;

    /// The method which handles errors.
    /// By default, this method will log the error to stderr if the environment variable "`AFL_CUSTOM_MUTATOR_DEBUG`" is set and non-empty.
    /// After logging the error, execution will continue on a best-effort basis.
    ///
    /// This default behaviour can be customized by implementing this method.
    fn handle_error(err: Self::Error) {
        if std::env::var("AFL_CUSTOM_MUTATOR_DEBUG")
            .map(|v| !v.is_empty())
            .unwrap_or(false)
        {
            eprintln!("Error in custom mutator: {:?}", err)
        }
    }

    #[cfg(feature = "afl_internals")]
    fn init(afl: &'static afl_state, seed: u32) -> Result<Self, Self::Error>
    where
        Self: Sized;

    #[cfg(not(feature = "afl_internals"))]
    fn init(seed: u32) -> Result<Self, Self::Error>
    where
        Self: Sized;

    fn fuzz_count(&mut self, buffer: &[u8]) -> Result<u32, Self::Error> {
        Ok(1)
    }

    fn fuzz<'b, 's: 'b>(
        &'s mut self,
        buffer: &'b mut [u8],
        add_buff: Option<&[u8]>,
        max_size: usize,
    ) -> Result<Option<&'b [u8]>, Self::Error>;

    fn queue_new_entry(
        &mut self,
        filename_new_queue: &CStr,
        filename_orig_queue: Option<&CStr>,
    ) -> Result<(), Self::Error> {
        Ok(())
    }

    fn queue_get(&mut self, filename: &CStr) -> Result<bool, Self::Error> {
        Ok(true)
    }

    fn describe(&mut self, max_description: usize) -> Result<Option<&CStr>, Self::Error> {
        Ok(None)
    }

    fn introspection(&mut self) -> Result<Option<&CStr>, Self::Error> {
        Ok(None)
    }
}

impl<M> RawCustomMutator for M
where
    M: CustomMutator,
    M::Error: Debug,
{
    #[cfg(feature = "afl_internals")]
    fn init(afl: &'static afl_state, seed: u32) -> Self
    where
        Self: Sized,
    {
        match Self::init(afl, seed) {
            Ok(r) => r,
            Err(e) => {
                Self::handle_error(e);
                panic!("Error in afl_custom_init")
            }
        }
    }

    #[cfg(not(feature = "afl_internals"))]
    fn init(seed: u32) -> Self
    where
        Self: Sized,
    {
        match Self::init(seed) {
            Ok(r) => r,
            Err(e) => {
                Self::handle_error(e);
                panic!("Error in afl_custom_init")
            }
        }
    }

    fn fuzz_count(&mut self, buffer: &[u8]) -> u32 {
        match self.fuzz_count(buffer) {
            Ok(r) => r,
            Err(e) => {
                Self::handle_error(e);
                0
            }
        }
    }

    fn fuzz<'b, 's: 'b>(
        &'s mut self,
        buffer: &'b mut [u8],
        add_buff: Option<&[u8]>,
        max_size: usize,
    ) -> Option<&'b [u8]> {
        match self.fuzz(buffer, add_buff, max_size) {
            Ok(r) => r,
            Err(e) => {
                Self::handle_error(e);
                None
            }
        }
    }

    fn queue_new_entry(&mut self, filename_new_queue: &CStr, filename_orig_queue: Option<&CStr>) {
        match self.queue_new_entry(filename_new_queue, filename_orig_queue) {
            Ok(r) => r,
            Err(e) => {
                Self::handle_error(e);
            }
        }
    }

    fn queue_get(&mut self, filename: &CStr) -> bool {
        match self.queue_get(filename) {
            Ok(r) => r,
            Err(e) => {
                Self::handle_error(e);
                false
            }
        }
    }

    fn describe(&mut self, max_description: usize) -> Option<&CStr> {
        match self.describe(max_description) {
            Ok(r) => r,
            Err(e) => {
                Self::handle_error(e);
                None
            }
        }
    }

    fn introspection(&mut self) -> Option<&CStr> {
        match self.introspection() {
            Ok(r) => r,
            Err(e) => {
                Self::handle_error(e);
                None
            }
        }
    }
}
custom_mutators/rust/example/Cargo.toml (new file, 15 lines)
@@ -0,0 +1,15 @@
[package]
name = "example_mutator"
version = "0.1.0"
authors = ["Julius Hohnerlein <julihoh@users.noreply.github.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
custom_mutator = { path = "../custom_mutator" }

[[example]]
name = "example_mutator"
path = "./src/example_mutator.rs"
crate-type = ["cdylib"]
custom_mutators/rust/example/src/example_mutator.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
#![allow(unused_variables)]

use custom_mutator::{export_mutator, CustomMutator};

struct ExampleMutator;

impl CustomMutator for ExampleMutator {
    type Error = ();

    fn init(seed: u32) -> Result<Self, Self::Error> {
        Ok(Self)
    }

    fn fuzz<'b, 's: 'b>(
        &'s mut self,
        buffer: &'b mut [u8],
        add_buff: Option<&[u8]>,
        max_size: usize,
    ) -> Result<Option<&'b [u8]>, Self::Error> {
        buffer.reverse();
        Ok(Some(buffer))
    }
}

struct OwnBufferExampleMutator {
    own_buffer: Vec<u8>,
}

impl CustomMutator for OwnBufferExampleMutator {
    type Error = ();

    fn init(seed: u32) -> Result<Self, Self::Error> {
        Ok(Self {
            own_buffer: Vec::new(),
        })
    }

    fn fuzz<'b, 's: 'b>(
        &'s mut self,
        buffer: &'b mut [u8],
        add_buff: Option<&[u8]>,
        max_size: usize,
    ) -> Result<Option<&'b [u8]>, ()> {
        self.own_buffer.reverse();
        Ok(Some(self.own_buffer.as_slice()))
    }
}

export_mutator!(ExampleMutator);
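As a rough usage sketch (not part of the diff above): once built, the resulting shared object can be loaded by afl-fuzz like any other custom mutator via `AFL_CUSTOM_MUTATOR_LIBRARY`. The exact artifact path below is an assumption about cargo's default layout for cdylib examples and may differ on your system.

# build the example mutator and point afl-fuzz at the produced shared object
cd custom_mutators/rust/example
cargo build --release --examples
# assumed output location; adjust to wherever cargo placed libexample_mutator.so
export AFL_CUSTOM_MUTATOR_LIBRARY="$(pwd)/target/release/examples/libexample_mutator.so"
afl-fuzz -i input_dir -o output_dir -- ./target_binary @@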
custom_mutators/rust/example_lain/Cargo.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
[package]
name = "example_lain"
version = "0.1.0"
authors = ["Julius Hohnerlein <julihoh@users.noreply.github.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
custom_mutator = { path = "../custom_mutator" }
lain = "0.5"

[[example]]
name = "example_lain"
path = "./src/lain_mutator.rs"
crate-type = ["cdylib"]
custom_mutators/rust/example_lain/rust-toolchain (new file, 1 line)
@@ -0,0 +1 @@
nightly
custom_mutators/rust/example_lain/src/lain_mutator.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
use custom_mutator::{export_mutator, CustomMutator};
use lain::{
    mutator::Mutator,
    prelude::*,
    rand::{rngs::StdRng, SeedableRng},
};

#[derive(Debug, Mutatable, NewFuzzed, BinarySerialize)]
struct MyStruct {
    field_1: u8,

    #[lain(bits = 3)]
    field_2: u8,

    #[lain(bits = 5)]
    field_3: u8,

    #[lain(min = 5, max = 10000)]
    field_4: u32,

    #[lain(ignore)]
    ignored_field: u64,
}

struct LainMutator {
    mutator: Mutator<StdRng>,
    buffer: Vec<u8>,
}

impl CustomMutator for LainMutator {
    type Error = ();

    fn init(seed: u32) -> Result<Self, ()> {
        Ok(Self {
            mutator: Mutator::new(StdRng::seed_from_u64(seed as u64)),
            buffer: Vec::new(),
        })
    }

    fn fuzz<'b, 's: 'b>(
        &'s mut self,
        _buffer: &'b mut [u8],
        _add_buff: Option<&[u8]>,
        max_size: usize,
    ) -> Result<Option<&'b [u8]>, ()> {
        // we just sample an instance of MyStruct, ignoring the current input
        let instance = MyStruct::new_fuzzed(&mut self.mutator, None);
        let size = instance.serialized_size();
        if size > max_size {
            return Err(());
        }
        self.buffer.clear();
        self.buffer.reserve(size);
        instance.binary_serialize::<_, BigEndian>(&mut self.buffer);
        Ok(Some(self.buffer.as_slice()))
    }
}

export_mutator!(LainMutator);
@@ -9,45 +9,56 @@ Want to stay in the loop on major new features? Join our mailing list by
sending a mail to <afl-users+subscribe@googlegroups.com>.

### Version ++3.10c (release)
  - Mac OS ARM64 support
  - Android support fixed and updated by Joey Jiaojg - thanks!
  - New selective instrumentation option with __AFL_COVERAGE_* commands
    to be placed in the source code.
    Check out instrumentation/README.instrument_list.md
  - afl-fuzz
    - Making AFL_MAP_SIZE (mostly) obsolete - afl-fuzz now learns on
      start the target map size
    - upgraded cmplog/redqueen: solving for floating point, solving
      transformations (e.g. toupper, tolower, to/from hex, xor,
      arithmetics, etc.). This is costly hence new command line option
      `-l` that sets the intensity (values 1 to 3). Recommended is 2.
    - added `AFL_CMPLOG_ONLY_NEW` to not use cmplog on initial seeds
      from `-i` or resumes (these have most likely already been done)
    - fix crash for very, very fast targets+systems (thanks to mhlakhani
      for reporting)
    - on restarts (`-i`)/autoresume (AFL_AUTORESUME) the stats are now
      reloaded and used, thanks to Vimal Joseph for this patch!
    - changed the meaning of '+' of the '-t' option, it now means to
      auto-calculate the timeout with the value given being the max
      timeout. The original meaning of skipping timeouts instead of
      abort is now inherent to the -t option.
    - if deterministic mode is active (`-D`, or `-M` without `-d`) then
      we sync after every queue entry as this can take very long time
      otherwise
    - added minimum SYNC_TIME to include/config.h (30 minutes default)
    - better detection if a target needs a large shared map
    - fix for `-Z`
    - fixed a few crashes
    - switched to an even faster RNG
    - added hghwng's patch for faster trace map analysis
    - printing suggestions for mistyped `AFL_` env variables
    - added Rust bindings for custom mutators (thanks @julihoh)
  - afl-cc
    - allow instrumenting LLVMFuzzerTestOneInput
    - fixed endless loop for allow/blocklist lines starting with a
      comment (thanks to Zherya for reporting)
    - cmplog/redqueen now also tracks floating point, _ExtInt() + 128bit
    - cmplog/redqueen can now process basic libc++ and libstdc++
      std::string comparisons (no position or length type variants)
    - added support for __afl_coverage_interesting() for LTO and our
      own PCGUARD (llvm 10.0.1+), read more about this function and
      selective coverage in instrumentation/README.instrument_list.md
    - added AFL_LLVM_INSTRUMENT option NATIVE for native clang pc-guard
      support (less performant than our own), GCC for old afl-gcc and
      CLANG for old afl-clang
    - fixed a potential crash in the LAF feature
    - workaround for llvm bitcast lto bug
    - workaround for llvm 13
  - qemuafl
    - QASan (address sanitizer for Qemu) ported to qemuafl!
      See qemu_mode/libqasan/README.md
@@ -55,14 +66,17 @@ sending a mail to <afl-users+subscribe@googlegroups.com>.
    - solved an issue when dumping the memory maps (thanks wizche)
    - Android support for QASan
  - unicornafl
    - Substantial speed gains in python bindings for certain use cases
    - Improved rust bindings
    - Added a new example harness to compare python, c and rust bindings
  - afl-cmin and afl-showmap now support the -f option
  - afl_plot now also generates a graph on the discovered edges
  - changed default: no memory limit for afl-cmin and afl-cmin.bash
  - warn on any _AFL and __AFL env vars.
  - set AFL_IGNORE_UNKNOWN_ENVS to not warn on unknown AFL_... env vars
  - added dummy Makefile to instrumentation/
  - Updated utils/afl_frida to be 5% faster, 7% on x86_x64
  - Added `AFL_KILL_SIGNAL` env variable (thanks @v-p-b)
  - @Edznux added a nice documentation on how to use rpc.statsd with
    afl++ in docs/rpc_statsd.md, thanks!
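To make the cmplog/redqueen entries above concrete, here is a hedged sketch of how the new options are typically combined (binary names and paths are placeholders, not taken from the diff):

# ./target.cmplog is assumed to be a separately built, CMPLOG-instrumented copy of the target
AFL_CMPLOG_ONLY_NEW=1 afl-fuzz -i seeds -o findings \
  -c ./target.cmplog -l 2 -- ./target @@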
@@ -4,6 +4,11 @@ This file describes how you can implement custom mutations to be used in AFL.
For now, we support C/C++ library and Python module, collectively named the
custom mutator.

There is also experimental support for Rust in `custom_mutators/rust`.
Please refer to that directory for documentation.
Run ```cargo doc -p custom_mutator --open``` in that directory to view the
documentation in your web browser.

Implemented by
- C/C++ library (`*.so`): Khaled Yakdan from Code Intelligence (<yakdan@code-intelligence.de>)
- Python module: Christian Holler from Mozilla (<choller@mozilla.com>)
docs/docs.md (new file, 122 lines)
@@ -0,0 +1,122 @@
# Restructure afl++'s documentation

## About us

We are dedicated to everything around fuzzing. Our main and most well known
contribution is the fuzzer `afl++`, which is part of all major Unix
distributions (e.g. Debian, Arch, FreeBSD, etc.) and is deployed on Google's
oss-fuzz and clusterfuzz. It is rated the top fuzzer on Google's fuzzbench.

We are four individuals from Europe supported by a large community.

All our tools are open source.

## About the afl++ fuzzer project

afl++ inherited its documentation from the original Google afl project.
Since then it has been massively improved - feature and performance wise -
and although the documentation has likewise been continued, it has grown out
of proportion.
The documentation is written by non-native English speakers, and
none of us has a writer background.

We see questions on afl++ usage on mailing lists (e.g. afl-users), discord
channels, web forums and as issues in our repository.

This only increases as afl++ has been on the top of Google's fuzzbench
statistics (which measures the performance of fuzzers) and is now being
integrated in Google's oss-fuzz and clusterfuzz - and is in many Unix
packaging repositories, e.g. Debian, FreeBSD, etc.

afl++ now has 44 (!) documentation files with 13k total lines of content.
This is way too much.

Hence afl++ needs a complete overhaul of its documentation, both on an
organisational/structural level as well as the content.

Overall the following actions have to be performed:
  * Create a better structure of documentation so it is easier to find the
    information that is being looked for, combining and/or splitting up the
    existing documents as needed.
  * Rewrite some documentation to remove duplication. Some information is
    present several times in the documentation. These duplicates should be
    reduced to where they are needed so that we have as little bloat as possible.
  * The documents have been written and modified by a lot of different people,
    most of them non-native English speakers. Hence an overall review of which
    parts should be rewritten has to be performed, and then the rewrite done.
  * Create a cheat-sheet for a very short best-setup build and run of afl++
  * Pictures explain more than 1000 words. We need at least 4 images that
    explain the workflow with afl++:
      - the build workflow
      - the fuzzing workflow
      - the fuzzing campaign management workflow
      - the overall workflow that is an overview of the above
      - maybe more, where the technical writer deems it necessary for
        understanding.

Requirements:
  * Documentation has to be in Markdown format
  * Images have to be either in SVG or PNG format.
  * All documentation should be (moved) in(to) docs/

The project does not require writing new documentation or tutorials beside the
cheat sheet. The technical information for the cheat sheet will be provided by
us.

## Metrics

afl++ is the highest-performing fuzzer publicly available - but it is also the
most feature rich and complex. With the publicity of afl++'s success and
deployment in Google projects internally and externally, and availability as
a package on most Linux distributions, we see more and more issues being
created and help requests on our Discord channel that would not be
necessary if people had read through all our documentation - which
is unrealistic.

We expect the new documentation after this project to be cleaner, easier to
access and lighter to digest by our users, resulting in far fewer
help requests. On the other hand the number of afl++ users should
increase as well as it becomes more accessible, which would also increase
questions again - but overall resulting in a reduction of help requests.

In numbers: we currently have per week on average 5 issues on Github,
10 questions on discord and 1 on mailing lists that would not be necessary
with perfect documentation and perfect people.

We would consider this project a success if afterwards we only have
2 issues on Github and 3 questions on discord per week left that could be answered
by reading the documentation. The mailing list is usually used by the most
novice users and we don't expect any fewer questions there.

## Project Budget

We have zero experience with technical writers, so this is very hard for us
to calculate. We expect it to be a lot of work though because of the amount
of documentation we have that needs to be restructured and partially rewritten
(44 documents with 13k total lines of content).

We assume the daily rate of a very good and experienced technical writer in
times of a pandemic to be ~500$ (according to web research), and calculate
the overall amount of work to be around 20 days for everything incl. the
graphics (but again - this is basically just guessing).

Technical Writer: 10000$
Volunteer stipends: 0$ (waived)
T-Shirts for the top 10 contributors and helpers to this documentation project:
  10 afl++ logo t-shirts, 20$ each: 200$
  10 times shipping cost of t-shirts, 10$ each: 100$

Total: 10.300$
(in the submission form 10.280$ was entered)

## Additional Information

We have participated in Google Summer of Code in 2020 and hope to be selected
again in 2021.

We have no experience with a technical writer, but we will support that person
with video calls, chats, emails and messaging, provide all necessary information
and write the technical content that is required for the success of this project.
It is clear to us that a technical writer knows how to write, but cannot know
the technical details of a complex tool like afl++. This guidance, input,
etc. has to come from us.
|
|||||||
users or for some types of custom fuzzing setups. See [README.md](README.md) for the general
|
users or for some types of custom fuzzing setups. See [README.md](README.md) for the general
|
||||||
instruction manual.
|
instruction manual.
|
||||||
|
|
||||||
|
Note that most tools will warn on any unknown AFL environment variables.
|
||||||
|
This is for warning on typos that can happen. If you want to disable this
|
||||||
|
check then set the `AFL_IGNORE_UNKNOWN_ENVS` environment variable.
|
||||||
|
|
||||||
## 1) Settings for all compilers
|
## 1) Settings for all compilers
|
||||||
|
|
||||||
Starting with afl++ 3.0 there is only one compiler: afl-cc
|
Starting with afl++ 3.0 there is only one compiler: afl-cc
|
||||||
@ -18,7 +22,6 @@ To select the different instrumentation modes this can be done by
|
|||||||
`MODE` can be one of `LTO` (afl-clang-lto*), `LLVM` (afl-clang-fast*), `GCC_PLUGIN`
|
`MODE` can be one of `LTO` (afl-clang-lto*), `LLVM` (afl-clang-fast*), `GCC_PLUGIN`
|
||||||
(afl-g*-fast) or `GCC` (afl-gcc/afl-g++).
|
(afl-g*-fast) or `GCC` (afl-gcc/afl-g++).
|
||||||
|
|
||||||
|
|
||||||
Because (with the exception of the --afl-MODE command line option) the
|
Because (with the exception of the --afl-MODE command line option) the
|
||||||
compile-time tools do not accept afl specific command-line options, they
|
compile-time tools do not accept afl specific command-line options, they
|
||||||
make fairly broad use of environmental variables instead:
|
make fairly broad use of environmental variables instead:
|
||||||
@ -448,6 +451,7 @@ checks or alter some of the more exotic semantics of the tool:
|
|||||||
`banner` corresponds to the name of the fuzzer provided through `-M/-S`.
|
`banner` corresponds to the name of the fuzzer provided through `-M/-S`.
|
||||||
`afl_version` corresponds to the currently running afl version (e.g `++3.0c`).
|
`afl_version` corresponds to the currently running afl version (e.g `++3.0c`).
|
||||||
Default (empty/non present) will add no tags to the metrics.
|
Default (empty/non present) will add no tags to the metrics.
|
||||||
|
See [rpc_statsd.md](rpc_statsd.md) for more information.
|
||||||
|
|
||||||
- Setting `AFL_CRASH_EXITCODE` sets the exit code afl treats as crash.
|
- Setting `AFL_CRASH_EXITCODE` sets the exit code afl treats as crash.
|
||||||
For example, if `AFL_CRASH_EXITCODE='-1'` is set, each input resulting
|
For example, if `AFL_CRASH_EXITCODE='-1'` is set, each input resulting
|
||||||
@ -517,7 +521,7 @@ The QEMU wrapper used to instrument binary-only code supports several settings:
|
|||||||
- With `AFL_USE_QASAN` you can enable QEMU AddressSanitizer for dynamically
|
- With `AFL_USE_QASAN` you can enable QEMU AddressSanitizer for dynamically
|
||||||
linked binaries.
|
linked binaries.
|
||||||
|
|
||||||
- With `AFL_QEMU_FORCE_DFL` you force QEMU to ignore the registered singal
|
- With `AFL_QEMU_FORCE_DFL` you force QEMU to ignore the registered signal
|
||||||
handlers of the target.
|
handlers of the target.
|
||||||
|
|
||||||
## 6) Settings for afl-cmin
|
## 6) Settings for afl-cmin
|
||||||
|
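A brief, hedged illustration of the warning/opt-out behaviour described above (the misspelled variable name is a deliberate, made-up example):

# a typo such as AFL_SKIP_CPUFREKQ would normally trigger an "unknown AFL env var" warning
AFL_SKIP_CPUFREKQ=1 afl-fuzz -i in -o out -- ./target @@
# suppress the unknown-variable check if the warning is not wanted
AFL_IGNORE_UNKNOWN_ENVS=1 AFL_SKIP_CPUFREKQ=1 afl-fuzz -i in -o out -- ./target @@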
@@ -1,6 +1,6 @@
# Remote monitoring with StatsD

StatsD allows you to receive and aggregate metrics from a wide range of applications and retransmit them to the backend of your choice.
This enables you to create nice and readable dashboards containing all the information you need on your fuzzer instances.
No need to write your own statistics parsing system, deploy and maintain it to all your instances, sync with your graph rendering system...
@@ -45,7 +45,7 @@ For more information on these env vars, check out `docs/env_variables.md`.

The simplest way of using this feature is to use any metric provider and change the host/port of your StatsD daemon,
with `AFL_STATSD_HOST` and `AFL_STATSD_PORT`, if required (defaults are `localhost` and port `8125`).
To get started, here are some instructions with free and open source tools.
The following setup is based on Prometheus, statsd_exporter and Grafana.
Grafana here is not mandatory, but gives you some nice graphs and features.
@@ -131,7 +131,7 @@ mappings:

Run `docker-compose up -d`.

Everything should now be setup, you are now able to run your fuzzers with

```
AFL_STATSD_TAGS_FLAVOR=dogstatsd AFL_STATSD=1 afl-fuzz -M test-fuzzer-1 -i i -o o ./bin/my-application @@
@@ -139,5 +139,5 @@ AFL_STATSD_TAGS_FLAVOR=dogstatsd AFL_STATSD=1 afl-fuzz -S test-fuzzer-2 -i i -o
...
```

This setup may be modified before use in a production environment. Depending on your needs: adding passwords, creating volumes for storage,
tweaking the metrics gathering to get host metrics (CPU, RAM ...).
@@ -570,6 +570,7 @@ typedef struct afl_state {
     blocks_eff_total,                 /* Blocks subject to effector maps  */
     blocks_eff_select,                /* Blocks selected as fuzzable      */
     start_time,                       /* Unix start time (ms)             */
+    last_sync_time,                   /* Time of last sync                */
     last_path_time,                   /* Time for most recent path (ms)   */
     last_crash_time,                  /* Time for most recent crash (ms)  */
     last_hang_time;                   /* Time for most recent hang (ms)   */
@@ -649,6 +650,7 @@ typedef struct afl_state {
   u32 cmplog_max_filesize;
   u32 cmplog_lvl;
   u32 colorize_success;
+  u8  cmplog_enable_arith, cmplog_enable_transform;

   struct afl_pass_stat *pass_stats;
   struct cmp_map *      orig_cmp_map;
@@ -1070,8 +1072,8 @@ void destroy_extras(afl_state_t *);

 void load_stats_file(afl_state_t *);
 void write_setup_file(afl_state_t *, u32, char **);
-void write_stats_file(afl_state_t *, double, double, double);
-void maybe_update_plot_file(afl_state_t *, double, double);
+void write_stats_file(afl_state_t *, u32, double, double, double);
+void maybe_update_plot_file(afl_state_t *, u32, double, double);
 void show_stats(afl_state_t *);
 void show_init_stats(afl_state_t *);
@@ -39,6 +39,7 @@
 #define STRINGIFY_VAL_SIZE_MAX (16)

 void detect_file_args(char **argv, u8 *prog_in, bool *use_stdin);
+void print_suggested_envs(char *mispelled_env);
 void check_environment_vars(char **env);

 char **argv_cpy_dup(int argc, char **argv);
@@ -10,7 +10,7 @@
    Dominik Maier <mail@dmnk.co>

    Copyright 2016, 2017 Google Inc. All rights reserved.
-   Copyright 2019-2020 AFLplusplus Project. All rights reserved.
+   Copyright 2019-2021 AFLplusplus Project. All rights reserved.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -25,8 +25,8 @@

 /* Version string: */

-// c = release, d = volatile github dev, e = experimental branch
-#define VERSION "++3.01a"
+// c = release, a = volatile github dev, e = experimental branch
+#define VERSION "++3.10c"

 /******************************************************
  *                                                    *
@@ -42,27 +42,22 @@
  *
  */

-/* Enable arithmetic compare solving for both path */
-#define CMPLOG_SOLVE_ARITHMETIC
-
-/* Enable transform following (XOR/ADD/SUB manipulations, hex en/decoding) */
-#define CMPLOG_SOLVE_TRANSFORM
-
-/* if TRANSFORM is enabled, this additionally enables base64 en/decoding */
+/* if TRANSFORM is enabled with '-l T', this additionally enables base64
+   encoding/decoding */
 // #define CMPLOG_SOLVE_TRANSFORM_BASE64

-/* If a redqueen pass finds more than one solve, try to combine them? */
+/* If a redqueen pass finds more than one solution, try to combine them? */
 #define CMPLOG_COMBINE

-/* Minimum % of the corpus to perform cmplog on. Default: 20% */
-#define CMPLOG_CORPUS_PERCENT 20U
+/* Minimum % of the corpus to perform cmplog on. Default: 10% */
+#define CMPLOG_CORPUS_PERCENT 5U

-/* Number of potential posititions from which we decide the cmplog becomes
-   useless, default 16384 */
-#define CMPLOG_POSITIONS_MAX 16384U
+/* Number of potential positions from which we decide if cmplog becomes
+   useless, default 8096 */
+#define CMPLOG_POSITIONS_MAX 8096U

-/* Maximum allowed fails per CMP value. Default: 32 * 3 */
-#define CMPLOG_FAIL_MAX 96
+/* Maximum allowed fails per CMP value. Default: 128 */
+#define CMPLOG_FAIL_MAX 128

 /* Now non-cmplog configuration options */

@@ -285,6 +280,11 @@

 #define SYNC_INTERVAL 8

+/* Sync time (minimum time between syncing in ms, time is halfed for -M main
+   nodes) - default is 30 minutes: */
+
+#define SYNC_TIME (30 * 60 * 1000)
+
 /* Output directory reuse grace period (minutes): */

 #define OUTPUT_GRACE 25
@@ -61,6 +61,7 @@ static char *afl_environment_variables[] = {
     "AFL_FORKSRV_INIT_TMOUT",
     "AFL_HARDEN",
     "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES",
+    "AFL_IGNORE_UNKNOWN_ENVS",
     "AFL_IMPORT_FIRST",
     "AFL_INST_LIBS",
     "AFL_INST_RATIO",
@@ -130,6 +131,7 @@ static char *afl_environment_variables[] = {
     "AFL_QEMU_DEBUG_MAPS",
     "AFL_QEMU_DISABLE_CACHE",
     "AFL_QEMU_DRIVER_NO_HOOK",
+    "AFL_QEMU_FORCE_DFL",
     "AFL_QEMU_PERSISTENT_ADDR",
     "AFL_QEMU_PERSISTENT_CNT",
     "AFL_QEMU_PERSISTENT_GPR",
|
|||||||
Note that useful parameters for `val` are: 1, 2, 3, 4, 8, 16, 32, 64, 128.
|
Note that useful parameters for `val` are: 1, 2, 3, 4, 8, 16, 32, 64, 128.
|
||||||
A value of e.g. 33 will be seen as 32 for coverage purposes.
|
A value of e.g. 33 will be seen as 32 for coverage purposes.
|
||||||
|
|
||||||
## 3) Selective instrumenation with AFL_LLVM_ALLOWLIST/AFL_LLVM_DENYLIST
|
## 3) Selective instrumentation with AFL_LLVM_ALLOWLIST/AFL_LLVM_DENYLIST
|
||||||
|
|
||||||
This feature is equivalent to llvm 12 sancov feature and allows to specify
|
This feature is equivalent to llvm 12 sancov feature and allows to specify
|
||||||
on a filename and/or function name level to instrument these or skip them.
|
on a filename and/or function name level to instrument these or skip them.
|
||||||
|
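A minimal, hedged sketch of the selective instrumentation workflow mentioned above (the allowlist file syntax shown is an assumption - check README.instrument_list.md for the authoritative format):

# only instrument the listed source file / function when compiling with afl-cc
printf 'parse.c\nfun: handle_request\n' > allowlist.txt
AFL_LLVM_ALLOWLIST="$(pwd)/allowlist.txt" afl-clang-fast -o target target.c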
@@ -88,16 +88,35 @@ apt-get install -y clang-12 clang-tools-12 libc++1-12 libc++-12-dev \
### Building llvm yourself (version 12)

Building llvm from github takes quite some long time and is not painless:
```sh
sudo apt install binutils-dev  # this is *essential*!
git clone --depth=1 https://github.com/llvm/llvm-project
cd llvm-project
mkdir build
cd build

# Add -G Ninja if ninja-build installed
# "Building with ninja significantly improves your build time, especially with
# incremental builds, and improves your memory usage."
cmake \
  -DCLANG_INCLUDE_DOCS="OFF" \
  -DCMAKE_BUILD_TYPE=Release \
  -DLLVM_BINUTILS_INCDIR=/usr/include/ \
  -DLLVM_BUILD_LLVM_DYLIB="ON" \
  -DLLVM_ENABLE_BINDINGS="OFF" \
  -DLLVM_ENABLE_PROJECTS='clang;compiler-rt;libcxx;libcxxabi;libunwind;lld' \
  -DLLVM_ENABLE_WARNINGS="OFF" \
  -DLLVM_INCLUDE_BENCHMARKS="OFF" \
  -DLLVM_INCLUDE_DOCS="OFF" \
  -DLLVM_INCLUDE_EXAMPLES="OFF" \
  -DLLVM_INCLUDE_TESTS="OFF" \
  -DLLVM_LINK_LLVM_DYLIB="ON" \
  -DLLVM_TARGETS_TO_BUILD="host" \
  ../llvm/
cmake --build . --parallel
export PATH="$(pwd)/bin:$PATH"
export LLVM_CONFIG="$(pwd)/bin/llvm-config"
export LD_LIBRARY_PATH="$(llvm-config --libdir)${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
cd /path/to/AFLplusplus/
make
sudo make install
@@ -1088,7 +1088,7 @@ void ModuleSanitizerCoverage::InjectTraceForSwitch(

   }

-  llvm::sort(Initializers.begin() + 2, Initializers.end(),
+  llvm::sort(drop_begin(Initializers, 2),
              [](const Constant *A, const Constant *B) {

                return cast<ConstantInt>(A)->getLimitedValue() <
@@ -1136,10 +1136,10 @@ void ModuleSanitizerCoverage::InjectTraceForGep(
   for (auto GEP : GepTraceTargets) {

     IRBuilder<> IRB(GEP);
-    for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
-      if (!isa<ConstantInt>(*I) && (*I)->getType()->isIntegerTy())
-        IRB.CreateCall(SanCovTraceGepFunction,
-                       {IRB.CreateIntCast(*I, IntptrTy, true)});
+    for (Use &Idx : GEP->indices())
+      if (!isa<ConstantInt>(Idx) && Idx->getType()->isIntegerTy())
+        IRB.CreateCall(SanCovTraceGepFunction,
+                       {IRB.CreateIntCast(Idx, IntptrTy, true)});

   }
@@ -244,8 +244,12 @@ static void __afl_map_shm(void) {

   if (__afl_final_loc) {

-    if (__afl_final_loc % 32)
-      __afl_final_loc = (((__afl_final_loc + 31) >> 5) << 5);
+    if (__afl_final_loc % 64) {
+
+      __afl_final_loc = (((__afl_final_loc + 63) >> 6) << 6);
+
+    }

     __afl_map_size = __afl_final_loc;

     if (__afl_final_loc > MAP_SIZE) {
@@ -1090,7 +1094,7 @@ __attribute__((constructor(0))) void __afl_auto_first(void) {
   if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
   u8 *ptr;

-  ptr = (u8 *)malloc(2097152);
+  ptr = (u8 *)malloc(MAP_INITIAL_SIZE);

   if (ptr && (ssize_t)ptr != -1) {

@@ -1171,7 +1175,7 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {

     fprintf(stderr,
             "Running __sanitizer_cov_trace_pc_guard_init: %p-%p (%lu edges)\n",
-            start, stop, stop - start);
+            start, stop, (unsigned long)(stop - start));

   }

@@ -1653,12 +1657,19 @@ static u8 *get_llvm_stdstring(u8 *string) {

 void __cmplog_rtn_gcc_stdstring_cstring(u8 *stdstring, u8 *cstring) {

+  if (unlikely(!__afl_cmp_map)) return;
+  if (!area_is_mapped(stdstring, 32) || !area_is_mapped(cstring, 32)) return;
+
   __cmplog_rtn_hook(get_gcc_stdstring(stdstring), cstring);

 }

 void __cmplog_rtn_gcc_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {

+  if (unlikely(!__afl_cmp_map)) return;
+  if (!area_is_mapped(stdstring1, 32) || !area_is_mapped(stdstring2, 32))
+    return;
+
   __cmplog_rtn_hook(get_gcc_stdstring(stdstring1),
                     get_gcc_stdstring(stdstring2));

@@ -1666,12 +1677,19 @@ void __cmplog_rtn_gcc_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {

 void __cmplog_rtn_llvm_stdstring_cstring(u8 *stdstring, u8 *cstring) {

+  if (unlikely(!__afl_cmp_map)) return;
+  if (!area_is_mapped(stdstring, 32) || !area_is_mapped(cstring, 32)) return;
+
   __cmplog_rtn_hook(get_llvm_stdstring(stdstring), cstring);

 }

 void __cmplog_rtn_llvm_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {

+  if (unlikely(!__afl_cmp_map)) return;
+  if (!area_is_mapped(stdstring1, 32) || !area_is_mapped(stdstring2, 32))
+    return;
+
   __cmplog_rtn_hook(get_llvm_stdstring(stdstring1),
                     get_llvm_stdstring(stdstring2));
@@ -90,7 +90,7 @@ void dict2file(int fd, u8 *mem, u32 len) {
   j = 1;
   for (i = 0; i < len; i++) {

-    if (isprint(mem[i])) {
+    if (isprint(mem[i]) && mem[i] != '\\' && mem[i] != '"') {

       line[j++] = mem[i];
@@ -924,9 +924,7 @@ bool AFLLTOPass::runOnModule(Module &M) {

   if (getenv("AFL_LLVM_LTO_DONTWRITEID") == NULL) {

-    uint32_t write_loc = afl_global_id;
-
-    if (afl_global_id % 32) write_loc = (((afl_global_id + 32) >> 4) << 4);
+    uint32_t write_loc = (((afl_global_id + 63) >> 6) << 6);

     GlobalVariable *AFLFinalLoc = new GlobalVariable(
         M, Int32Ty, true, GlobalValue::ExternalLinkage, 0, "__afl_final_loc");
@@ -19,12 +19,13 @@
 #include <stdlib.h>
 #include <unistd.h>

+#include <iostream>
 #include <list>
 #include <string>
 #include <fstream>
 #include <sys/time.h>
-#include "llvm/Config/llvm-config.h"

+#include "llvm/Config/llvm-config.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/LegacyPassManager.h"
@@ -265,13 +266,20 @@ bool CmpLogInstructions::hookInstrs(Module &M) {
       unsigned int max_size = Val->getType()->getIntegerBitWidth(), cast_size;
       unsigned char do_cast = 0;

-      if (!SI->getNumCases() || max_size < 16 || max_size % 8) {
+      if (!SI->getNumCases() || max_size < 16) {

         // if (!be_quiet) errs() << "skip trivial switch..\n";
         continue;

       }

+      if (max_size % 8) {
+
+        max_size = (((max_size / 8) + 1) * 8);
+        do_cast = 1;
+
+      }
+
       IRBuilder<> IRB(SI->getParent());
       IRB.SetInsertPoint(SI);
@@ -310,36 +318,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) {

       if (do_cast) {

-        ConstantInt *cint = dyn_cast<ConstantInt>(Val);
-        if (cint) {
-
-          uint64_t val = cint->getZExtValue();
-          // fprintf(stderr, "ConstantInt: %lu\n", val);
-          switch (cast_size) {
-
-            case 8:
-              CompareTo = ConstantInt::get(Int8Ty, val);
-              break;
-            case 16:
-              CompareTo = ConstantInt::get(Int16Ty, val);
-              break;
-            case 32:
-              CompareTo = ConstantInt::get(Int32Ty, val);
-              break;
-            case 64:
-              CompareTo = ConstantInt::get(Int64Ty, val);
-              break;
-            case 128:
-              CompareTo = ConstantInt::get(Int128Ty, val);
-              break;
-
-          }
-
-        } else {
-
-          CompareTo = IRB.CreateBitCast(Val, IntegerType::get(C, cast_size));
-
-        }
+        CompareTo =
+            IRB.CreateIntCast(CompareTo, IntegerType::get(C, cast_size), false);

       }
@@ -361,27 +341,8 @@ bool CmpLogInstructions::hookInstrs(Module &M) {

         if (do_cast) {

-          uint64_t val = cint->getZExtValue();
-          // fprintf(stderr, "ConstantInt: %lu\n", val);
-          switch (cast_size) {
-
-            case 8:
-              new_param = ConstantInt::get(Int8Ty, val);
-              break;
-            case 16:
-              new_param = ConstantInt::get(Int16Ty, val);
-              break;
-            case 32:
-              new_param = ConstantInt::get(Int32Ty, val);
-              break;
-            case 64:
-              new_param = ConstantInt::get(Int64Ty, val);
-              break;
-            case 128:
-              new_param = ConstantInt::get(Int128Ty, val);
-              break;
-
-          }
+          new_param =
+              IRB.CreateIntCast(cint, IntegerType::get(C, cast_size), false);

         }
@@ -540,7 +501,14 @@ bool CmpLogInstructions::hookInstrs(Module &M) {

     }

-    if (!max_size || max_size % 8 || max_size < 16) { continue; }
+    if (!max_size || max_size < 16) { continue; }
+
+    if (max_size % 8) {
+
+      max_size = (((max_size / 8) + 1) * 8);
+      do_cast = 1;
+
+    }

     if (max_size > 128) {
@@ -573,88 +541,27 @@ bool CmpLogInstructions::hookInstrs(Module &M) {

     }

-    if (do_cast) {
-
-      // F*cking LLVM optimized out any kind of bitcasts of ConstantInt values
-      // creating illegal calls. WTF. So we have to work around this.
-
-      ConstantInt *cint = dyn_cast<ConstantInt>(op0);
-      if (cint) {
-
-        uint64_t val = cint->getZExtValue();
-        // fprintf(stderr, "ConstantInt: %lu\n", val);
-        ConstantInt *new_param = NULL;
-        switch (cast_size) {
-
-          case 8:
-            new_param = ConstantInt::get(Int8Ty, val);
-            break;
-          case 16:
-            new_param = ConstantInt::get(Int16Ty, val);
-            break;
-          case 32:
-            new_param = ConstantInt::get(Int32Ty, val);
-            break;
-          case 64:
-            new_param = ConstantInt::get(Int64Ty, val);
-            break;
-          case 128:
-            new_param = ConstantInt::get(Int128Ty, val);
-            break;
-
-        }
-
-        if (!new_param) { continue; }
-        args.push_back(new_param);
-
-      } else {
-
-        Value *V0 = IRB.CreateBitCast(op0, IntegerType::get(C, cast_size));
-        args.push_back(V0);
-
-      }
-
-      cint = dyn_cast<ConstantInt>(op1);
-      if (cint) {
-
-        uint64_t val = cint->getZExtValue();
-        ConstantInt *new_param = NULL;
-        switch (cast_size) {
-
-          case 8:
-            new_param = ConstantInt::get(Int8Ty, val);
-            break;
-          case 16:
-            new_param = ConstantInt::get(Int16Ty, val);
-            break;
-          case 32:
-            new_param = ConstantInt::get(Int32Ty, val);
-            break;
-          case 64:
-            new_param = ConstantInt::get(Int64Ty, val);
-            break;
-          case 128:
-            new_param = ConstantInt::get(Int128Ty, val);
-            break;
-
-        }
-
-        if (!new_param) { continue; }
-        args.push_back(new_param);
-
-      } else {
-
-        Value *V1 = IRB.CreateBitCast(op1, IntegerType::get(C, cast_size));
-        args.push_back(V1);
-
-      }
-
-    } else {
-
-      args.push_back(op0);
-      args.push_back(op1);
-
-    }
+    // errs() << "[CMPLOG] cmp " << *cmpInst << "(in function " <<
+    // cmpInst->getFunction()->getName() << ")\n";
+
+    // first bitcast to integer type of the same bitsize as the original
+    // type (this is a nop, if already integer)
+    Value *op0_i = IRB.CreateBitCast(
+        op0, IntegerType::get(C, op0->getType()->getPrimitiveSizeInBits()));
+    // then create a int cast, which does zext, trunc or bitcast. In our case
+    // usually zext to the next larger supported type (this is a nop if
+    // already the right type)
+    Value *V0 =
+        IRB.CreateIntCast(op0_i, IntegerType::get(C, cast_size), false);
+    args.push_back(V0);
+    Value *op1_i = IRB.CreateBitCast(
+        op1, IntegerType::get(C, op1->getType()->getPrimitiveSizeInBits()));
+    Value *V1 =
+        IRB.CreateIntCast(op1_i, IntegerType::get(C, cast_size), false);
+    args.push_back(V1);
+
+    // errs() << "[CMPLOG] casted parameters:\n0: " << *V0 << "\n1: " << *V1
+    // << "\n";

     ConstantInt *attribute = ConstantInt::get(Int8Ty, attr);
     args.push_back(attribute);
@@ -1 +1 @@
-47722f64e4
+e36a30ebca
@@ -17,7 +17,7 @@ The idea and much of the initial implementation comes from Andrew Griffiths.
The actual implementation on current QEMU (shipped as qemuafl) is from
Andrea Fioraldi. Special thanks to abiondo that re-enabled TCG chaining.

## 2) How to use qemu_mode

The feature is implemented with a patched QEMU. The simplest way
to build it is to run ./build_qemu_support.sh. The script will download,
@@ -176,7 +176,12 @@ Comparative measurements of execution speed or instrumentation coverage will be
fairly meaningless if the optimization levels or instrumentation scopes don't
match.

## 12) Other features

With `AFL_QEMU_FORCE_DFL` you force QEMU to ignore the registered signal
handlers of the target.

## 13) Gotchas, feedback, bugs

If you need to fix up checksums or do other cleanup on mutated test cases, see
utils/custom_mutators/ for a viable solution.
@@ -197,19 +202,12 @@ with -march=core2, can help.
Beyond that, this is an early-stage mechanism, so field reports are welcome.
You can send them to <afl-users@googlegroups.com>.

## 14) Alternatives: static rewriting

Statically rewriting binaries just once, instead of attempting to translate
them at run time, can be a faster alternative. That said, static rewriting is
fraught with peril, because it depends on being able to properly and fully model
program control flow without actually executing each and every code path.

Checkout the "Fuzzing binary-only targets" section in our main README.md and
the docs/binaryonly_fuzzing.md document for more information and hints.
@@ -233,7 +233,6 @@ QEMU_CONF_FLAGS=" \
 --disable-xen \
 --disable-xen-pci-passthrough \
 --disable-xfsctl \
---enable-pie \
 --python=${PYTHONBIN} \
 --target-list="${CPU_TARGET}-linux-user" \
 --without-default-devices \
@@ -241,7 +240,7 @@ QEMU_CONF_FLAGS=" \

 if [ -n "${CROSS_PREFIX}" ]; then

-QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} --cross-prefix=${CROSS_PREFIX}"
+QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS --cross-prefix=$CROSS_PREFIX"

 fi

@@ -249,10 +248,15 @@ if [ "$STATIC" = "1" ]; then

 echo Building STATIC binary

-QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} \
+QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS \
 --static \
 --extra-cflags=-DAFL_QEMU_STATIC_BUILD=1 \
 "

+else
+
+QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} --enable-pie "
+
 fi

 if [ "$DEBUG" = "1" ]; then
@@ -262,7 +266,7 @@ if [ "$DEBUG" = "1" ]; then
 # --enable-gcov might go here but incurs a mesonbuild error on meson
 # versions prior to 0.56:
 # https://github.com/qemu/meson/commit/903d5dd8a7dc1d6f8bef79e66d6ebc07c
-QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} \
+QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS \
 --disable-strip \
 --enable-debug \
 --enable-debug-info \
@@ -275,7 +279,7 @@ if [ "$DEBUG" = "1" ]; then

 else

-QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} \
+QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS \
 --disable-debug-info \
 --disable-debug-mutex \
 --disable-debug-tcg \
@@ -290,7 +294,7 @@ if [ "$PROFILING" = "1" ]; then

 echo Building PROFILED binary

-QEMU_CONF_FLAGS="${QEMU_CONF_FLAGS} \
+QEMU_CONF_FLAGS="$QEMU_CONF_FLAGS \
 --enable-gprof \
 --enable-profiler \
 "
@@ -298,7 +302,7 @@ if [ "$PROFILING" = "1" ]; then
 fi

 # shellcheck disable=SC2086
-./configure ${QEMU_CONF_FLAGS} || exit 1
+./configure $QEMU_CONF_FLAGS || exit 1

 echo "[+] Configuration complete."

@@ -370,10 +374,20 @@ if [ "$ORIG_CROSS" = "" ]; then
 fi
 fi

-if ! command -v "$CROSS" > /dev/null
-then
+if ! command -v "$CROSS" > /dev/null ; then
+if [ "$CPU_TARGET" = "$(uname -m)" ] ; then
+echo "[+] Building afl++ qemu support libraries with CC=$CC"
+echo "[+] Building libcompcov ..."
+make -C libcompcov && echo "[+] libcompcov ready"
+echo "[+] Building unsigaction ..."
+make -C unsigaction && echo "[+] unsigaction ready"
+echo "[+] Building libqasan ..."
+make -C libqasan && echo "[+] unsigaction ready"
+else
 echo "[!] Cross compiler $CROSS could not be found, cannot compile libcompcov libqasan and unsigaction"
+fi
 else
+echo "[+] Building afl++ qemu support libraries with CC=$CROSS"
 echo "[+] Building libcompcov ..."
 make -C libcompcov CC=$CROSS && echo "[+] libcompcov ready"
 echo "[+] Building unsigaction ..."
@@ -29,6 +29,8 @@
 #include <sys/types.h>
 #include <sys/shm.h>
 #include <stdbool.h>
+#include <stdint.h>
+#include <inttypes.h>

 #include "types.h"
 #include "config.h"
@@ -159,14 +161,15 @@ static void __compcov_load(void) {

 }

-static void __compcov_trace(u64 cur_loc, const u8 *v0, const u8 *v1, size_t n) {
+static void __compcov_trace(uintptr_t cur_loc, const u8 *v0, const u8 *v1,
+size_t n) {

 size_t i;

 if (debug_fd != 1) {

 char debugbuf[4096];
-snprintf(debugbuf, sizeof(debugbuf), "0x%llx %s %s %zu\n", cur_loc,
+snprintf(debugbuf, sizeof(debugbuf), "0x%" PRIxPTR " %s %s %zu\n", cur_loc,
 v0 == NULL ? "(null)" : (char *)v0,
 v1 == NULL ? "(null)" : (char *)v1, n);
 write(debug_fd, debugbuf, strlen(debugbuf));
@@ -206,7 +209,7 @@ int strcmp(const char *str1, const char *str2) {

 if (n <= MAX_CMP_LENGTH) {

-u64 cur_loc = (u64)retaddr;
+uintptr_t cur_loc = (uintptr_t)retaddr;
 cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
 cur_loc &= MAP_SIZE - 1;

@@ -235,7 +238,7 @@ int strncmp(const char *str1, const char *str2, size_t len) {

 if (n <= MAX_CMP_LENGTH) {

-u64 cur_loc = (u64)retaddr;
+uintptr_t cur_loc = (uintptr_t)retaddr;
 cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
 cur_loc &= MAP_SIZE - 1;

@@ -265,7 +268,7 @@ int strcasecmp(const char *str1, const char *str2) {

 if (n <= MAX_CMP_LENGTH) {

-u64 cur_loc = (u64)retaddr;
+uintptr_t cur_loc = (uintptr_t)retaddr;
 cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
 cur_loc &= MAP_SIZE - 1;

@@ -296,7 +299,7 @@ int strncasecmp(const char *str1, const char *str2, size_t len) {

 if (n <= MAX_CMP_LENGTH) {

-u64 cur_loc = (u64)retaddr;
+uintptr_t cur_loc = (uintptr_t)retaddr;
 cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
 cur_loc &= MAP_SIZE - 1;

@@ -324,7 +327,7 @@ int memcmp(const void *mem1, const void *mem2, size_t len) {

 if (n <= MAX_CMP_LENGTH) {

-u64 cur_loc = (u64)retaddr;
+uintptr_t cur_loc = (uintptr_t)retaddr;
 cur_loc = (cur_loc >> 4) ^ (cur_loc << 8);
 cur_loc &= MAP_SIZE - 1;
@@ -4,16 +4,25 @@ This library is the injected runtime used by QEMU AddressSanitizer (QASan).

 The original repository is [here](https://github.com/andreafioraldi/qasan).

-The version embedded in qemuafl is an updated version of just the usermode part and this runtime in injected via LD_PRELOAD (so works just for dynamically linked binaries).
+The version embedded in qemuafl is an updated version of just the usermode part
+and this runtime is injected via LD_PRELOAD (so works just for dynamically
+linked binaries).

-The usage is super simple, just set the env var `AFL_USE_QASAN=1` when fuzzing in qemu mode (-Q). afl-fuzz will automatically set AFL_PRELOAD to load this library and enable the QASan instrumentation in afl-qemu-trace.
+The usage is super simple, just set the env var `AFL_USE_QASAN=1` when fuzzing
+in qemu mode (-Q). afl-fuzz will automatically set AFL_PRELOAD to load this
+library and enable the QASan instrumentation in afl-qemu-trace.

-For debugging purposes, we still suggest to run the original QASan as the stacktrace support for ARM (just a debug feature, it does not affect the bug finding capabilities during fuzzing) is WIP.
+For debugging purposes, we still suggest to run the original QASan as the
+stacktrace support for ARM (just a debug feature, it does not affect the bug
+finding capabilities during fuzzing) is WIP.

-### When I should use QASan?
+### When should I use QASan?

-If your target binary is PIC x86_64, you should also give a try to [retrowrite](https://github.com/HexHive/retrowrite) for static rewriting.
+If your target binary is PIC x86_64, you should also give a try to
+[retrowrite](https://github.com/HexHive/retrowrite) for static rewriting.

-If it fails, or if your binary is for another architecture, or you want to use persistent and snapshot mdoe, AFL++ QASan mode is what you want/have to use.
+If it fails, or if your binary is for another architecture, or you want to use
+persistent and snapshot mode, AFL++ QASan mode is what you want/have to use.

-Note that the overhead of libdislocator when combined with QEMU mode is much lower but it can catch less bugs. This is a short blanket, take your choice.
+Note that the overhead of libdislocator when combined with QEMU mode is much
+lower but it can catch less bugs. This is a short blanket, take your choice.
(File diff suppressed because it is too large.)
@@ -26,6 +26,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "libqasan.h"
 #include "map_macro.h"

+ssize_t (*__lq_libc_write)(int, const void *, size_t);
+ssize_t (*__lq_libc_read)(int, void *, size_t);
 char *(*__lq_libc_fgets)(char *, int, FILE *);
 int (*__lq_libc_atoi)(const char *);
 long (*__lq_libc_atol)(const char *);
@@ -35,6 +37,8 @@ void __libqasan_init_hooks(void) {

 __libqasan_init_malloc();

+__lq_libc_write = ASSERT_DLSYM(write);
+__lq_libc_read = ASSERT_DLSYM(read);
 __lq_libc_fgets = ASSERT_DLSYM(fgets);
 __lq_libc_atoi = ASSERT_DLSYM(atoi);
 __lq_libc_atol = ASSERT_DLSYM(atol);
@@ -42,6 +46,30 @@ void __libqasan_init_hooks(void) {

 }

+ssize_t write(int fd, const void *buf, size_t count) {
+
+void *rtv = __builtin_return_address(0);
+
+QASAN_DEBUG("%14p: write(%d, %p, %zu)\n", rtv, fd, buf, count);
+ssize_t r = __lq_libc_write(fd, buf, count);
+QASAN_DEBUG("\t\t = %zd\n", r);
+
+return r;
+
+}
+
+ssize_t read(int fd, void *buf, size_t count) {
+
+void *rtv = __builtin_return_address(0);
+
+QASAN_DEBUG("%14p: read(%d, %p, %zu)\n", rtv, fd, buf, count);
+ssize_t r = __lq_libc_read(fd, buf, count);
+QASAN_DEBUG("\t\t = %zd\n", r);
+
+return r;
+
+}
+
 #ifdef __ANDROID__
 size_t malloc_usable_size(const void *ptr) {

@@ -54,7 +82,7 @@ size_t malloc_usable_size(void *ptr) {

 QASAN_DEBUG("%14p: malloc_usable_size(%p)\n", rtv, ptr);
 size_t r = __libqasan_malloc_usable_size((void *)ptr);
-QASAN_DEBUG("\t\t = %ld\n", r);
+QASAN_DEBUG("\t\t = %zu\n", r);

 return r;

@@ -64,7 +92,7 @@ void *malloc(size_t size) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: malloc(%ld)\n", rtv, size);
+QASAN_DEBUG("%14p: malloc(%zu)\n", rtv, size);
 void *r = __libqasan_malloc(size);
 QASAN_DEBUG("\t\t = %p\n", r);

@@ -76,7 +104,7 @@ void *calloc(size_t nmemb, size_t size) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: calloc(%ld, %ld)\n", rtv, nmemb, size);
+QASAN_DEBUG("%14p: calloc(%zu, %zu)\n", rtv, nmemb, size);
 void *r = __libqasan_calloc(nmemb, size);
 QASAN_DEBUG("\t\t = %p\n", r);

@@ -88,7 +116,7 @@ void *realloc(void *ptr, size_t size) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: realloc(%p, %ld)\n", rtv, ptr, size);
+QASAN_DEBUG("%14p: realloc(%p, %zu)\n", rtv, ptr, size);
 void *r = __libqasan_realloc(ptr, size);
 QASAN_DEBUG("\t\t = %p\n", r);

@@ -100,7 +128,7 @@ int posix_memalign(void **memptr, size_t alignment, size_t size) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: posix_memalign(%p, %ld, %ld)\n", rtv, memptr, alignment,
+QASAN_DEBUG("%14p: posix_memalign(%p, %zu, %zu)\n", rtv, memptr, alignment,
 size);
 int r = __libqasan_posix_memalign(memptr, alignment, size);
 QASAN_DEBUG("\t\t = %d [*memptr = %p]\n", r, *memptr);
@@ -113,7 +141,7 @@ void *memalign(size_t alignment, size_t size) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: memalign(%ld, %ld)\n", rtv, alignment, size);
+QASAN_DEBUG("%14p: memalign(%zu, %zu)\n", rtv, alignment, size);
 void *r = __libqasan_memalign(alignment, size);
 QASAN_DEBUG("\t\t = %p\n", r);

@@ -125,7 +153,7 @@ void *aligned_alloc(size_t alignment, size_t size) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: aligned_alloc(%ld, %ld)\n", rtv, alignment, size);
+QASAN_DEBUG("%14p: aligned_alloc(%zu, %zu)\n", rtv, alignment, size);
 void *r = __libqasan_aligned_alloc(alignment, size);
 QASAN_DEBUG("\t\t = %p\n", r);

@@ -137,7 +165,7 @@ void *valloc(size_t size) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: valloc(%ld)\n", rtv, size);
+QASAN_DEBUG("%14p: valloc(%zu)\n", rtv, size);
 void *r = __libqasan_memalign(sysconf(_SC_PAGESIZE), size);
 QASAN_DEBUG("\t\t = %p\n", r);

@@ -149,7 +177,7 @@ void *pvalloc(size_t size) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: pvalloc(%ld)\n", rtv, size);
+QASAN_DEBUG("%14p: pvalloc(%zu)\n", rtv, size);
 size_t page_size = sysconf(_SC_PAGESIZE);
 size = (size & (page_size - 1)) + page_size;
 void *r = __libqasan_memalign(page_size, size);
@@ -174,7 +202,9 @@ char *fgets(char *s, int size, FILE *stream) {

 QASAN_DEBUG("%14p: fgets(%p, %d, %p)\n", rtv, s, size, stream);
 QASAN_STORE(s, size);
+#ifndef __ANDROID__
 QASAN_LOAD(stream, sizeof(FILE));
+#endif
 char *r = __lq_libc_fgets(s, size, stream);
 QASAN_DEBUG("\t\t = %p\n", r);

@@ -186,7 +216,7 @@ int memcmp(const void *s1, const void *s2, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: memcmp(%p, %p, %ld)\n", rtv, s1, s2, n);
+QASAN_DEBUG("%14p: memcmp(%p, %p, %zu)\n", rtv, s1, s2, n);
 QASAN_LOAD(s1, n);
 QASAN_LOAD(s2, n);
 int r = __libqasan_memcmp(s1, s2, n);
@@ -200,7 +230,7 @@ void *memcpy(void *dest, const void *src, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: memcpy(%p, %p, %ld)\n", rtv, dest, src, n);
+QASAN_DEBUG("%14p: memcpy(%p, %p, %zu)\n", rtv, dest, src, n);
 QASAN_LOAD(src, n);
 QASAN_STORE(dest, n);
 void *r = __libqasan_memcpy(dest, src, n);
@@ -214,7 +244,7 @@ void *mempcpy(void *dest, const void *src, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: mempcpy(%p, %p, %ld)\n", rtv, dest, src, n);
+QASAN_DEBUG("%14p: mempcpy(%p, %p, %zu)\n", rtv, dest, src, n);
 QASAN_LOAD(src, n);
 QASAN_STORE(dest, n);
 void *r = (uint8_t *)__libqasan_memcpy(dest, src, n) + n;
@@ -228,7 +258,7 @@ void *memmove(void *dest, const void *src, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: memmove(%p, %p, %ld)\n", rtv, dest, src, n);
+QASAN_DEBUG("%14p: memmove(%p, %p, %zu)\n", rtv, dest, src, n);
 QASAN_LOAD(src, n);
 QASAN_STORE(dest, n);
 void *r = __libqasan_memmove(dest, src, n);
@@ -242,7 +272,7 @@ void *memset(void *s, int c, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: memset(%p, %d, %ld)\n", rtv, s, c, n);
+QASAN_DEBUG("%14p: memset(%p, %d, %zu)\n", rtv, s, c, n);
 QASAN_STORE(s, n);
 void *r = __libqasan_memset(s, c, n);
 QASAN_DEBUG("\t\t = %p\n", r);
@@ -255,7 +285,7 @@ void *memchr(const void *s, int c, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: memchr(%p, %d, %ld)\n", rtv, s, c, n);
+QASAN_DEBUG("%14p: memchr(%p, %d, %zu)\n", rtv, s, c, n);
 void *r = __libqasan_memchr(s, c, n);
 if (r == NULL)
 QASAN_LOAD(s, n);
@@ -271,7 +301,7 @@ void *memrchr(const void *s, int c, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: memrchr(%p, %d, %ld)\n", rtv, s, c, n);
+QASAN_DEBUG("%14p: memrchr(%p, %d, %zu)\n", rtv, s, c, n);
 QASAN_LOAD(s, n);
 void *r = __libqasan_memrchr(s, c, n);
 QASAN_DEBUG("\t\t = %p\n", r);
@@ -285,7 +315,7 @@ void *memmem(const void *haystack, size_t haystacklen, const void *needle,

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: memmem(%p, %ld, %p, %ld)\n", rtv, haystack, haystacklen,
+QASAN_DEBUG("%14p: memmem(%p, %zu, %p, %zu)\n", rtv, haystack, haystacklen,
 needle, needlelen);
 QASAN_LOAD(haystack, haystacklen);
 QASAN_LOAD(needle, needlelen);
@@ -301,7 +331,7 @@ void bzero(void *s, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: bzero(%p, %ld)\n", rtv, s, n);
+QASAN_DEBUG("%14p: bzero(%p, %zu)\n", rtv, s, n);
 QASAN_STORE(s, n);
 __libqasan_memset(s, 0, n);

@@ -313,7 +343,7 @@ void explicit_bzero(void *s, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: bzero(%p, %ld)\n", rtv, s, n);
+QASAN_DEBUG("%14p: bzero(%p, %zu)\n", rtv, s, n);
 QASAN_STORE(s, n);
 __libqasan_memset(s, 0, n);

@@ -323,7 +353,7 @@ int bcmp(const void *s1, const void *s2, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: bcmp(%p, %p, %ld)\n", rtv, s1, s2, n);
+QASAN_DEBUG("%14p: bcmp(%p, %p, %zu)\n", rtv, s1, s2, n);
 QASAN_LOAD(s1, n);
 QASAN_LOAD(s2, n);
 int r = __libqasan_bcmp(s1, s2, n);
@@ -381,7 +411,7 @@ int strncasecmp(const char *s1, const char *s2, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: strncasecmp(%p, %p, %ld)\n", rtv, s1, s2, n);
+QASAN_DEBUG("%14p: strncasecmp(%p, %p, %zu)\n", rtv, s1, s2, n);
 size_t l1 = __libqasan_strnlen(s1, n);
 QASAN_LOAD(s1, l1);
 size_t l2 = __libqasan_strnlen(s2, n);
@@ -431,7 +461,7 @@ int strncmp(const char *s1, const char *s2, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: strncmp(%p, %p, %ld)\n", rtv, s1, s2, n);
+QASAN_DEBUG("%14p: strncmp(%p, %p, %zu)\n", rtv, s1, s2, n);
 size_t l1 = __libqasan_strnlen(s1, n);
 QASAN_LOAD(s1, l1);
 size_t l2 = __libqasan_strnlen(s2, n);
@@ -462,7 +492,7 @@ char *strncpy(char *dest, const char *src, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: strncpy(%p, %p, %ld)\n", rtv, dest, src, n);
+QASAN_DEBUG("%14p: strncpy(%p, %p, %zu)\n", rtv, dest, src, n);
 size_t l = __libqasan_strnlen(src, n);
 QASAN_STORE(dest, n);
 void *r;
@@ -521,7 +551,7 @@ size_t strlen(const char *s) {
 QASAN_DEBUG("%14p: strlen(%p)\n", rtv, s);
 size_t r = __libqasan_strlen(s);
 QASAN_LOAD(s, r + 1);
-QASAN_DEBUG("\t\t = %ld\n", r);
+QASAN_DEBUG("\t\t = %zu\n", r);

 return r;

@@ -531,10 +561,10 @@ size_t strnlen(const char *s, size_t n) {

 void *rtv = __builtin_return_address(0);

-QASAN_DEBUG("%14p: strnlen(%p, %ld)\n", rtv, s, n);
+QASAN_DEBUG("%14p: strnlen(%p, %zu)\n", rtv, s, n);
 size_t r = __libqasan_strnlen(s, n);
 QASAN_LOAD(s, r);
-QASAN_DEBUG("\t\t = %ld\n", r);
+QASAN_DEBUG("\t\t = %zu\n", r);

 return r;

@@ -621,7 +651,7 @@ size_t wcslen(const wchar_t *s) {
 QASAN_DEBUG("%14p: wcslen(%p)\n", rtv, s);
 size_t r = __libqasan_wcslen(s);
 QASAN_LOAD(s, sizeof(wchar_t) * (r + 1));
-QASAN_DEBUG("\t\t = %ld\n", r);
+QASAN_DEBUG("\t\t = %zu\n", r);

 return r;
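The hunks above add `read` and `write` wrappers that resolve the real libc symbols once (via `ASSERT_DLSYM`), log each call with `QASAN_DEBUG`, and then forward it. A minimal standalone sketch of that interposition pattern, using plain `dlsym(RTLD_NEXT, ...)` instead of the libqasan helpers; the file and symbol names here are illustrative, not from the patch:

```c
// hook.c - build with: gcc -shared -fPIC hook.c -o hook.so -ldl
// then run a target with LD_PRELOAD=./hook.so to interpose write().
#define _GNU_SOURCE
#include <dlfcn.h>
#include <unistd.h>

static ssize_t (*real_write)(int, const void *, size_t);

ssize_t write(int fd, const void *buf, size_t count) {

  // resolve the next (real) definition of write() once
  if (!real_write)
    real_write = (ssize_t (*)(int, const void *, size_t))dlsym(RTLD_NEXT, "write");

  // a real hook (like libqasan) would validate buf/count here before forwarding
  return real_write(fd, buf, count);

}
```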
@@ -72,7 +72,7 @@ void __libqasan_print_maps(void) {

 QASAN_LOG("QEMU-AddressSanitizer (v%s)\n", QASAN_VERSTR);
 QASAN_LOG(
-"Copyright (C) 2019-2020 Andrea Fioraldi <andreafioraldi@gmail.com>\n");
+"Copyright (C) 2019-2021 Andrea Fioraldi <andreafioraldi@gmail.com>\n");
 QASAN_LOG("\n");

 if (__qasan_log) __libqasan_print_maps();
@@ -51,9 +51,9 @@ typedef struct {
 struct chunk_begin {

 size_t requested_size;
-void* aligned_orig;  // NULL if not aligned
-struct chunk_begin* next;
-struct chunk_begin* prev;
+void * aligned_orig;  // NULL if not aligned
+struct chunk_begin *next;
+struct chunk_begin *prev;
 char redzone[REDZONE_SIZE];

 };
@@ -68,45 +68,45 @@ struct chunk_struct {

 #ifdef __GLIBC__

-void* (*__lq_libc_malloc)(size_t);
-void (*__lq_libc_free)(void*);
+void *(*__lq_libc_malloc)(size_t);
+void (*__lq_libc_free)(void *);
 #define backend_malloc __lq_libc_malloc
 #define backend_free __lq_libc_free

 #define TMP_ZONE_SIZE 4096
 static int __tmp_alloc_zone_idx;
 static unsigned char __tmp_alloc_zone[TMP_ZONE_SIZE];

 #else

 // From dlmalloc.c
-void* dlmalloc(size_t);
-void dlfree(void*);
+void * dlmalloc(size_t);
+void dlfree(void *);
 #define backend_malloc dlmalloc
 #define backend_free dlfree

 #endif

 int __libqasan_malloc_initialized;

-static struct chunk_begin* quarantine_top;
-static struct chunk_begin* quarantine_end;
+static struct chunk_begin *quarantine_top;
+static struct chunk_begin *quarantine_end;
 static size_t quarantine_bytes;

 #ifdef __BIONIC__
 static pthread_mutex_t quarantine_lock;
 #define LOCK_TRY pthread_mutex_trylock
 #define LOCK_INIT pthread_mutex_init
 #define LOCK_UNLOCK pthread_mutex_unlock
 #else
 static pthread_spinlock_t quarantine_lock;
 #define LOCK_TRY pthread_spin_trylock
 #define LOCK_INIT pthread_spin_init
 #define LOCK_UNLOCK pthread_spin_unlock
 #endif

 // need qasan disabled
-static int quanratine_push(struct chunk_begin* ck) {
+static int quanratine_push(struct chunk_begin *ck) {

 if (ck->requested_size >= QUARANTINE_MAX_BYTES) return 0;

@@ -114,7 +114,7 @@ static int quanratine_push(struct chunk_begin* ck) {

 while (ck->requested_size + quarantine_bytes >= QUARANTINE_MAX_BYTES) {

-struct chunk_begin* tmp = quarantine_end;
+struct chunk_begin *tmp = quarantine_end;
 quarantine_end = tmp->prev;

 quarantine_bytes -= tmp->requested_size;
@@ -154,23 +154,23 @@ void __libqasan_init_malloc(void) {

 }

-size_t __libqasan_malloc_usable_size(void* ptr) {
+size_t __libqasan_malloc_usable_size(void *ptr) {

-char* p = ptr;
+char *p = ptr;
 p -= sizeof(struct chunk_begin);

-return ((struct chunk_begin*)p)->requested_size;
+return ((struct chunk_begin *)p)->requested_size;

 }

-void* __libqasan_malloc(size_t size) {
+void *__libqasan_malloc(size_t size) {

 if (!__libqasan_malloc_initialized) {

 __libqasan_init_malloc();

 #ifdef __GLIBC__
-void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx];
+void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx];

 if (size & (ALLOC_ALIGN_SIZE - 1))
 __tmp_alloc_zone_idx +=
@@ -185,7 +185,7 @@ void* __libqasan_malloc(size_t size) {

 int state = QASAN_SWAP(QASAN_DISABLED);  // disable qasan for this thread

-struct chunk_begin* p = backend_malloc(sizeof(struct chunk_struct) + size);
+struct chunk_begin *p = backend_malloc(sizeof(struct chunk_struct) + size);

 QASAN_SWAP(state);

@@ -197,14 +197,14 @@ void* __libqasan_malloc(size_t size) {
 p->aligned_orig = NULL;
 p->next = p->prev = NULL;

-QASAN_ALLOC(&p[1], (char*)&p[1] + size);
+QASAN_ALLOC(&p[1], (char *)&p[1] + size);
 QASAN_POISON(p->redzone, REDZONE_SIZE, ASAN_HEAP_LEFT_RZ);
 if (size & (ALLOC_ALIGN_SIZE - 1))
-QASAN_POISON((char*)&p[1] + size,
+QASAN_POISON((char *)&p[1] + size,
 (size & ~(ALLOC_ALIGN_SIZE - 1)) + 8 - size + REDZONE_SIZE,
 ASAN_HEAP_RIGHT_RZ);
 else
-QASAN_POISON((char*)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ);
+QASAN_POISON((char *)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ);

 __builtin_memset(&p[1], 0xff, size);

@@ -212,17 +212,17 @@ void* __libqasan_malloc(size_t size) {

 }

-void __libqasan_free(void* ptr) {
+void __libqasan_free(void *ptr) {

 if (!ptr) return;

 #ifdef __GLIBC__
-if (ptr >= (void*)__tmp_alloc_zone &&
-ptr < ((void*)__tmp_alloc_zone + TMP_ZONE_SIZE))
+if (ptr >= (void *)__tmp_alloc_zone &&
+ptr < ((void *)__tmp_alloc_zone + TMP_ZONE_SIZE))
 return;
 #endif

-struct chunk_begin* p = ptr;
+struct chunk_begin *p = ptr;
 p -= 1;

 size_t n = p->requested_size;
@@ -249,21 +249,22 @@ void __libqasan_free(void* ptr) {

 }

-void* __libqasan_calloc(size_t nmemb, size_t size) {
+void *__libqasan_calloc(size_t nmemb, size_t size) {

 size *= nmemb;

 #ifdef __GLIBC__
 if (!__libqasan_malloc_initialized) {

-void* r = &__tmp_alloc_zone[__tmp_alloc_zone_idx];
+void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx];
 __tmp_alloc_zone_idx += size;
 return r;

 }

 #endif

-char* p = __libqasan_malloc(size);
+char *p = __libqasan_malloc(size);
 if (!p) return NULL;

 __builtin_memset(p, 0, size);
@@ -272,14 +273,14 @@ void* __libqasan_calloc(size_t nmemb, size_t size) {

 }

-void* __libqasan_realloc(void* ptr, size_t size) {
+void *__libqasan_realloc(void *ptr, size_t size) {

-char* p = __libqasan_malloc(size);
+char *p = __libqasan_malloc(size);
 if (!p) return NULL;

 if (!ptr) return p;

-size_t n = ((struct chunk_begin*)ptr)[-1].requested_size;
+size_t n = ((struct chunk_begin *)ptr)[-1].requested_size;
 if (size < n) n = size;

 __builtin_memcpy(p, ptr, n);
@@ -289,9 +290,9 @@ void* __libqasan_realloc(void* ptr, size_t size) {

 }

-int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) {
+int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) {

-if ((align % 2) || (align % sizeof(void*))) return EINVAL;
+if ((align % 2) || (align % sizeof(void *))) return EINVAL;
 if (len == 0) {

 *ptr = NULL;
@@ -305,7 +306,7 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) {

 int state = QASAN_SWAP(QASAN_DISABLED);  // disable qasan for this thread

-char* orig = backend_malloc(sizeof(struct chunk_struct) + size);
+char *orig = backend_malloc(sizeof(struct chunk_struct) + size);

 QASAN_SWAP(state);

@@ -313,10 +314,10 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) {

 QASAN_UNPOISON(orig, sizeof(struct chunk_struct) + size);

-char* data = orig + sizeof(struct chunk_begin);
+char *data = orig + sizeof(struct chunk_begin);
 data += align - ((uintptr_t)data % align);

-struct chunk_begin* p = (struct chunk_begin*)data - 1;
+struct chunk_begin *p = (struct chunk_begin *)data - 1;

 p->requested_size = len;
 p->aligned_orig = orig;
@@ -339,9 +340,9 @@ int __libqasan_posix_memalign(void** ptr, size_t align, size_t len) {

 }

-void* __libqasan_memalign(size_t align, size_t len) {
+void *__libqasan_memalign(size_t align, size_t len) {

-void* ret = NULL;
+void *ret = NULL;

 __libqasan_posix_memalign(&ret, align, len);

@@ -349,9 +350,9 @@ void* __libqasan_memalign(size_t align, size_t len) {

 }

-void* __libqasan_aligned_alloc(size_t align, size_t len) {
+void *__libqasan_aligned_alloc(size_t align, size_t len) {

-void* ret = NULL;
+void *ret = NULL;

 if ((len % align)) return NULL;
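The allocator touched above keeps its metadata (`struct chunk_begin`) directly in front of the pointer handed back to the caller, which is why `__libqasan_malloc_usable_size()` can simply step back one header and read `requested_size`. A toy model of that layout, with hypothetical names and none of the redzone or quarantine logic:

```c
#include <stdio.h>
#include <stdlib.h>

struct hdr { size_t requested_size; };   // stands in for struct chunk_begin

static void *toy_malloc(size_t size) {
  struct hdr *h = malloc(sizeof(struct hdr) + size);
  if (!h) return NULL;
  h->requested_size = size;
  return h + 1;                          // user data starts right after the header
}

static size_t toy_usable_size(void *ptr) {
  return ((struct hdr *)ptr)[-1].requested_size;  // step back one header
}

int main(void) {
  void *p = toy_malloc(42);
  printf("%zu\n", toy_usable_size(p));   // prints 42
  free((struct hdr *)p - 1);
  return 0;
}
```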
@@ -1,7 +1,7 @@
 /*

 This code is DEPRECATED!
-I'm keeping it here cause maybe the unistrumentation of a function is needed
+I'm keeping it here cause maybe the uninstrumentation of a function is needed
 for some strange reason.

 */
Submodule qemu_mode/qemuafl updated: 47722f64e4...e36a30ebca

23 src/afl-cc.c
@@ -554,6 +554,11 @@ static void edit_params(u32 argc, char **argv, char **envp) {

 }

+#if LLVM_MAJOR >= 13
+// fuck you llvm 13
+cc_params[cc_par_cnt++] = "-fno-experimental-new-pass-manager";
+#endif
+
 if (lto_mode && !have_c) {

 u8 *ld_path = strdup(AFL_REAL_LD);
@@ -1416,6 +1421,14 @@ int main(int argc, char **argv, char **envp) {

 }

+if (instrument_opt_mode && instrument_mode == INSTRUMENT_DEFAULT &&
+(compiler_mode == LLVM || compiler_mode == UNSET)) {
+
+instrument_mode = INSTRUMENT_CLASSIC;
+compiler_mode = LLVM;
+
+}
+
 if (!compiler_mode) {

 // lto is not a default because outside of afl-cc RANLIB and AR have to
@@ -1582,6 +1595,7 @@ int main(int argc, char **argv, char **envp) {
 "libtokencap.so)\n"
 " AFL_PATH: path to instrumenting pass and runtime "
 "(afl-compiler-rt.*o)\n"
+" AFL_IGNORE_UNKNOWN_ENVS: don't warn on unknown env vars\n"
 " AFL_INST_RATIO: percentage of branches to instrument\n"
 " AFL_QUIET: suppress verbose output\n"
 " AFL_HARDEN: adds code hardening to catch memory bugs\n"
@@ -1693,7 +1707,10 @@ int main(int argc, char **argv, char **envp) {
 "Do not be overwhelmed :) afl-cc uses good defaults if no options are "
 "selected.\n"
 "Read the documentation for FEATURES though, all are good but few are "
-"defaults.\n\n");
+"defaults.\n"
+"Recommended is afl-clang-lto with AFL_LLVM_CMPLOG or afl-clang-fast "
+"with\n"
+"AFL_LLVM_CMPLOG and AFL_LLVM_DICT2FILE.\n\n");

 exit(1);

@@ -1785,8 +1802,8 @@ int main(int argc, char **argv, char **envp) {
 if (instrument_opt_mode && instrument_mode != INSTRUMENT_CLASSIC &&
 instrument_mode != INSTRUMENT_CFG)
 FATAL(
-"CTX and NGRAM instrumentation options can only be used with CFG "
-"(recommended) and CLASSIC instrumentation modes!");
+"CTX and NGRAM instrumentation options can only be used with LLVM and "
+"CFG or CLASSIC instrumentation modes!");

 if (getenv("AFL_LLVM_SKIP_NEVERZERO") && getenv("AFL_LLVM_NOT_ZERO"))
 FATAL(
151 src/afl-common.c
@@ -47,6 +47,10 @@ u8 be_quiet = 0;
 u8 *doc_path = "";
 u8 last_intr = 0;

+#ifndef AFL_PATH
+#define AFL_PATH "/usr/local/lib/afl/"
+#endif
+
 void detect_file_args(char **argv, u8 *prog_in, bool *use_stdin) {

 u32 i = 0;
@@ -372,11 +376,11 @@ u8 *get_libqasan_path(u8 *own_loc) {

 }

-if (!access(BIN_PATH "/libqasan.so", X_OK)) {
+if (!access(AFL_PATH "/libqasan.so", X_OK)) {

 if (cp) { ck_free(cp); }

-return ck_strdup(BIN_PATH "/libqasan.so");
+return ck_strdup(AFL_PATH "/libqasan.so");

 }

@@ -518,12 +522,147 @@ int parse_afl_kill_signal_env(u8 *afl_kill_signal_env, int default_signal) {

 }

+static inline unsigned int helper_min3(unsigned int a, unsigned int b,
+unsigned int c) {
+
+return a < b ? (a < c ? a : c) : (b < c ? b : c);
+
+}
+
+// from
+// https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#C
+static int string_distance_levenshtein(char *s1, char *s2) {
+
+unsigned int s1len, s2len, x, y, lastdiag, olddiag;
+s1len = strlen(s1);
+s2len = strlen(s2);
+unsigned int column[s1len + 1];
+column[s1len] = 1;
+
+for (y = 1; y <= s1len; y++)
+column[y] = y;
+for (x = 1; x <= s2len; x++) {
+
+column[0] = x;
+for (y = 1, lastdiag = x - 1; y <= s1len; y++) {
+
+olddiag = column[y];
+column[y] = helper_min3(column[y] + 1, column[y - 1] + 1,
+lastdiag + (s1[y - 1] == s2[x - 1] ? 0 : 1));
+lastdiag = olddiag;
+
+}
+
+}
+
+return column[s1len];
+
+}
+
+#define ENV_SIMILARITY_TRESHOLD 3
+
+void print_suggested_envs(char *mispelled_env) {
+
+size_t env_name_len =
+strcspn(mispelled_env, "=") - 4;  // remove the AFL_prefix
+char *env_name = ck_alloc(env_name_len + 1);
+memcpy(env_name, mispelled_env + 4, env_name_len);
+
+char *seen = ck_alloc(sizeof(afl_environment_variables) / sizeof(char *));
+int found = 0;
+
+int j;
+for (j = 0; afl_environment_variables[j] != NULL; ++j) {
+
+char *afl_env = afl_environment_variables[j] + 4;
+int distance = string_distance_levenshtein(afl_env, env_name);
+if (distance < ENV_SIMILARITY_TRESHOLD && seen[j] == 0) {
+
+SAYF("Did you mean %s?\n", afl_environment_variables[j]);
+seen[j] = 1;
+found = 1;
+
+}
+
+}
+
+if (found) goto cleanup;
+
+for (j = 0; afl_environment_variables[j] != NULL; ++j) {
+
+char * afl_env = afl_environment_variables[j] + 4;
+size_t afl_env_len = strlen(afl_env);
+char * reduced = ck_alloc(afl_env_len + 1);
+
+size_t start = 0;
+while (start < afl_env_len) {
+
+size_t end = start + strcspn(afl_env + start, "_") + 1;
+memcpy(reduced, afl_env, start);
+if (end < afl_env_len)
+memcpy(reduced + start, afl_env + end, afl_env_len - end);
+reduced[afl_env_len - end + start] = 0;
+
+int distance = string_distance_levenshtein(reduced, env_name);
+if (distance < ENV_SIMILARITY_TRESHOLD && seen[j] == 0) {
+
+SAYF("Did you mean %s?\n", afl_environment_variables[j]);
+seen[j] = 1;
+found = 1;
+
+}
+
+start = end;
+
+};
+
+ck_free(reduced);
+
+}
+
+if (found) goto cleanup;
+
+char * reduced = ck_alloc(env_name_len + 1);
+size_t start = 0;
+while (start < env_name_len) {
+
+size_t end = start + strcspn(env_name + start, "_") + 1;
+memcpy(reduced, env_name, start);
+if (end < env_name_len)
+memcpy(reduced + start, env_name + end, env_name_len - end);
+reduced[env_name_len - end + start] = 0;
+
+for (j = 0; afl_environment_variables[j] != NULL; ++j) {
+
+int distance = string_distance_levenshtein(
+afl_environment_variables[j] + 4, reduced);
+if (distance < ENV_SIMILARITY_TRESHOLD && seen[j] == 0) {
+
+SAYF("Did you mean %s?\n", afl_environment_variables[j]);
+seen[j] = 1;
+
+}
+
+}
+
+start = end;
+
+};
+
+ck_free(reduced);
+
+cleanup:
+ck_free(env_name);
+ck_free(seen);
+
+}
+
 void check_environment_vars(char **envp) {

 if (be_quiet) { return; }

 int index = 0, issue_detected = 0;
-char *env, *val;
+char *env, *val, *ignore = getenv("AFL_IGNORE_UNKNOWN_ENVS");
 while ((env = envp[index++]) != NULL) {

 if (strncmp(env, "ALF_", 4) == 0 || strncmp(env, "_ALF", 4) == 0 ||
@@ -582,11 +721,13 @@ void check_environment_vars(char **envp) {

 }

-if (match == 0) {
+if (match == 0 && !ignore) {

 WARNF("Mistyped AFL environment variable: %s", env);
 issue_detected = 1;

+print_suggested_envs(env);
+
 }

 }
@@ -994,7 +1135,7 @@ u32 get_map_size(void) {

 }

-if (map_size % 32) { map_size = (((map_size >> 5) + 1) << 5); }
+if (map_size % 64) { map_size = (((map_size >> 6) + 1) << 6); }

 }
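The `print_suggested_envs()` code added above drives its "Did you mean ...?" hints off a single-column Levenshtein distance compared against `ENV_SIMILARITY_TRESHOLD` (3). A self-contained sketch of that check; the helper names and the sample input below are illustrative, not taken from afl-common.c:

```c
#include <stdio.h>
#include <string.h>

static unsigned min3(unsigned a, unsigned b, unsigned c) {
  return a < b ? (a < c ? a : c) : (b < c ? b : c);
}

// single-column Levenshtein distance between s1 and s2
static unsigned levenshtein(const char *s1, const char *s2) {
  unsigned l1 = strlen(s1), l2 = strlen(s2);
  unsigned col[l1 + 1], lastdiag, olddiag;
  for (unsigned y = 0; y <= l1; y++) col[y] = y;
  for (unsigned x = 1; x <= l2; x++) {
    col[0] = x;
    lastdiag = x - 1;
    for (unsigned y = 1; y <= l1; y++) {
      olddiag = col[y];
      col[y] = min3(col[y] + 1, col[y - 1] + 1,
                    lastdiag + (s1[y - 1] == s2[x - 1] ? 0 : 1));
      lastdiag = olddiag;
    }
  }
  return col[l1];
}

int main(void) {
  // "SKIP_CPUFREK" vs "SKIP_CPUFREQ": distance 1, below the threshold of 3,
  // so afl-common.c would suggest AFL_SKIP_CPUFREQ for the mistyped variable.
  printf("%u\n", levenshtein("SKIP_CPUFREK", "SKIP_CPUFREQ"));
  return 0;
}
```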
@@ -656,11 +656,11 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,

 if (!fsrv->map_size) { fsrv->map_size = MAP_SIZE; }

-if (unlikely(tmp_map_size % 32)) {
+if (unlikely(tmp_map_size % 64)) {

 // should not happen
 WARNF("Target reported non-aligned map size of %u", tmp_map_size);
-tmp_map_size = (((tmp_map_size + 31) >> 5) << 5);
+tmp_map_size = (((tmp_map_size + 63) >> 6) << 6);

 }
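Both map-size hunks above (this one and the `get_map_size()` change in afl-common.c) move the alignment from 32 to 64 bytes. The arithmetic is the usual round-up-to-a-multiple trick; a quick standalone check with an illustrative helper name:

```c
#include <stdio.h>

// round a reported map size up to the next multiple of 64 (previously 32)
static unsigned round_up_64(unsigned map_size) {
  if (map_size % 64) map_size = ((map_size >> 6) + 1) << 6;
  return map_size;
}

int main(void) {
  printf("%u %u %u\n", round_up_64(65536), round_up_64(65537), round_up_64(100));
  // prints: 65536 65600 128
  return 0;
}
```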
@@ -882,32 +882,23 @@ void perform_dry_run(afl_state_t *afl) {

 if (afl->timeout_given) {

-/* The -t nn+ syntax in the command line sets afl->timeout_given to
-'2' and instructs afl-fuzz to tolerate but skip queue entries that
-time out. */
+/* if we have a timeout but a timeout value was given then always
+skip. The '+' meaning has been changed! */
+WARNF("Test case results in a timeout (skipping)");
+++cal_failures;
+q->cal_failed = CAL_CHANCES;
+q->disabled = 1;
+q->perf_score = 0;

-if (afl->timeout_given > 1) {
+if (!q->was_fuzzed) {

-WARNF("Test case results in a timeout (skipping)");
-q->cal_failed = CAL_CHANCES;
-++cal_failures;
-break;
+q->was_fuzzed = 1;
+--afl->pending_not_fuzzed;
+--afl->active_paths;

 }

-SAYF("\n" cLRD "[-] " cRST
-"The program took more than %u ms to process one of the initial "
-"test cases.\n"
-" Usually, the right thing to do is to relax the -t option - "
-"or to delete it\n"
-" altogether and allow the fuzzer to auto-calibrate. That "
-"said, if you know\n"
-" what you are doing and want to simply skip the unruly test "
-"cases, append\n"
-" '+' at the end of the value passed to -t ('-t %u+').\n",
-afl->fsrv.exec_tmout, afl->fsrv.exec_tmout);
-
-FATAL("Test case '%s' results in a timeout", fn);
+break;

 } else {

@@ -1060,13 +1051,22 @@ void perform_dry_run(afl_state_t *afl) {
 p->perf_score = 0;

 u32 i = 0;
-while (unlikely(afl->queue_buf[i]->disabled)) {
+while (unlikely(i < afl->queued_paths && afl->queue_buf[i] &&
+afl->queue_buf[i]->disabled)) {

 ++i;

 }

-afl->queue = afl->queue_buf[i];
+if (i < afl->queued_paths && afl->queue_buf[i]) {
+
+afl->queue = afl->queue_buf[i];
+
+} else {
+
+afl->queue = afl->queue_buf[0];
+
+}

 afl->max_depth = 0;
 for (i = 0; i < afl->queued_paths; i++) {
@@ -2017,7 +2017,7 @@ void setup_dirs_fds(afl_state_t *afl) {
 fprintf(afl->fsrv.plot_file,
 "# unix_time, cycles_done, cur_path, paths_total, "
 "pending_total, pending_favs, map_size, unique_crashes, "
-"unique_hangs, max_depth, execs_per_sec\n");
+"unique_hangs, max_depth, execs_per_sec, total_execs, edges_found\n");
 fflush(afl->fsrv.plot_file);

 /* ignore errors */
@@ -30,7 +30,6 @@

 //#define _DEBUG
 //#define CMPLOG_INTROSPECTION
-#define CMPLOG_COMBINE

 // CMP attribute enum
 enum {
@@ -205,14 +204,31 @@ static void type_replace(afl_state_t *afl, u8 *buf, u32 len) {
 case '\t':
 c = ' ';
 break;
-/*
-case '\r':
-case '\n':
-// nothing ...
-break;
-*/
+case '\r':
+c = '\n';
+break;
+case '\n':
+c = '\r';
+break;
+case 0:
+c = 1;
+break;
+case 1:
+c = 0;
+break;
+case 0xff:
+c = 0;
+break;
 default:
-c = (buf[i] ^ 0xff);
+if (buf[i] < 32) {
+
+c = (buf[i] ^ 0x1f);
+
+} else {
+
+c = (buf[i] ^ 0x7f);  // we keep the highest bit
+
+}

 }
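The `type_replace()` hunk above swaps `\r` and `\n`, flips 0 and 1, maps 0xff to 0, and XORs everything else with 0x1f (control bytes) or 0x7f (so the highest bit is kept). A small stand-alone version of just that replacement rule; the function name is illustrative, and the real code handles several more character classes:

```c
#include <stdio.h>

static unsigned char replace_byte(unsigned char b) {
  switch (b) {
    case '\t': return ' ';
    case '\r': return '\n';
    case '\n': return '\r';
    case 0:    return 1;
    case 1:    return 0;
    case 0xff: return 0;
    default:   return (b < 32) ? (b ^ 0x1f) : (b ^ 0x7f);
  }
}

int main(void) {
  printf("%02x %02x %02x\n", replace_byte('\n'), replace_byte(0x41), replace_byte(0x05));
  // prints: 0d 3e 1a  ('\n' -> '\r', 'A' ^ 0x7f, 0x05 ^ 0x1f)
  return 0;
}
```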
@ -382,6 +398,7 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
|
|||||||
rng = ranges;
|
rng = ranges;
|
||||||
ranges = rng->next;
|
ranges = rng->next;
|
||||||
ck_free(rng);
|
ck_free(rng);
|
||||||
|
rng = NULL;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -420,8 +437,9 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
|
|||||||
|
|
||||||
if (taint) {
|
if (taint) {
|
||||||
|
|
||||||
if (len / positions == 1 && positions > CMPLOG_POSITIONS_MAX &&
|
if (afl->colorize_success &&
|
||||||
afl->active_paths / afl->colorize_success > CMPLOG_CORPUS_PERCENT) {
|
(len / positions == 1 && positions > CMPLOG_POSITIONS_MAX &&
|
||||||
|
afl->active_paths / afl->colorize_success > CMPLOG_CORPUS_PERCENT)) {
|
||||||
|
|
||||||
#ifdef _DEBUG
|
#ifdef _DEBUG
|
||||||
fprintf(stderr, "Colorization unsatisfactory\n");
|
fprintf(stderr, "Colorization unsatisfactory\n");
|
||||||
@ -455,6 +473,15 @@ static u8 colorization(afl_state_t *afl, u8 *buf, u32 len,
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
checksum_fail:
|
checksum_fail:
|
||||||
|
while (ranges) {
|
||||||
|
|
||||||
|
rng = ranges;
|
||||||
|
ranges = rng->next;
|
||||||
|
ck_free(rng);
|
||||||
|
rng = NULL;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
ck_free(backup);
|
ck_free(backup);
|
||||||
ck_free(changed);
|
ck_free(changed);
|
||||||
|
|
||||||
@@ -495,7 +522,7 @@ static u8 its_fuzz(afl_state_t *afl, u8 *buf, u32 len, u8 *status) {
 
 }
 
-#ifdef CMPLOG_SOLVE_TRANSFORM
+//#ifdef CMPLOG_SOLVE_TRANSFORM
 static int strntoll(const char *str, size_t sz, char **end, int base,
                     long long *out) {
 
@@ -503,6 +530,8 @@ static int strntoll(const char *str, size_t sz, char **end, int base,
   long long   ret;
   const char *beg = str;
 
+  if (!str || !sz) { return 1; }
+
   for (; beg && sz && *beg == ' '; beg++, sz--) {};
 
   if (!sz) return 1;
@@ -526,6 +555,8 @@ static int strntoull(const char *str, size_t sz, char **end, int base,
   unsigned long long ret;
   const char *       beg = str;
 
+  if (!str || !sz) { return 1; }
+
   for (; beg && sz && *beg == ' '; beg++, sz--)
     ;
 
@@ -576,7 +607,7 @@ static int is_hex(const char *str) {
 
 }
 
 #ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
 // tests 4 bytes at location
 static int is_base64(const char *str) {
 
@@ -689,10 +720,10 @@ static void to_base64(u8 *src, u8 *dst, u32 dst_len) {
 
 }
 
-#endif
 
 #endif
 
+//#endif
 
 static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
                               u64 pattern, u64 repl, u64 o_pattern,
                               u64 changed_val, u8 attr, u32 idx, u32 taint_len,
@@ -716,9 +747,9 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
   //        o_pattern, pattern, repl, changed_val, idx, taint_len,
   //        h->shape + 1, attr);
 
-#ifdef CMPLOG_SOLVE_TRANSFORM
+  //#ifdef CMPLOG_SOLVE_TRANSFORM
   // reverse atoi()/strnu?toll() is expensive, so we only to it in lvl 3
-  if (lvl & LVL3) {
+  if (afl->cmplog_enable_transform && (lvl & LVL3)) {
 
     u8 * endptr;
     u8   use_num = 0, use_unum = 0;
@@ -739,11 +770,11 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
 
     }
 
 #ifdef _DEBUG
     if (idx == 0)
       fprintf(stderr, "ASCII is=%u use_num=%u use_unum=%u idx=%u %llx==%llx\n",
              afl->queue_cur->is_ascii, use_num, use_unum, idx, num, pattern);
 #endif
 
     // num is likely not pattern as atoi("AAA") will be zero...
     if (use_num && ((u64)num == pattern || !num)) {
@@ -793,37 +824,82 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
   // Try to identify transform magic
   if (pattern != o_pattern && repl == changed_val && attr <= IS_EQUAL) {
 
-    u64 *ptr = (u64 *)&buf[idx];
-    u64 *o_ptr = (u64 *)&orig_buf[idx];
-    u64  b_val, o_b_val, mask;
+    u64 b_val, o_b_val, mask;
+    u8  bytes;
 
     switch (SHAPE_BYTES(h->shape)) {
 
       case 0:
       case 1:
-        b_val = (u64)(*ptr % 0x100);
+        bytes = 1;
+        break;
+      case 2:
+        bytes = 2;
+        break;
+      case 3:
+      case 4:
+        bytes = 4;
+        break;
+      default:
+        bytes = 8;
+
+    }
+
+    // necessary for preventing heap access overflow
+    bytes = MIN(bytes, len - idx);
+
+    switch (bytes) {
+
+      case 0:                        // cannot happen
+        b_val = o_b_val = mask = 0;  // keep the linters happy
+        break;
+      case 1: {
+
+        u8 *ptr = (u8 *)&buf[idx];
+        u8 *o_ptr = (u8 *)&orig_buf[idx];
+        b_val = (u64)(*ptr);
         o_b_val = (u64)(*o_ptr % 0x100);
         mask = 0xff;
         break;
+
+      }
+
       case 2:
-      case 3:
-        b_val = (u64)(*ptr % 0x10000);
-        o_b_val = (u64)(*o_ptr % 0x10000);
+      case 3: {
+
+        u16 *ptr = (u16 *)&buf[idx];
+        u16 *o_ptr = (u16 *)&orig_buf[idx];
+        b_val = (u64)(*ptr);
+        o_b_val = (u64)(*o_ptr);
         mask = 0xffff;
         break;
+
+      }
+
       case 4:
       case 5:
       case 6:
-      case 7:
-        b_val = (u64)(*ptr % 0x100000000);
-        o_b_val = (u64)(*o_ptr % 0x100000000);
+      case 7: {
+
+        u32 *ptr = (u32 *)&buf[idx];
+        u32 *o_ptr = (u32 *)&orig_buf[idx];
+        b_val = (u64)(*ptr);
+        o_b_val = (u64)(*o_ptr);
         mask = 0xffffffff;
         break;
-      default:
-        b_val = *ptr;
-        o_b_val = *o_ptr;
+
+      }
+
+      default: {
+
+        u64 *ptr = (u64 *)&buf[idx];
+        u64 *o_ptr = (u64 *)&orig_buf[idx];
+        b_val = (u64)(*ptr);
+        o_b_val = (u64)(*o_ptr);
         mask = 0xffffffffffffffff;
+
+      }
+
     }
 
     // test for arithmetic, eg. "if ((user_val - 0x1111) == 0x1234) ..."
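In the rewritten block above, the operand width is first derived from the compare shape and then clamped with `MIN(bytes, len - idx)` before any pointer of that width is dereferenced, so the read can never run past the end of the fuzzed buffer. A minimal stand-alone sketch of the same bounds-clamping idea follows; it is a hypothetical helper, not AFL++ code, uses memcpy instead of the width-matched casts, and assumes `idx < len`.

```c
#include <stdint.h>
#include <string.h>

/* Bounds-clamped read, mirroring the MIN(bytes, len - idx) guard above.
   Assumes idx < len; reads at most the bytes that are really available. */
static uint64_t read_clamped(const uint8_t *buf, uint32_t len, uint32_t idx,
                             uint8_t want_bytes) {

  uint32_t avail = len - idx;
  uint32_t n = want_bytes < avail ? want_bytes : avail;
  uint64_t val = 0;

  memcpy(&val, buf + idx, n);  /* never reads past buf + len */
  return val;

}
```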
@@ -983,7 +1059,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
 
   }
 
-#endif
+  //#endif
 
   // we only allow this for ascii2integer (above) so leave if this is the case
   if (unlikely(pattern == o_pattern)) { return 0; }
@@ -1138,8 +1214,12 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
   // 16 = modified float, 32 = modified integer (modified = wont match
   // in original buffer)
 
-#ifdef CMPLOG_SOLVE_ARITHMETIC
-  if (lvl < LVL3 || attr == IS_TRANSFORM) { return 0; }
+  //#ifdef CMPLOG_SOLVE_ARITHMETIC
+  if (!afl->cmplog_enable_arith || lvl < LVL3 || attr == IS_TRANSFORM) {
+
+    return 0;
+
+  }
 
   if (!(attr & (IS_GREATER | IS_LESSER)) || SHAPE_BYTES(h->shape) < 4) {
 
@@ -1244,11 +1324,11 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
       double *f = (double *)&repl;
       float   g = (float)*f;
       repl_new = 0;
 #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
       memcpy((char *)&repl_new, (char *)&g, 4);
 #else
       memcpy(((char *)&repl_new) + 4, (char *)&g, 4);
 #endif
       changed_val = repl_new;
       h->shape = 3;  // modify shape
 
@@ -1303,7 +1383,7 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h,
 
   }
 
-#endif /* CMPLOG_SOLVE_ARITHMETIC */
+  //#endif /* CMPLOG_SOLVE_ARITHMETIC
 
   return 0;
 
@@ -1462,7 +1542,7 @@ static void try_to_add_to_dictN(afl_state_t *afl, u128 v, u8 size) {
   for (k = 0; k < size; ++k) {
 
 #else
-  u32 off = 16 - size;
   for (k = 16 - size; k < 16; ++k) {
 
 #endif
@@ -1498,11 +1578,12 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
   struct cmp_header *h = &afl->shm.cmp_map->headers[key];
   struct tainted *   t;
   u32 i, j, idx, taint_len, loggeds;
-  u32 have_taint = 1, is_n = 0;
+  u32 have_taint = 1;
   u8  status = 0, found_one = 0;
 
   /* loop cmps are useless, detect and ignore them */
 #ifdef WORD_SIZE_64
+  u32  is_n = 0;
   u128 s128_v0 = 0, s128_v1 = 0, orig_s128_v0 = 0, orig_s128_v1 = 0;
 #endif
   u64 s_v0, s_v1;
@@ -1520,6 +1601,7 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
 
   }
 
+#ifdef WORD_SIZE_64
   switch (SHAPE_BYTES(h->shape)) {
 
     case 1:
@@ -1532,6 +1614,8 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf,
 
   }
 
+#endif
+
   for (i = 0; i < loggeds; ++i) {
 
     struct cmp_operands *o = &afl->shm.cmp_map->log[key][i];
@@ -1776,9 +1860,9 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
 #ifndef CMPLOG_COMBINE
   (void)(cbuf);
 #endif
-#ifndef CMPLOG_SOLVE_TRANSFORM
-  (void)(changed_val);
-#endif
+  //#ifndef CMPLOG_SOLVE_TRANSFORM
+  // (void)(changed_val);
+  //#endif
 
   u8  save[40];
   u32 saved_idx = idx, pre, from = 0, to = 0, i, j;
@@ -1858,16 +1942,16 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
 
   }
 
-#ifdef CMPLOG_SOLVE_TRANSFORM
+  //#ifdef CMPLOG_SOLVE_TRANSFORM
 
   if (*status == 1) return 0;
 
-  if (lvl & LVL3) {
+  if (afl->cmplog_enable_transform && (lvl & LVL3)) {
 
     u32 toupper = 0, tolower = 0, xor = 0, arith = 0, tohex = 0, fromhex = 0;
 #ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
     u32 tob64 = 0, fromb64 = 0;
 #endif
     u32 from_0 = 0, from_x = 0, from_X = 0, from_slash = 0, from_up = 0;
     u32 to_0 = 0, to_x = 0, to_slash = 0, to_up = 0;
     u8  xor_val[32], arith_val[32], tmp[48];
@@ -1963,7 +2047,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
 
       }
 
 #ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
       if (i % 3 == 2 && i < 24) {
 
         if (is_base64(repl + ((i / 3) << 2))) tob64 += 3;
@@ -1976,7 +2060,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
 
       }
 
 #endif
 
       if ((o_pattern[i] ^ orig_buf[idx + i]) == xor_val[i] && xor_val[i]) {
 
@@ -2004,20 +2088,20 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
 
     }
 
 #ifdef _DEBUG
     fprintf(stderr,
             "RTN idx=%u loop=%u xor=%u arith=%u tolower=%u toupper=%u "
             "tohex=%u fromhex=%u to_0=%u to_slash=%u to_x=%u "
             "from_0=%u from_slash=%u from_x=%u\n",
             idx, i, xor, arith, tolower, toupper, tohex, fromhex, to_0,
             to_slash, to_x, from_0, from_slash, from_x);
 #ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
     fprintf(stderr, "RTN idx=%u loop=%u tob64=%u from64=%u\n", tob64,
             fromb64);
-#endif
 #endif
+#endif
 
 #ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
     // input is base64 and converted to binary? convert repl to base64!
     if ((i % 4) == 3 && i < 24 && fromb64 > i) {
 
@@ -2040,7 +2124,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
 
     }
 
 #endif
 
     // input is converted to hex? convert repl to binary!
     if (i < 16 && tohex > i) {
 
@@ -2169,16 +2253,16 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
 
     }
 
 #ifdef CMPLOG_COMBINE
     if (*status == 1) { memcpy(cbuf + idx, &buf[idx], i + 1); }
 #endif
 
     if ((i >= 7 &&
         (i >= xor&&i >= arith &&i >= tolower &&i >= toupper &&i > tohex &&i >
              (fromhex + from_0 + from_x + from_slash + 1)
 #ifdef CMPLOG_SOLVE_TRANSFORM_BASE64
          && i > tob64 + 3 && i > fromb64 + 4
 #endif
          )) ||
         repl[i] != changed_val[i] || *status == 1) {
 
@@ -2192,7 +2276,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 *pattern, u8 *repl,
 
   }
 
-#endif
+  //#endif
 
   return 0;
 
@@ -2525,9 +2609,9 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) {
 
     } else if ((lvl & LVL1)
 
-#ifdef CMPLOG_SOLVE_TRANSFORM
-               || (lvl & LVL3)
-#endif
+               //#ifdef CMPLOG_SOLVE_TRANSFORM
+               || ((lvl & LVL3) && afl->cmplog_enable_transform)
+               //#endif
     ) {
 
       if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, cbuf, len, lvl, taint))) {
@@ -2606,9 +2690,9 @@ exit_its:
   }
 
 #else
-  u32 *v = (u64 *)afl->virgin_bits;
-  u32 *s = (u64 *)virgin_save;
+  u32 *v = (u32 *)afl->virgin_bits;
+  u32 *s = (u32 *)virgin_save;
   u32  i;
   for (i = 0; i < (afl->shm.map_size >> 2); i++) {
 
     v[i] &= s[i];
@@ -2670,3 +2754,4 @@ exit_its:
   return r;
 
 }
+
@@ -707,6 +707,8 @@ void sync_fuzzers(afl_state_t *afl) {
 
   if (afl->foreign_sync_cnt) read_foreign_testcases(afl, 0);
 
+  afl->last_sync_time = get_cur_time();
+
 }
 
 /* Trim all new test cases to save cycles when doing deterministic checks. The
@@ -486,6 +486,8 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
         WARNF("Mistyped AFL environment variable: %s", env);
         issue_detected = 1;
 
+        print_suggested_envs(env);
+
       }
 
     }
@@ -185,15 +185,14 @@ void load_stats_file(afl_state_t *afl) {
 
 /* Update stats file for unattended monitoring. */
 
-void write_stats_file(afl_state_t *afl, double bitmap_cvg, double stability,
-                      double eps) {
+void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
+                      double stability, double eps) {
 
 #ifndef __HAIKU__
   struct rusage rus;
 #endif
 
   u64   cur_time = get_cur_time();
-  u32   t_bytes = count_non_255_bytes(afl, afl->virgin_bits);
   u8    fn[PATH_MAX];
   FILE *f;
 
@@ -353,9 +352,11 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
 
 /* Update the plot file if there is a reason to. */
 
-void maybe_update_plot_file(afl_state_t *afl, double bitmap_cvg, double eps) {
+void maybe_update_plot_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
+                            double eps) {
 
-  if (unlikely(afl->plot_prev_qp == afl->queued_paths &&
+  if (unlikely(afl->stop_soon) ||
+      unlikely(afl->plot_prev_qp == afl->queued_paths &&
                afl->plot_prev_pf == afl->pending_favored &&
                afl->plot_prev_pnf == afl->pending_not_fuzzed &&
                afl->plot_prev_ce == afl->current_entry &&
@@ -384,16 +385,16 @@ void maybe_update_plot_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg,
   /* Fields in the file:
 
      unix_time, afl->cycles_done, cur_path, paths_total, paths_not_fuzzed,
-     favored_not_fuzzed, afl->unique_crashes, afl->unique_hangs, afl->max_depth,
-     execs_per_sec */
+     favored_not_fuzzed, unique_crashes, unique_hangs, max_depth,
+     execs_per_sec, edges_found */
 
-  fprintf(
-      afl->fsrv.plot_file,
-      "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f, %llu\n",
-      get_cur_time() / 1000, afl->queue_cycle - 1, afl->current_entry,
-      afl->queued_paths, afl->pending_not_fuzzed, afl->pending_favored,
-      bitmap_cvg, afl->unique_crashes, afl->unique_hangs, afl->max_depth, eps,
-      afl->plot_prev_ed); /* ignore errors */
+  fprintf(afl->fsrv.plot_file,
+          "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f, %llu, "
+          "%u\n",
+          get_cur_time() / 1000, afl->queue_cycle - 1, afl->current_entry,
+          afl->queued_paths, afl->pending_not_fuzzed, afl->pending_favored,
+          bitmap_cvg, afl->unique_crashes, afl->unique_hangs, afl->max_depth,
+          eps, afl->plot_prev_ed, t_bytes); /* ignore errors */
 
   fflush(afl->fsrv.plot_file);
 
@@ -532,7 +533,8 @@ void show_stats(afl_state_t *afl) {
   if (cur_ms - afl->stats_last_stats_ms > STATS_UPDATE_SEC * 1000) {
 
     afl->stats_last_stats_ms = cur_ms;
-    write_stats_file(afl, t_byte_ratio, stab_ratio, afl->stats_avg_exec);
+    write_stats_file(afl, t_bytes, t_byte_ratio, stab_ratio,
+                     afl->stats_avg_exec);
     save_auto(afl);
     write_bitmap(afl);
 
@@ -555,7 +557,7 @@ void show_stats(afl_state_t *afl) {
   if (cur_ms - afl->stats_last_plot_ms > PLOT_UPDATE_SEC * 1000) {
 
     afl->stats_last_plot_ms = cur_ms;
-    maybe_update_plot_file(afl, t_byte_ratio, afl->stats_avg_exec);
+    maybe_update_plot_file(afl, t_bytes, t_byte_ratio, afl->stats_avg_exec);
 
   }
 
@@ -1217,7 +1219,7 @@ void show_init_stats(afl_state_t *afl) {
       stringify_int(IB(0), min_us), stringify_int(IB(1), max_us),
       stringify_int(IB(2), avg_us));
 
-  if (!afl->timeout_given) {
+  if (afl->timeout_given != 1) {
 
     /* Figure out the appropriate timeout. The basic idea is: 5x average or
        1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second.
127  src/afl-fuzz.c
@@ -103,7 +103,10 @@ static void usage(u8 *argv0, int more_help) {
       " quad -- see docs/power_schedules.md\n"
       " -f file - location read by the fuzzed program (default: stdin "
       "or @@)\n"
-      " -t msec - timeout for each run (auto-scaled, 50-%u ms)\n"
+      " -t msec - timeout for each run (auto-scaled, default %u ms). "
+      "Add a '+'\n"
+      " to auto-calculate the timeout, the value being the "
+      "maximum.\n"
       " -m megs - memory limit for child process (%u MB, 0 = no limit "
       "[default])\n"
       " -Q - use binary-only instrumentation (QEMU mode)\n"
@@ -122,10 +125,10 @@ static void usage(u8 *argv0, int more_help) {
       " -c program - enable CmpLog by specifying a binary compiled for "
       "it.\n"
       " if using QEMU, just use -c 0.\n"
-      " -l cmplog_level - set the complexity/intensivity of CmpLog.\n"
-      " Values: 1 (basic), 2 (larger files) and 3 "
-      "(transform)\n\n"
+      " -l cmplog_opts - CmpLog configuration values (e.g. \"2AT\"):\n"
+      " 1=small files (default), 2=larger files, 3=all "
+      "files,\n"
+      " A=arithmetic solving, T=transformational solving.\n\n"
       "Fuzzing behavior settings:\n"
       " -Z - sequential queue selection instead of weighted "
       "random\n"
@@ -137,8 +140,8 @@ static void usage(u8 *argv0, int more_help) {
 
       "Testing settings:\n"
       " -s seed - use a fixed seed for the RNG\n"
-      " -V seconds - fuzz for a specific time then terminate\n"
-      " -E execs - fuzz for a approx. no of total executions then "
+      " -V seconds - fuzz for a specified time then terminate\n"
+      " -E execs - fuzz for an approx. no. of total executions then "
      "terminate\n"
       " Note: not precise and can have several more "
       "executions.\n\n"
@@ -198,6 +201,7 @@ static void usage(u8 *argv0, int more_help) {
       "AFL_FORKSRV_INIT_TMOUT: time spent waiting for forkserver during startup (in milliseconds)\n"
       "AFL_HANG_TMOUT: override timeout value (in milliseconds)\n"
       "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES: don't warn about core dump handlers\n"
+      "AFL_IGNORE_UNKNOWN_ENVS: don't warn on unknown env vars\n"
       "AFL_IMPORT_FIRST: sync and import test cases from other fuzzer instances first\n"
       "AFL_KILL_SIGNAL: Signal ID delivered to child processes on timeout, etc. (default: SIGKILL)\n"
       "AFL_MAP_SIZE: the shared memory size for that target. must be >= the size\n"
@@ -552,13 +556,22 @@ int main(int argc, char **argv_orig, char **envp) {
 
       case 'F':                                         /* foreign sync dir */
 
-        if (!afl->is_main_node)
+        if (!optarg) { FATAL("Missing path for -F"); }
+        if (!afl->is_main_node) {
+
           FATAL(
               "Option -F can only be specified after the -M option for the "
               "main fuzzer of a fuzzing campaign");
-        if (afl->foreign_sync_cnt >= FOREIGN_SYNCS_MAX)
+
+        }
+
+        if (afl->foreign_sync_cnt >= FOREIGN_SYNCS_MAX) {
+
           FATAL("Maximum %u entried of -F option can be specified",
                 FOREIGN_SYNCS_MAX);
+
+        }
+
         afl->foreign_syncs[afl->foreign_sync_cnt].dir = optarg;
         while (afl->foreign_syncs[afl->foreign_sync_cnt]
                    .dir[strlen(afl->foreign_syncs[afl->foreign_sync_cnt].dir) -
@@ -802,13 +815,36 @@ int main(int argc, char **argv_orig, char **envp) {
 
       case 'l': {
 
-        afl->cmplog_lvl = atoi(optarg);
-        if (afl->cmplog_lvl < 1 || afl->cmplog_lvl > CMPLOG_LVL_MAX) {
-
-          FATAL(
-              "Bad complog level value, accepted values are 1 (default), 2 and "
-              "%u.",
-              CMPLOG_LVL_MAX);
+        if (!optarg) { FATAL("missing parameter for 'l'"); }
+        char *c = optarg;
+        while (*c) {
+
+          switch (*c) {
+
+            case '0':
+            case '1':
+              afl->cmplog_lvl = 1;
+              break;
+            case '2':
+              afl->cmplog_lvl = 2;
+              break;
+            case '3':
+              afl->cmplog_lvl = 3;
+              break;
+            case 'a':
+            case 'A':
+              afl->cmplog_enable_arith = 1;
+              break;
+            case 't':
+            case 'T':
+              afl->cmplog_enable_transform = 1;
+              break;
+            default:
+              FATAL("Unknown option value '%c' in -l %s", *c, optarg);
+
+          }
+
+          ++c;
 
         }
 
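With this change `-l` no longer takes a single number: the argument is scanned character by character, digits select the CmpLog level and the letters A/T switch on the extra solvers (the new usage text above uses "2AT" as an example). A stand-alone sketch of that parsing follows, with illustrative names rather than the real afl state fields.

```c
#include <stdio.h>

/* Illustrative parse of a CmpLog option string such as "2AT". */
int main(int argc, char **argv) {

  int level = 1, arith = 0, transform = 0;

  for (const char *c = (argc > 1 ? argv[1] : "2AT"); *c; ++c) {

    switch (*c) {

      case '0':
      case '1': level = 1; break;
      case '2': level = 2; break;
      case '3': level = 3; break;
      case 'a':
      case 'A': arith = 1; break;
      case 't':
      case 'T': transform = 1; break;
      default:
        fprintf(stderr, "Unknown option value '%c'\n", *c);
        return 1;

    }

  }

  printf("level=%d arith=%d transform=%d\n", level, arith, transform);
  return 0;

}
```

Under that reading, an invocation along the lines of `afl-fuzz -c ./target.cmplog -l 2AT -i in -o out -- ./target @@` would select the larger-files level plus both arithmetic and transformational solving.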
@@ -1418,7 +1454,7 @@ int main(int argc, char **argv_orig, char **envp) {
 
   }
 
-  if (!afl->timeout_given) { find_timeout(afl); }
+  if (!afl->timeout_given) { find_timeout(afl); }  // only for resumes!
 
   if ((afl->tmp_dir = afl->afl_env.afl_tmpdir) != NULL &&
       !afl->in_place_resume) {
@@ -1672,20 +1708,49 @@ int main(int argc, char **argv_orig, char **envp) {
 
   cull_queue(afl);
 
-  if (!afl->pending_not_fuzzed) {
+  // ensure we have at least one seed that is not disabled.
+  u32 entry, valid_seeds = 0;
+  for (entry = 0; entry < afl->queued_paths; ++entry)
+    if (!afl->queue_buf[entry]->disabled) { ++valid_seeds; }
+
+  if (!afl->pending_not_fuzzed || !valid_seeds) {
 
     FATAL("We need at least one valid input seed that does not crash!");
 
   }
 
+  if (afl->timeout_given == 2) {  // -t ...+ option
+
+    if (valid_seeds == 1) {
+
+      WARNF(
+          "Only one valid seed is present, auto-calculating the timeout is "
+          "disabled!");
+      afl->timeout_given = 1;
+
+    } else {
+
+      u64 max_ms = 0;
+
+      for (entry = 0; entry < afl->queued_paths; ++entry)
+        if (!afl->queue_buf[entry]->disabled)
+          if (afl->queue_buf[entry]->exec_us > max_ms)
+            max_ms = afl->queue_buf[entry]->exec_us;
+
+      afl->fsrv.exec_tmout = max_ms;
+
+    }
+
+  }
+
   show_init_stats(afl);
 
   if (unlikely(afl->old_seed_selection)) seek_to = find_start_position(afl);
 
   afl->start_time = get_cur_time();
   if (afl->in_place_resume || afl->afl_env.afl_autoresume) load_stats_file(afl);
-  write_stats_file(afl, 0, 0, 0);
-  maybe_update_plot_file(afl, 0, 0);
+  write_stats_file(afl, 0, 0, 0, 0);
+  maybe_update_plot_file(afl, 0, 0, 0);
   save_auto(afl);
 
   if (afl->stop_soon) { goto stop_fuzzing; }
@@ -1735,12 +1800,15 @@ int main(int argc, char **argv_orig, char **envp) {
     if (unlikely(afl->old_seed_selection)) {
 
       afl->current_entry = 0;
-      while (unlikely(afl->queue_buf[afl->current_entry]->disabled)) {
+      while (unlikely(afl->current_entry < afl->queued_paths &&
+                      afl->queue_buf[afl->current_entry]->disabled)) {
 
         ++afl->current_entry;
 
       }
 
+      if (afl->current_entry >= afl->queued_paths) { afl->current_entry = 0; }
+
       afl->queue_cur = afl->queue_buf[afl->current_entry];
 
       if (unlikely(seek_to)) {
@@ -1943,15 +2011,24 @@ int main(int argc, char **argv_orig, char **envp) {
 
     if (unlikely(afl->is_main_node)) {
 
-      if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) {
+      if (unlikely(get_cur_time() >
+                   (SYNC_TIME >> 1) + afl->last_sync_time)) {
 
-        sync_fuzzers(afl);
+        if (!(sync_interval_cnt++ % (SYNC_INTERVAL / 3))) {
+
+          sync_fuzzers(afl);
+
+        }
 
       }
 
     } else {
 
-      if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); }
+      if (unlikely(get_cur_time() > SYNC_TIME + afl->last_sync_time)) {
+
+        if (!(sync_interval_cnt++ % SYNC_INTERVAL)) { sync_fuzzers(afl); }
+
+      }
 
     }
 
@@ -1966,12 +2043,12 @@ int main(int argc, char **argv_orig, char **envp) {
   }
 
   write_bitmap(afl);
-  maybe_update_plot_file(afl, 0, 0);
+  maybe_update_plot_file(afl, 0, 0, 0);
   save_auto(afl);
 
 stop_fuzzing:
 
-  write_stats_file(afl, 0, 0, 0);
+  write_stats_file(afl, 0, 0, 0, 0);
   afl->force_ui_update = 1;  // ensure the screen is reprinted
   show_stats(afl);           // print the screen one last time
 
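The sync trigger is now gated on wall-clock time as well as the execution-count interval: `sync_fuzzers()` records `last_sync_time`, and the main loop only re-enters the sync path once `SYNC_TIME` has elapsed, with the main node using half that period. A small illustrative sketch of the gating (not AFL++ code):

```c
#include <stdint.h>

/* Only attempt a sync when enough wall-clock time has passed since the last
   one; the main node uses half the period so it picks up remote findings
   sooner. */
static int should_sync(uint64_t now_ms, uint64_t last_sync_ms,
                       uint64_t sync_time_ms, int is_main_node) {

  uint64_t period = is_main_node ? (sync_time_ms >> 1) : sync_time_ms;
  return now_ms > last_sync_ms + period;

}
```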
|
@ -1013,7 +1013,6 @@ int main(int argc, char **argv_orig, char **envp) {
|
|||||||
|
|
||||||
if (in_dir) {
|
if (in_dir) {
|
||||||
|
|
||||||
if (at_file) { PFATAL("Options -A and -i are mutually exclusive"); }
|
|
||||||
detect_file_args(argv + optind, "", &fsrv->use_stdin);
|
detect_file_args(argv + optind, "", &fsrv->use_stdin);
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
@ -1169,8 +1168,9 @@ int main(int argc, char **argv_orig, char **envp) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
stdin_file =
|
stdin_file = at_file ? strdup(at_file)
|
||||||
alloc_printf("%s/.afl-showmap-temp-%u", use_dir, (u32)getpid());
|
: (char *)alloc_printf("%s/.afl-showmap-temp-%u",
|
||||||
|
use_dir, (u32)getpid());
|
||||||
unlink(stdin_file);
|
unlink(stdin_file);
|
||||||
atexit(at_exit_handler);
|
atexit(at_exit_handler);
|
||||||
fsrv->out_file = stdin_file;
|
fsrv->out_file = stdin_file;
|
||||||
|
@@ -7,7 +7,7 @@ AFL_GCC=afl-gcc
 $ECHO "$BLUE[*] Testing: ${AFL_GCC}, afl-showmap, afl-fuzz, afl-cmin and afl-tmin"
 test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc" -o "$SYS" = "i386" && {
  test -e ../${AFL_GCC} -a -e ../afl-showmap -a -e ../afl-fuzz && {
-  ../${AFL_GCC} -o test-instr.plain ../test-instr.c > /dev/null 2>&1
+  ../${AFL_GCC} -o test-instr.plain -O0 ../test-instr.c > /dev/null 2>&1
   AFL_HARDEN=1 ../${AFL_GCC} -o test-compcov.harden test-compcov.c > /dev/null 2>&1
   test -e test-instr.plain && {
     $ECHO "$GREEN[+] ${AFL_GCC} compilation succeeded"
@@ -39,7 +39,7 @@ test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc
     $ECHO "$RED[!] ${AFL_GCC} failed"
     echo CUT------------------------------------------------------------------CUT
     uname -a
-    ../${AFL_GCC} -o test-instr.plain ../test-instr.c
+    ../${AFL_GCC} -o test-instr.plain -O0 ../test-instr.c
     echo CUT------------------------------------------------------------------CUT
     CODE=1
   }
@@ -128,7 +128,7 @@ test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc
 $ECHO "$BLUE[*] Testing: ${AFL_GCC}, afl-showmap, afl-fuzz, afl-cmin and afl-tmin"
 SKIP=
 test -e ../${AFL_GCC} -a -e ../afl-showmap -a -e ../afl-fuzz && {
-  ../${AFL_GCC} -o test-instr.plain ../test-instr.c > /dev/null 2>&1
+  ../${AFL_GCC} -o test-instr.plain -O0 ../test-instr.c > /dev/null 2>&1
   AFL_HARDEN=1 ../${AFL_GCC} -o test-compcov.harden test-compcov.c > /dev/null 2>&1
   test -e test-instr.plain && {
     $ECHO "$GREEN[+] ${AFL_GCC} compilation succeeded"
@@ -39,14 +39,7 @@ test -e ../afl-qemu-trace && {
   $ECHO "$GREY[*] running afl-fuzz for qemu_mode AFL_ENTRYPOINT, this will take approx 6 seconds"
   {
     {
-      if file test-instr | grep -q "32-bit"; then
-        # for 32-bit reduce 8 nibbles to the lower 7 nibbles
-        ADDR_LOWER_PART=`nm test-instr | grep "T main" | awk '{print $1}' | sed 's/^.//'`
-      else
-        # for 64-bit reduce 16 nibbles to the lower 9 nibbles
-        ADDR_LOWER_PART=`nm test-instr | grep "T main" | awk '{print $1}' | sed 's/^.......//'`
-      fi
-      export AFL_ENTRYPOINT=`expr 0x4${ADDR_LOWER_PART}`
+      export AFL_ENTRYPOINT=`printf 1 | AFL_DEBUG=1 ../afl-qemu-trace ./test-instr 2>&1 >/dev/null | awk '/forkserver/{print $4; exit}'`
       $ECHO AFL_ENTRYPOINT=$AFL_ENTRYPOINT - $(nm test-instr | grep "T main") - $(file ./test-instr)
       ../afl-fuzz -m ${MEM_LIMIT} -V2 -Q -i in -o out -- ./test-instr
       unset AFL_ENTRYPOINT
@@ -14,7 +14,7 @@ test -d ../unicorn_mode/unicornafl -a -e ../unicorn_mode/unicornafl/samples/shel
 EASY_INSTALL_FOUND=0
 for PYTHON in $PYTHONS ; do
 
-  if $PYTHON -c "help('easy_install');" </dev/null | grep -q module ; then
+  if $PYTHON -c "import setuptools" ; then
 
     EASY_INSTALL_FOUND=1
     PY=$PYTHON
@@ -8,19 +8,19 @@ The CompareCoverage and NeverZero counters features are by Andrea Fioraldi <andr
 
 ## 1) Introduction
 
-The code in ./unicorn_mode allows you to build a standalone feature that
-leverages the Unicorn Engine and allows callers to obtain instrumentation
+The code in ./unicorn_mode allows you to build the (Unicorn Engine)[https://github.com/unicorn-engine/unicorn] with afl support.
+This means, you can run anything that can be emulated in unicorn and obtain instrumentation
 output for black-box, closed-source binary code snippets. This mechanism
 can be then used by afl-fuzz to stress-test targets that couldn't be built
-with afl-gcc or used in QEMU mode, or with other extensions such as
-TriforceAFL.
+with afl-cc or used in QEMU mode.
 
 There is a significant performance penalty compared to native AFL,
 but at least we're able to use AFL++ on these binaries, right?
 
 ## 2) How to use
 
-Requirements: you need an installed python environment.
+First, you will need a working harness for your target in unicorn, using Python, C, or Rust.
+For some pointers for more advanced emulation, take a look at [BaseSAFE](https://github.com/fgsect/BaseSAFE) and [Qiling](https://github.com/qilingframework/qiling).
 
 ### Building AFL++'s Unicorn Mode
 
@@ -34,23 +34,23 @@ cd unicorn_mode
 ```
 
 NOTE: This script checks out a Unicorn Engine fork as submodule that has been tested
-and is stable-ish, based on the unicorn engine master.
+and is stable-ish, based on the unicorn engine `next` branch.
 
 Building Unicorn will take a little bit (~5-10 minutes). Once it completes
 it automatically compiles a sample application and verifies that it works.
 
 ### Fuzzing with Unicorn Mode
 
-To really use unicorn-mode effectively you need to prepare the following:
+To use unicorn-mode effectively you need to prepare the following:
 
 * Relevant binary code to be fuzzed
 * Knowledge of the memory map and good starting state
 * Folder containing sample inputs to start fuzzing with
   + Same ideas as any other AFL inputs
-  + Quality/speed of results will depend greatly on quality of starting
+  + Quality/speed of results will depend greatly on the quality of starting
     samples
   + See AFL's guidance on how to create a sample corpus
-* Unicornafl-based test harness which:
+* Unicornafl-based test harness in Rust, C, or Python, which:
   + Adds memory map regions
   + Loads binary code into memory
   + Calls uc.afl_fuzz() / uc.afl_start_forkserver
@@ -59,13 +59,13 @@ To really use unicorn-mode effectively you need to prepare the following:
     the test harness
   + Presumably the data to be fuzzed is at a fixed buffer address
   + If input constraints (size, invalid bytes, etc.) are known they
-    should be checked after the file is loaded. If a constraint
-    fails, just exit the test harness. AFL will treat the input as
+    should be checked in the place_input handler. If a constraint
+    fails, just return false from the handler. AFL will treat the input as
     'uninteresting' and move on.
   + Sets up registers and memory state for beginning of test
-  + Emulates the interested code from beginning to end
+  + Emulates the interesting code from beginning to end
   + If a crash is detected, the test harness must 'crash' by
-    throwing a signal (SIGSEGV, SIGKILL, SIGABORT, etc.)
+    throwing a signal (SIGSEGV, SIGKILL, SIGABORT, etc.), or indicate a crash in the crash validation callback.
 
 Once you have all those things ready to go you just need to run afl-fuzz in
 'unicorn-mode' by passing in the '-U' flag:
@@ -79,11 +79,12 @@ AFL's main documentation for more info about how to use afl-fuzz effectively.
 
 For a much clearer vision of what all of this looks like, please refer to the
 sample provided in the 'unicorn_mode/samples' directory. There is also a blog
-post that goes over the basics at:
+post that uses slightly older concepts, but describes the general ideas, at:
 
 [https://medium.com/@njvoss299/afl-unicorn-fuzzing-arbitrary-binary-code-563ca28936bf](https://medium.com/@njvoss299/afl-unicorn-fuzzing-arbitrary-binary-code-563ca28936bf)
 
-The 'helper_scripts' directory also contains several helper scripts that allow you
+The ['helper_scripts'](./helper_scripts) directory also contains several helper scripts that allow you
 to dump context from a running process, load it, and hook heap allocations. For details
 on how to use this check out the follow-up blog post to the one linked above.
 
@@ -92,10 +93,10 @@ A example use of AFL-Unicorn mode is discussed in the paper Unicorefuzz:
 
 ## 3) Options
 
-As for the QEMU-based instrumentation, the afl-unicorn twist of afl++
-comes with a sub-instruction based instrumentation similar in purpose to laf-intel.
+As for the QEMU-based instrumentation, unicornafl comes with a sub-instruction based instrumentation similar in purpose to laf-intel.
 
 The options that enable Unicorn CompareCoverage are the same used for QEMU.
+This will split up each multi-byte compare to give feedback for each correct byte.
 AFL_COMPCOV_LEVEL=1 is to instrument comparisons with only immediate values.
 
 AFL_COMPCOV_LEVEL=2 instruments all comparison instructions.
@@ -119,6 +120,20 @@ unicornafl.monkeypatch()
 
 This will replace all unicorn imports with unicornafl inputs.
 
-Refer to the [samples/arm_example/arm_tester.c](samples/arm_example/arm_tester.c) for an example
-of how to do this properly! If you don't get this right, AFL will not
-load any mutated inputs and your fuzzing will be useless!
+5) Examples
+
+Apart from reading the documentation in `afl.c` and the python bindings of unicornafl, the best documentation are the [samples/](./samples).
+The following examples exist at the time of writing:
+
+- c: A simple example how to use the c bindings
+- compcov_x64: A python example that uses compcov to traverse hard-to-reach blocks
+- persistent: A c example using persistent mode for maximum speed, and resetting the target state between each iteration
+- simple: A simple python example
+- speedtest/c: The c harness for an example target, used to compare c, python, and rust bindings and fix speed issues
+- speedtest/python: Fuzzing the same target in python
+- speedtest/rust: Fuzzing the same target using a rust harness
+
+Usually, the place to look at is the `harness` in each folder. The source code in each harness is pretty well documented.
+Most harnesses also have the `afl-fuzz` commandline, or even offer a `make fuzz` Makefile target.
+Targets in these folders, if x86, can usually be made using `make target` in each folder or get shipped pre-built (plus their source).
+Especially take a look at the [speedtest documentation](./samples/speedtest/README.md) to see how the languages compare.
@@ -117,19 +117,19 @@ done
 
 # some python version should be available now
 PYTHONS="`command -v python3` `command -v python` `command -v python2`"
-EASY_INSTALL_FOUND=0
+SETUPTOOLS_FOUND=0
 for PYTHON in $PYTHONS ; do
 
   if $PYTHON -c "import setuptools" ; then
 
-    EASY_INSTALL_FOUND=1
+    SETUPTOOLS_FOUND=1
     PYTHONBIN=$PYTHON
     break
 
   fi
 
 done
-if [ "0" = $EASY_INSTALL_FOUND ]; then
+if [ "0" = $SETUPTOOLS_FOUND ]; then
 
   echo "[-] Error: Python setup-tools not found. Run 'sudo apt-get install python-setuptools', or install python3-setuptools, or run '$PYTHONBIN -m ensurepip', or create a virtualenv, or ..."
   PREREQ_NOTFOUND=1
@@ -45,30 +45,31 @@ MAX_SEG_SIZE = 128 * 1024 * 1024
 INDEX_FILE_NAME = "_index.json"
 
 
-#----------------------
-#---- Helper Functions
+# ----------------------
+# ---- Helper Functions
 
 
 def map_arch():
     arch = get_arch()  # from GEF
-    if 'x86_64' in arch or 'x86-64' in arch:
+    if "x86_64" in arch or "x86-64" in arch:
         return "x64"
-    elif 'x86' in arch or 'i386' in arch:
+    elif "x86" in arch or "i386" in arch:
         return "x86"
-    elif 'aarch64' in arch or 'arm64' in arch:
+    elif "aarch64" in arch or "arm64" in arch:
         return "arm64le"
-    elif 'aarch64_be' in arch:
+    elif "aarch64_be" in arch:
         return "arm64be"
-    elif 'armeb' in arch:
+    elif "armeb" in arch:
         # check for THUMB mode
-        cpsr = get_register('$cpsr')
-        if (cpsr & (1 << 5)):
+        cpsr = get_register("$cpsr")
+        if cpsr & (1 << 5):
             return "armbethumb"
         else:
             return "armbe"
-    elif 'arm' in arch:
+    elif "arm" in arch:
         # check for THUMB mode
-        cpsr = get_register('$cpsr')
-        if (cpsr & (1 << 5)):
+        cpsr = get_register("$cpsr")
+        if cpsr & (1 << 5):
             return "armlethumb"
         else:
             return "armle"
@@ -76,8 +77,9 @@ def map_arch():
         return ""
 
 
-#-----------------------
-#---- Dumping functions
+# -----------------------
+# ---- Dumping functions
 
 
 def dump_arch_info():
     arch_info = {}
@@ -89,7 +91,7 @@ def dump_regs():
     reg_state = {}
     for reg in current_arch.all_registers:
         reg_val = get_register(reg)
-        reg_state[reg.strip().strip('$')] = reg_val
+        reg_state[reg.strip().strip("$")] = reg_val
 
     return reg_state
 
@@ -108,47 +110,76 @@ def dump_process_memory(output_dir):
         if entry.page_start == entry.page_end:
             continue
 
-        seg_info = {'start': entry.page_start, 'end': entry.page_end, 'name': entry.path, 'permissions': {
-            "r": entry.is_readable() > 0,
-            "w": entry.is_writable() > 0,
-            "x": entry.is_executable() > 0
-        }, 'content_file': ''}
+        seg_info = {
+            "start": entry.page_start,
+            "end": entry.page_end,
+            "name": entry.path,
+            "permissions": {
+                "r": entry.is_readable() > 0,
+                "w": entry.is_writable() > 0,
+                "x": entry.is_executable() > 0,
+            },
+            "content_file": "",
+        }
 
         # "(deleted)" may or may not be valid, but don't push it.
-        if entry.is_readable() and not '(deleted)' in entry.path:
+        if entry.is_readable() and not "(deleted)" in entry.path:
             try:
                 # Compress and dump the content to a file
                 seg_content = read_memory(entry.page_start, entry.size)
-                if(seg_content == None):
-                    print("Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(entry.page_start, entry.path))
+                if seg_content == None:
+                    print(
+                        "Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(
+                            entry.page_start, entry.path
+                        )
+                    )
                 else:
-                    print("Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(entry.page_start, len(seg_content), entry.path, repr(seg_info['permissions'])))
+                    print(
+                        "Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(
+                            entry.page_start,
+                            len(seg_content),
+                            entry.path,
+                            repr(seg_info["permissions"]),
+                        )
+                    )
                     compressed_seg_content = zlib.compress(seg_content)
                     md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
                     seg_info["content_file"] = md5_sum
 
                     # Write the compressed contents to disk
-                    out_file = open(os.path.join(output_dir, md5_sum), 'wb')
+                    out_file = open(os.path.join(output_dir, md5_sum), "wb")
                    out_file.write(compressed_seg_content)
                    out_file.close()
 
             except:
-                print("Exception reading segment ({}): {}".format(entry.path, sys.exc_info()[0]))
+                print(
+                    "Exception reading segment ({}): {}".format(
+                        entry.path, sys.exc_info()[0]
+                    )
+                )
         else:
-            print("Skipping segment {0}@0x{1:016x}".format(entry.path, entry.page_start))
+            print(
+                "Skipping segment {0}@0x{1:016x}".format(entry.path, entry.page_start)
+            )
 
         # Add the segment to the list
         final_segment_list.append(seg_info)
 
 
     return final_segment_list
 
-#---------------------------------------------
-#---- ARM Extention (dump floating point regs)
+
+# ---------------------------------------------
+# ---- ARM Extention (dump floating point regs)
 
 
 def dump_float(rge=32):
     reg_convert = ""
-    if map_arch() == "armbe" or map_arch() == "armle" or map_arch() == "armbethumb" or map_arch() == "armbethumb":
+    if (
+        map_arch() == "armbe"
+        or map_arch() == "armle"
+        or map_arch() == "armbethumb"
+        or map_arch() == "armbethumb"
+    ):
         reg_state = {}
         for reg_num in range(32):
             value = gdb.selected_frame().read_register("d" + str(reg_num))
@@ -158,8 +189,10 @@ def dump_float(rge=32):
 
     return reg_state
 
-#----------
-#---- Main
+
+# ----------
+# ---- Main
 
 
 def main():
     print("----- Unicorn Context Dumper -----")
@@ -175,7 +208,9 @@ def main():
     try:
 
         # Create the output directory
-        timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
+        timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
+            "%Y%m%d_%H%M%S"
+        )
         output_path = "UnicornContext_" + timestamp
         if not os.path.exists(output_path):
             os.makedirs(output_path)
@@ -190,7 +225,7 @@ def main():
         }
 
         # Write the index file
-        index_file = open(os.path.join(output_path, INDEX_FILE_NAME), 'w')
+        index_file = open(os.path.join(output_path, INDEX_FILE_NAME), "w")
         index_file.write(json.dumps(context, indent=4))
         index_file.close()
         print("Done.")
@@ -198,5 +233,6 @@ def main():
     except Exception as e:
         print("!!! ERROR:\n\t{}".format(repr(e)))
 
+
 if __name__ == "__main__":
     main()
@ -31,8 +31,9 @@ MAX_SEG_SIZE = 128 * 1024 * 1024
|
|||||||
# Name of the index file
|
# Name of the index file
|
||||||
INDEX_FILE_NAME = "_index.json"
|
INDEX_FILE_NAME = "_index.json"
|
||||||
|
|
||||||
#----------------------
|
# ----------------------
|
||||||
#---- Helper Functions
|
# ---- Helper Functions
|
||||||
|
|
||||||
|
|
||||||
def get_arch():
|
def get_arch():
|
||||||
if ph.id == PLFM_386 and ph.flag & PR_USE64:
|
if ph.id == PLFM_386 and ph.flag & PR_USE64:
|
||||||
@ -52,6 +53,7 @@ def get_arch():
|
|||||||
else:
|
else:
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
|
|
||||||
def get_register_list(arch):
|
def get_register_list(arch):
|
||||||
if arch == "arm64le" or arch == "arm64be":
|
if arch == "arm64le" or arch == "arm64be":
|
||||||
arch = "arm64"
|
arch = "arm64"
|
||||||
@ -59,84 +61,174 @@ def get_register_list(arch):
|
|||||||
arch = "arm"
|
arch = "arm"
|
||||||
|
|
||||||
registers = {
|
registers = {
|
||||||
"x64" : [
|
"x64": [
|
||||||
"rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
|
"rax",
|
||||||
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
|
"rbx",
|
||||||
"rip", "rsp", "efl",
|
"rcx",
|
||||||
"cs", "ds", "es", "fs", "gs", "ss",
|
"rdx",
|
||||||
|
"rsi",
|
||||||
|
"rdi",
|
||||||
|
"rbp",
|
||||||
|
"rsp",
|
||||||
|
"r8",
|
||||||
|
"r9",
|
||||||
|
"r10",
|
||||||
|
"r11",
|
||||||
|
"r12",
|
||||||
|
"r13",
|
||||||
|
"r14",
|
||||||
|
"r15",
|
||||||
|
"rip",
|
||||||
|
"rsp",
|
||||||
|
"efl",
|
||||||
|
"cs",
|
||||||
|
"ds",
|
||||||
|
"es",
|
||||||
|
"fs",
|
||||||
|
"gs",
|
||||||
|
"ss",
|
||||||
],
|
],
|
||||||
"x86" : [
|
"x86": [
|
||||||
"eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
|
"eax",
|
||||||
"eip", "esp", "efl",
|
"ebx",
|
||||||
"cs", "ds", "es", "fs", "gs", "ss",
|
"ecx",
|
||||||
],
|
"edx",
|
||||||
"arm" : [
|
"esi",
|
||||||
"R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
|
"edi",
|
||||||
"R8", "R9", "R10", "R11", "R12", "PC", "SP", "LR",
|
"ebp",
|
||||||
|
"esp",
|
||||||
|
"eip",
|
||||||
|
"esp",
|
||||||
|
"efl",
|
||||||
|
"cs",
|
||||||
|
"ds",
|
||||||
|
"es",
|
||||||
|
"fs",
|
||||||
|
"gs",
|
||||||
|
"ss",
|
||||||
|
],
|
||||||
|
"arm": [
|
||||||
|
"R0",
|
||||||
|
"R1",
|
||||||
|
"R2",
|
||||||
|
"R3",
|
||||||
|
"R4",
|
||||||
|
"R5",
|
||||||
|
"R6",
|
||||||
|
"R7",
|
||||||
|
"R8",
|
||||||
|
"R9",
|
||||||
|
"R10",
|
||||||
|
"R11",
|
||||||
|
"R12",
|
||||||
|
"PC",
|
||||||
|
"SP",
|
||||||
|
"LR",
|
||||||
"PSR",
|
"PSR",
|
||||||
],
|
],
|
||||||
"arm64" : [
|
"arm64": [
|
||||||
"X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
|
"X0",
|
||||||
"X8", "X9", "X10", "X11", "X12", "X13", "X14",
|
"X1",
|
||||||
"X15", "X16", "X17", "X18", "X19", "X20", "X21",
|
"X2",
|
||||||
"X22", "X23", "X24", "X25", "X26", "X27", "X28",
|
"X3",
|
||||||
"PC", "SP", "FP", "LR", "CPSR"
|
"X4",
|
||||||
|
"X5",
|
||||||
|
"X6",
|
||||||
|
"X7",
|
||||||
|
"X8",
|
||||||
|
"X9",
|
||||||
|
"X10",
|
||||||
|
"X11",
|
||||||
|
"X12",
|
||||||
|
"X13",
|
||||||
|
"X14",
|
||||||
|
"X15",
|
||||||
|
"X16",
|
||||||
|
"X17",
|
||||||
|
"X18",
|
||||||
|
"X19",
|
||||||
|
"X20",
|
||||||
|
"X21",
|
||||||
|
"X22",
|
||||||
|
"X23",
|
||||||
|
"X24",
|
||||||
|
"X25",
|
||||||
|
"X26",
|
||||||
|
"X27",
|
||||||
|
"X28",
|
||||||
|
"PC",
|
||||||
|
"SP",
|
||||||
|
"FP",
|
||||||
|
"LR",
|
||||||
|
"CPSR"
|
||||||
# "NZCV",
|
# "NZCV",
|
||||||
]
|
],
|
||||||
}
|
}
|
||||||
return registers[arch]
|
return registers[arch]
|
||||||
|
|
||||||
|
|
||||||
|
# -----------------------
|
||||||
|
# ---- Dumping functions
|
||||||
|
|
||||||
#-----------------------
|
|
||||||
#---- Dumping functions
|
|
||||||
|
|
||||||
def dump_arch_info():
|
def dump_arch_info():
|
||||||
arch_info = {}
|
arch_info = {}
|
||||||
arch_info["arch"] = get_arch()
|
arch_info["arch"] = get_arch()
|
||||||
return arch_info
|
return arch_info
|
||||||
|
|
||||||
|
|
||||||
def dump_regs():
|
def dump_regs():
|
||||||
reg_state = {}
|
reg_state = {}
|
||||||
for reg in get_register_list(get_arch()):
|
for reg in get_register_list(get_arch()):
|
||||||
reg_state[reg] = GetRegValue(reg)
|
reg_state[reg] = GetRegValue(reg)
|
||||||
return reg_state
|
return reg_state
|
||||||
|
|
||||||
|
|
||||||
def dump_process_memory(output_dir):
|
def dump_process_memory(output_dir):
|
||||||
# Segment information dictionary
|
# Segment information dictionary
|
||||||
segment_list = []
|
segment_list = []
|
||||||
|
|
||||||
# Loop over the segments, fill in the info dictionary
|
# Loop over the segments, fill in the info dictionary
|
||||||
for seg_ea in Segments():
|
for seg_ea in Segments():
|
||||||
seg_start = SegStart(seg_ea)
|
seg_start = SegStart(seg_ea)
|
||||||
seg_end = SegEnd(seg_ea)
|
seg_end = SegEnd(seg_ea)
|
||||||
seg_size = seg_end - seg_start
|
seg_size = seg_end - seg_start
|
||||||
|
|
||||||
seg_info = {}
|
seg_info = {}
|
||||||
seg_info["name"] = SegName(seg_ea)
|
seg_info["name"] = SegName(seg_ea)
|
||||||
seg_info["start"] = seg_start
|
seg_info["start"] = seg_start
|
||||||
seg_info["end"] = seg_end
|
seg_info["end"] = seg_end
|
||||||
|
|
||||||
perms = getseg(seg_ea).perm
|
perms = getseg(seg_ea).perm
|
||||||
seg_info["permissions"] = {
|
seg_info["permissions"] = {
|
||||||
"r": False if (perms & SEGPERM_READ) == 0 else True,
|
"r": False if (perms & SEGPERM_READ) == 0 else True,
|
||||||
"w": False if (perms & SEGPERM_WRITE) == 0 else True,
|
"w": False if (perms & SEGPERM_WRITE) == 0 else True,
|
||||||
"x": False if (perms & SEGPERM_EXEC) == 0 else True,
|
"x": False if (perms & SEGPERM_EXEC) == 0 else True,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (perms & SEGPERM_READ) and seg_size <= MAX_SEG_SIZE and isLoaded(seg_start):
|
if (perms & SEGPERM_READ) and seg_size <= MAX_SEG_SIZE and isLoaded(seg_start):
|
||||||
try:
|
try:
|
||||||
# Compress and dump the content to a file
|
# Compress and dump the content to a file
|
||||||
seg_content = get_many_bytes(seg_start, seg_end - seg_start)
|
seg_content = get_many_bytes(seg_start, seg_end - seg_start)
|
||||||
if(seg_content == None):
|
if seg_content == None:
|
||||||
print("Segment empty: {0}@0x{1:016x} (size:UNKNOWN)".format(SegName(seg_ea), seg_ea))
|
print(
|
||||||
|
"Segment empty: {0}@0x{1:016x} (size:UNKNOWN)".format(
|
||||||
|
SegName(seg_ea), seg_ea
|
||||||
|
)
|
||||||
|
)
|
||||||
seg_info["content_file"] = ""
|
seg_info["content_file"] = ""
|
||||||
else:
|
else:
|
||||||
print("Dumping segment {0}@0x{1:016x} (size:{2})".format(SegName(seg_ea), seg_ea, len(seg_content)))
|
print(
|
||||||
|
"Dumping segment {0}@0x{1:016x} (size:{2})".format(
|
||||||
|
SegName(seg_ea), seg_ea, len(seg_content)
|
||||||
|
)
|
||||||
|
)
|
||||||
compressed_seg_content = zlib.compress(seg_content)
|
compressed_seg_content = zlib.compress(seg_content)
|
||||||
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
|
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
|
||||||
seg_info["content_file"] = md5_sum
|
seg_info["content_file"] = md5_sum
|
||||||
|
|
||||||
# Write the compressed contents to disk
|
# Write the compressed contents to disk
|
||||||
out_file = open(os.path.join(output_dir, md5_sum), 'wb')
|
out_file = open(os.path.join(output_dir, md5_sum), "wb")
|
||||||
out_file.write(compressed_seg_content)
|
out_file.write(compressed_seg_content)
|
||||||
out_file.close()
|
out_file.close()
|
||||||
except:
|
except:
|
||||||
@ -145,12 +237,13 @@ def dump_process_memory(output_dir):
|
|||||||
else:
|
else:
|
||||||
print("Skipping segment {0}@0x{1:016x}".format(SegName(seg_ea), seg_ea))
|
print("Skipping segment {0}@0x{1:016x}".format(SegName(seg_ea), seg_ea))
|
||||||
seg_info["content_file"] = ""
|
seg_info["content_file"] = ""
|
||||||
|
|
||||||
# Add the segment to the list
|
# Add the segment to the list
|
||||||
segment_list.append(seg_info)
|
segment_list.append(seg_info)
|
||||||
|
|
||||||
return segment_list
|
return segment_list
|
||||||
|
|
||||||
|
|
||||||
"""
|
"""
|
||||||
TODO: FINISH IMPORT DUMPING
|
TODO: FINISH IMPORT DUMPING
|
||||||
def import_callback(ea, name, ord):
|
def import_callback(ea, name, ord):
|
||||||
@ -169,41 +262,47 @@ def dump_imports():
|
|||||||
|
|
||||||
return import_dict
|
return import_dict
|
||||||
"""
|
"""
|
||||||
|
|
||||||
#----------
|
# ----------
|
||||||
#---- Main
|
# ---- Main
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|
||||||
try:
|
try:
|
||||||
print("----- Unicorn Context Dumper -----")
|
print("----- Unicorn Context Dumper -----")
|
||||||
print("You must be actively debugging before running this!")
|
print("You must be actively debugging before running this!")
|
||||||
print("If it fails, double check that you are actively debugging before running.")
|
print(
|
||||||
|
"If it fails, double check that you are actively debugging before running."
|
||||||
|
)
|
||||||
|
|
||||||
# Create the output directory
|
# Create the output directory
|
||||||
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
|
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
|
||||||
|
"%Y%m%d_%H%M%S"
|
||||||
|
)
|
||||||
output_path = os.path.dirname(os.path.abspath(GetIdbPath()))
|
output_path = os.path.dirname(os.path.abspath(GetIdbPath()))
|
||||||
output_path = os.path.join(output_path, "UnicornContext_" + timestamp)
|
output_path = os.path.join(output_path, "UnicornContext_" + timestamp)
|
||||||
if not os.path.exists(output_path):
|
if not os.path.exists(output_path):
|
||||||
os.makedirs(output_path)
|
os.makedirs(output_path)
|
||||||
print("Process context will be output to {}".format(output_path))
|
print("Process context will be output to {}".format(output_path))
|
||||||
|
|
||||||
# Get the context
|
# Get the context
|
||||||
context = {
|
context = {
|
||||||
"arch": dump_arch_info(),
|
"arch": dump_arch_info(),
|
||||||
"regs": dump_regs(),
|
"regs": dump_regs(),
|
||||||
"segments": dump_process_memory(output_path),
|
"segments": dump_process_memory(output_path),
|
||||||
#"imports": dump_imports(),
|
# "imports": dump_imports(),
|
||||||
}
|
}
|
||||||
|
|
||||||
# Write the index file
|
# Write the index file
|
||||||
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), 'w')
|
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), "w")
|
||||||
index_file.write(json.dumps(context, indent=4))
|
index_file.write(json.dumps(context, indent=4))
|
||||||
index_file.close()
|
index_file.close()
|
||||||
print("Done.")
|
print("Done.")
|
||||||
|
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print("!!! ERROR:\n\t{}".format(str(e)))
|
print("!!! ERROR:\n\t{}".format(str(e)))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
@ -50,10 +50,11 @@ UNICORN_PAGE_SIZE = 0x1000
|
|||||||
|
|
||||||
# Alignment functions to align all memory segments to Unicorn page boundaries (4KB pages only)
|
# Alignment functions to align all memory segments to Unicorn page boundaries (4KB pages only)
|
||||||
ALIGN_PAGE_DOWN = lambda x: x & ~(UNICORN_PAGE_SIZE - 1)
|
ALIGN_PAGE_DOWN = lambda x: x & ~(UNICORN_PAGE_SIZE - 1)
|
||||||
ALIGN_PAGE_UP = lambda x: (x + UNICORN_PAGE_SIZE - 1) & ~(UNICORN_PAGE_SIZE-1)
|
ALIGN_PAGE_UP = lambda x: (x + UNICORN_PAGE_SIZE - 1) & ~(UNICORN_PAGE_SIZE - 1)
|
||||||
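For a concrete sense of what these two helpers do, here is a small self-contained check (the sample address is arbitrary; the constant mirrors UNICORN_PAGE_SIZE above):

```python
UNICORN_PAGE_SIZE = 0x1000  # 4 KiB pages, as above

ALIGN_PAGE_DOWN = lambda x: x & ~(UNICORN_PAGE_SIZE - 1)
ALIGN_PAGE_UP = lambda x: (x + UNICORN_PAGE_SIZE - 1) & ~(UNICORN_PAGE_SIZE - 1)

addr = 0x7FFF12345678  # arbitrary example address
assert ALIGN_PAGE_DOWN(addr) == 0x7FFF12345000  # round down to the page start
assert ALIGN_PAGE_UP(addr) == 0x7FFF12346000    # round up to the next page boundary
assert ALIGN_PAGE_UP(0x7FFF12345000) == 0x7FFF12345000  # already aligned: unchanged
```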
|
|
||||||
|
# ----------------------
|
||||||
|
# ---- Helper Functions
|
||||||
|
|
||||||
#----------------------
|
|
||||||
#---- Helper Functions
|
|
||||||
|
|
||||||
def overlap_alignments(segments, memory):
|
def overlap_alignments(segments, memory):
|
||||||
final_list = []
|
final_list = []
|
||||||
@ -61,33 +62,40 @@ def overlap_alignments(segments, memory):
|
|||||||
curr_end_addr = 0
|
curr_end_addr = 0
|
||||||
curr_node = None
|
curr_node = None
|
||||||
current_segment = None
|
current_segment = None
|
||||||
sorted_segments = sorted(segments, key=lambda k: (k['start'], k['end']))
|
sorted_segments = sorted(segments, key=lambda k: (k["start"], k["end"]))
|
||||||
if curr_seg_idx < len(sorted_segments):
|
if curr_seg_idx < len(sorted_segments):
|
||||||
current_segment = sorted_segments[curr_seg_idx]
|
current_segment = sorted_segments[curr_seg_idx]
|
||||||
for mem in sorted(memory, key=lambda k: (k['start'], -k['end'])):
|
for mem in sorted(memory, key=lambda k: (k["start"], -k["end"])):
|
||||||
if curr_node is None:
|
if curr_node is None:
|
||||||
if current_segment is not None and current_segment['start'] == mem['start']:
|
if current_segment is not None and current_segment["start"] == mem["start"]:
|
||||||
curr_node = deepcopy(current_segment)
|
curr_node = deepcopy(current_segment)
|
||||||
curr_node['permissions'] = mem['permissions']
|
curr_node["permissions"] = mem["permissions"]
|
||||||
else:
|
else:
|
||||||
curr_node = deepcopy(mem)
|
curr_node = deepcopy(mem)
|
||||||
|
|
||||||
curr_end_addr = curr_node['end']
|
curr_end_addr = curr_node["end"]
|
||||||
|
|
||||||
while curr_end_addr <= mem['end']:
|
while curr_end_addr <= mem["end"]:
|
||||||
if curr_node['end'] == mem['end']:
|
if curr_node["end"] == mem["end"]:
|
||||||
if current_segment is not None and current_segment['start'] > curr_node['start'] and current_segment['start'] < curr_node['end']:
|
if (
|
||||||
curr_node['end'] = current_segment['start']
|
current_segment is not None
|
||||||
if(curr_node['end'] > curr_node['start']):
|
and current_segment["start"] > curr_node["start"]
|
||||||
|
and current_segment["start"] < curr_node["end"]
|
||||||
|
):
|
||||||
|
curr_node["end"] = current_segment["start"]
|
||||||
|
if curr_node["end"] > curr_node["start"]:
|
||||||
final_list.append(curr_node)
|
final_list.append(curr_node)
|
||||||
curr_node = deepcopy(current_segment)
|
curr_node = deepcopy(current_segment)
|
||||||
curr_node['permissions'] = mem['permissions']
|
curr_node["permissions"] = mem["permissions"]
|
||||||
curr_end_addr = curr_node['end']
|
curr_end_addr = curr_node["end"]
|
||||||
else:
|
else:
|
||||||
if(curr_node['end'] > curr_node['start']):
|
if curr_node["end"] > curr_node["start"]:
|
||||||
final_list.append(curr_node)
|
final_list.append(curr_node)
|
||||||
# if curr_node is a segment
|
# if curr_node is a segment
|
||||||
if current_segment is not None and current_segment['end'] == mem['end']:
|
if (
|
||||||
|
current_segment is not None
|
||||||
|
and current_segment["end"] == mem["end"]
|
||||||
|
):
|
||||||
curr_seg_idx += 1
|
curr_seg_idx += 1
|
||||||
if curr_seg_idx < len(sorted_segments):
|
if curr_seg_idx < len(sorted_segments):
|
||||||
current_segment = sorted_segments[curr_seg_idx]
|
current_segment = sorted_segments[curr_seg_idx]
|
||||||
@ -98,50 +106,56 @@ def overlap_alignments(segments, memory):
|
|||||||
break
|
break
|
||||||
# could only be a segment
|
# could only be a segment
|
||||||
else:
|
else:
|
||||||
if curr_node['end'] < mem['end']:
|
if curr_node["end"] < mem["end"]:
|
||||||
# check for remaining segments and valid segments
|
# check for remaining segments and valid segments
|
||||||
if(curr_node['end'] > curr_node['start']):
|
if curr_node["end"] > curr_node["start"]:
|
||||||
final_list.append(curr_node)
|
final_list.append(curr_node)
|
||||||
|
|
||||||
curr_seg_idx += 1
|
curr_seg_idx += 1
|
||||||
if curr_seg_idx < len(sorted_segments):
|
if curr_seg_idx < len(sorted_segments):
|
||||||
current_segment = sorted_segments[curr_seg_idx]
|
current_segment = sorted_segments[curr_seg_idx]
|
||||||
else:
|
else:
|
||||||
current_segment = None
|
current_segment = None
|
||||||
|
|
||||||
if current_segment is not None and current_segment['start'] <= curr_end_addr and current_segment['start'] < mem['end']:
|
if (
|
||||||
|
current_segment is not None
|
||||||
|
and current_segment["start"] <= curr_end_addr
|
||||||
|
and current_segment["start"] < mem["end"]
|
||||||
|
):
|
||||||
curr_node = deepcopy(current_segment)
|
curr_node = deepcopy(current_segment)
|
||||||
curr_node['permissions'] = mem['permissions']
|
curr_node["permissions"] = mem["permissions"]
|
||||||
else:
|
else:
|
||||||
# no more segments
|
# no more segments
|
||||||
curr_node = deepcopy(mem)
|
curr_node = deepcopy(mem)
|
||||||
|
|
||||||
curr_node['start'] = curr_end_addr
|
|
||||||
curr_end_addr = curr_node['end']
|
|
||||||
|
|
||||||
return final_list
|
curr_node["start"] = curr_end_addr
|
||||||
|
curr_end_addr = curr_node["end"]
|
||||||
|
|
||||||
|
return final_list
|
||||||
|
|
||||||
|
|
||||||
# https://github.com/llvm-mirror/llvm/blob/master/include/llvm/ADT/Triple.h
|
# https://github.com/llvm-mirror/llvm/blob/master/include/llvm/ADT/Triple.h
|
||||||
def get_arch():
|
def get_arch():
|
||||||
arch, arch_vendor, arch_os = lldb.target.GetTriple().split('-')
|
arch, arch_vendor, arch_os = lldb.target.GetTriple().split("-")
|
||||||
if arch == 'x86_64':
|
if arch == "x86_64":
|
||||||
return "x64"
|
return "x64"
|
||||||
elif arch == 'x86' or arch == 'i386':
|
elif arch == "x86" or arch == "i386":
|
||||||
return "x86"
|
return "x86"
|
||||||
elif arch == 'aarch64' or arch == 'arm64':
|
elif arch == "aarch64" or arch == "arm64":
|
||||||
return "arm64le"
|
return "arm64le"
|
||||||
elif arch == 'aarch64_be':
|
elif arch == "aarch64_be":
|
||||||
return "arm64be"
|
return "arm64be"
|
||||||
elif arch == 'armeb':
|
elif arch == "armeb":
|
||||||
return "armbe"
|
return "armbe"
|
||||||
elif arch == 'arm':
|
elif arch == "arm":
|
||||||
return "armle"
|
return "armle"
|
||||||
else:
|
else:
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
|
|
||||||
#-----------------------
|
# -----------------------
|
||||||
#---- Dumping functions
|
# ---- Dumping functions
|
||||||
|
|
||||||
|
|
||||||
def dump_arch_info():
|
def dump_arch_info():
|
||||||
arch_info = {}
|
arch_info = {}
|
||||||
@ -152,56 +166,64 @@ def dump_arch_info():
|
|||||||
def dump_regs():
|
def dump_regs():
|
||||||
reg_state = {}
|
reg_state = {}
|
||||||
for reg_list in lldb.frame.GetRegisters():
|
for reg_list in lldb.frame.GetRegisters():
|
||||||
if 'general purpose registers' in reg_list.GetName().lower():
|
if "general purpose registers" in reg_list.GetName().lower():
|
||||||
for reg in reg_list:
|
for reg in reg_list:
|
||||||
reg_state[reg.GetName()] = int(reg.GetValue(), 16)
|
reg_state[reg.GetName()] = int(reg.GetValue(), 16)
|
||||||
return reg_state
|
return reg_state
|
||||||
|
|
||||||
|
|
||||||
def get_section_info(sec):
|
def get_section_info(sec):
|
||||||
name = sec.name if sec.name is not None else ''
|
name = sec.name if sec.name is not None else ""
|
||||||
if sec.GetParent().name is not None:
|
if sec.GetParent().name is not None:
|
||||||
name = sec.GetParent().name + '.' + sec.name
|
name = sec.GetParent().name + "." + sec.name
|
||||||
|
|
||||||
module_name = sec.addr.module.file.GetFilename()
|
module_name = sec.addr.module.file.GetFilename()
|
||||||
module_name = module_name if module_name is not None else ''
|
module_name = module_name if module_name is not None else ""
|
||||||
long_name = module_name + '.' + name
|
long_name = module_name + "." + name
|
||||||
|
|
||||||
return sec.addr.load_addr, (sec.addr.load_addr + sec.size), sec.size, long_name
|
return sec.addr.load_addr, (sec.addr.load_addr + sec.size), sec.size, long_name
|
||||||
|
|
||||||
|
|
||||||
def dump_process_memory(output_dir):
|
def dump_process_memory(output_dir):
|
||||||
# Segment information dictionary
|
# Segment information dictionary
|
||||||
raw_segment_list = []
|
raw_segment_list = []
|
||||||
raw_memory_list = []
|
raw_memory_list = []
|
||||||
|
|
||||||
# 1st pass:
|
# 1st pass:
|
||||||
# Loop over the segments, fill in the segment info dictionary
|
# Loop over the segments, fill in the segment info dictionary
|
||||||
for module in lldb.target.module_iter():
|
for module in lldb.target.module_iter():
|
||||||
for seg_ea in module.section_iter():
|
for seg_ea in module.section_iter():
|
||||||
seg_info = {'module': module.file.GetFilename() }
|
seg_info = {"module": module.file.GetFilename()}
|
||||||
seg_info['start'], seg_info['end'], seg_size, seg_info['name'] = get_section_info(seg_ea)
|
(
|
||||||
|
seg_info["start"],
|
||||||
|
seg_info["end"],
|
||||||
|
seg_size,
|
||||||
|
seg_info["name"],
|
||||||
|
) = get_section_info(seg_ea)
|
||||||
# TODO: Ugly hack for -1 LONG address on 32-bit
|
# TODO: Ugly hack for -1 LONG address on 32-bit
|
||||||
if seg_info['start'] >= sys.maxint or seg_size <= 0:
|
if seg_info["start"] >= sys.maxint or seg_size <= 0:
|
||||||
print "Throwing away page: {}".format(seg_info['name'])
|
print "Throwing away page: {}".format(seg_info["name"])
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Page-align segment
|
# Page-align segment
|
||||||
seg_info['start'] = ALIGN_PAGE_DOWN(seg_info['start'])
|
seg_info["start"] = ALIGN_PAGE_DOWN(seg_info["start"])
|
||||||
seg_info['end'] = ALIGN_PAGE_UP(seg_info['end'])
|
seg_info["end"] = ALIGN_PAGE_UP(seg_info["end"])
|
||||||
print("Appending: {}".format(seg_info['name']))
|
print ("Appending: {}".format(seg_info["name"]))
|
||||||
raw_segment_list.append(seg_info)
|
raw_segment_list.append(seg_info)
|
||||||
|
|
||||||
# Add the stack memory region (just hardcode 0x1000 around the current SP)
|
# Add the stack memory region (just hardcode 0x1000 around the current SP)
|
||||||
sp = lldb.frame.GetSP()
|
sp = lldb.frame.GetSP()
|
||||||
start_sp = ALIGN_PAGE_DOWN(sp)
|
start_sp = ALIGN_PAGE_DOWN(sp)
|
||||||
raw_segment_list.append({'start': start_sp, 'end': start_sp + 0x1000, 'name': 'STACK'})
|
raw_segment_list.append(
|
||||||
|
{"start": start_sp, "end": start_sp + 0x1000, "name": "STACK"}
|
||||||
|
)
|
||||||
|
|
||||||
# Write the original memory to file for debugging
|
# Write the original memory to file for debugging
|
||||||
index_file = open(os.path.join(output_dir, DEBUG_MEM_FILE_NAME), 'w')
|
index_file = open(os.path.join(output_dir, DEBUG_MEM_FILE_NAME), "w")
|
||||||
index_file.write(json.dumps(raw_segment_list, indent=4))
|
index_file.write(json.dumps(raw_segment_list, indent=4))
|
||||||
index_file.close()
|
index_file.close()
|
||||||
|
|
||||||
# Loop over raw memory regions
|
# Loop over raw memory regions
|
||||||
mem_info = lldb.SBMemoryRegionInfo()
|
mem_info = lldb.SBMemoryRegionInfo()
|
||||||
start_addr = -1
|
start_addr = -1
|
||||||
next_region_addr = 0
|
next_region_addr = 0
|
||||||
@ -218,15 +240,20 @@ def dump_process_memory(output_dir):
|
|||||||
end_addr = mem_info.GetRegionEnd()
|
end_addr = mem_info.GetRegionEnd()
|
||||||
|
|
||||||
# Unknown region name
|
# Unknown region name
|
||||||
region_name = 'UNKNOWN'
|
region_name = "UNKNOWN"
|
||||||
|
|
||||||
# Ignore regions that aren't even mapped
|
# Ignore regions that aren't even mapped
|
||||||
if mem_info.IsMapped() and mem_info.IsReadable():
|
if mem_info.IsMapped() and mem_info.IsReadable():
|
||||||
mem_info_obj = {'start': start_addr, 'end': end_addr, 'name': region_name, 'permissions': {
|
mem_info_obj = {
|
||||||
"r": mem_info.IsReadable(),
|
"start": start_addr,
|
||||||
"w": mem_info.IsWritable(),
|
"end": end_addr,
|
||||||
"x": mem_info.IsExecutable()
|
"name": region_name,
|
||||||
}}
|
"permissions": {
|
||||||
|
"r": mem_info.IsReadable(),
|
||||||
|
"w": mem_info.IsWritable(),
|
||||||
|
"x": mem_info.IsExecutable(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
raw_memory_list.append(mem_info_obj)
|
raw_memory_list.append(mem_info_obj)
|
||||||
|
|
||||||
@ -234,65 +261,89 @@ def dump_process_memory(output_dir):
|
|||||||
|
|
||||||
for seg_info in final_segment_list:
|
for seg_info in final_segment_list:
|
||||||
try:
|
try:
|
||||||
seg_info['content_file'] = ''
|
seg_info["content_file"] = ""
|
||||||
start_addr = seg_info['start']
|
start_addr = seg_info["start"]
|
||||||
end_addr = seg_info['end']
|
end_addr = seg_info["end"]
|
||||||
region_name = seg_info['name']
|
region_name = seg_info["name"]
|
||||||
# Compress and dump the content to a file
|
# Compress and dump the content to a file
|
||||||
err = lldb.SBError()
|
err = lldb.SBError()
|
||||||
seg_content = lldb.process.ReadMemory(start_addr, end_addr - start_addr, err)
|
seg_content = lldb.process.ReadMemory(
|
||||||
if(seg_content == None):
|
start_addr, end_addr - start_addr, err
|
||||||
print("Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(start_addr, region_name))
|
)
|
||||||
seg_info['content_file'] = ''
|
if seg_content == None:
|
||||||
|
print (
|
||||||
|
"Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(
|
||||||
|
start_addr, region_name
|
||||||
|
)
|
||||||
|
)
|
||||||
|
seg_info["content_file"] = ""
|
||||||
else:
|
else:
|
||||||
print("Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(start_addr, len(seg_content), region_name, repr(seg_info['permissions'])))
|
print (
|
||||||
|
"Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(
|
||||||
|
start_addr,
|
||||||
|
len(seg_content),
|
||||||
|
region_name,
|
||||||
|
repr(seg_info["permissions"]),
|
||||||
|
)
|
||||||
|
)
|
||||||
compressed_seg_content = zlib.compress(seg_content)
|
compressed_seg_content = zlib.compress(seg_content)
|
||||||
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
|
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
|
||||||
seg_info['content_file'] = md5_sum
|
seg_info["content_file"] = md5_sum
|
||||||
|
|
||||||
# Write the compressed contents to disk
|
# Write the compressed contents to disk
|
||||||
out_file = open(os.path.join(output_dir, md5_sum), 'wb')
|
out_file = open(os.path.join(output_dir, md5_sum), "wb")
|
||||||
out_file.write(compressed_seg_content)
|
out_file.write(compressed_seg_content)
|
||||||
out_file.close()
|
out_file.close()
|
||||||
|
|
||||||
except:
|
except:
|
||||||
print("Exception reading segment ({}): {}".format(region_name, sys.exc_info()[0]))
|
print (
|
||||||
|
"Exception reading segment ({}): {}".format(
|
||||||
|
region_name, sys.exc_info()[0]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
return final_segment_list
|
return final_segment_list
|
||||||
|
|
||||||
#----------
|
|
||||||
#---- Main
|
# ----------
|
||||||
|
# ---- Main
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|
||||||
try:
|
try:
|
||||||
print("----- Unicorn Context Dumper -----")
|
print ("----- Unicorn Context Dumper -----")
|
||||||
print("You must be actively debugging before running this!")
|
print ("You must be actively debugging before running this!")
|
||||||
print("If it fails, double check that you are actively debugging before running.")
|
print (
|
||||||
|
"If it fails, double check that you are actively debugging before running."
|
||||||
|
)
|
||||||
|
|
||||||
# Create the output directory
|
# Create the output directory
|
||||||
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
|
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
|
||||||
|
"%Y%m%d_%H%M%S"
|
||||||
|
)
|
||||||
output_path = "UnicornContext_" + timestamp
|
output_path = "UnicornContext_" + timestamp
|
||||||
if not os.path.exists(output_path):
|
if not os.path.exists(output_path):
|
||||||
os.makedirs(output_path)
|
os.makedirs(output_path)
|
||||||
print("Process context will be output to {}".format(output_path))
|
print ("Process context will be output to {}".format(output_path))
|
||||||
|
|
||||||
# Get the context
|
# Get the context
|
||||||
context = {
|
context = {
|
||||||
"arch": dump_arch_info(),
|
"arch": dump_arch_info(),
|
||||||
"regs": dump_regs(),
|
"regs": dump_regs(),
|
||||||
"segments": dump_process_memory(output_path),
|
"segments": dump_process_memory(output_path),
|
||||||
}
|
}
|
||||||
|
|
||||||
# Write the index file
|
# Write the index file
|
||||||
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), 'w')
|
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), "w")
|
||||||
index_file.write(json.dumps(context, indent=4))
|
index_file.write(json.dumps(context, indent=4))
|
||||||
index_file.close()
|
index_file.close()
|
||||||
print("Done.")
|
print ("Done.")
|
||||||
|
|
||||||
except Exception, e:
|
except Exception, e:
|
||||||
print("!!! ERROR:\n\t{}".format(repr(e)))
|
print ("!!! ERROR:\n\t{}".format(repr(e)))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
elif lldb.debugger:
|
elif lldb.debugger:
|
||||||
|
@ -59,45 +59,47 @@ MAX_SEG_SIZE = 128 * 1024 * 1024
|
|||||||
# Name of the index file
|
# Name of the index file
|
||||||
INDEX_FILE_NAME = "_index.json"
|
INDEX_FILE_NAME = "_index.json"
|
||||||
|
|
||||||
#----------------------
|
# ----------------------
|
||||||
#---- Helper Functions
|
# ---- Helper Functions
|
||||||
|
|
||||||
|
|
||||||
def map_arch():
|
def map_arch():
|
||||||
arch = pwndbg.arch.current # from PWNDBG
|
arch = pwndbg.arch.current # from PWNDBG
|
||||||
if 'x86_64' in arch or 'x86-64' in arch:
|
if "x86_64" in arch or "x86-64" in arch:
|
||||||
return "x64"
|
return "x64"
|
||||||
elif 'x86' in arch or 'i386' in arch:
|
elif "x86" in arch or "i386" in arch:
|
||||||
return "x86"
|
return "x86"
|
||||||
elif 'aarch64' in arch or 'arm64' in arch:
|
elif "aarch64" in arch or "arm64" in arch:
|
||||||
return "arm64le"
|
return "arm64le"
|
||||||
elif 'aarch64_be' in arch:
|
elif "aarch64_be" in arch:
|
||||||
return "arm64be"
|
return "arm64be"
|
||||||
elif 'arm' in arch:
|
elif "arm" in arch:
|
||||||
cpsr = pwndbg.regs['cpsr']
|
cpsr = pwndbg.regs["cpsr"]
|
||||||
# check endianness
|
# check endianness
|
||||||
if pwndbg.arch.endian == 'big':
|
if pwndbg.arch.endian == "big":
|
||||||
# check for THUMB mode
|
# check for THUMB mode
|
||||||
if (cpsr & (1 << 5)):
|
if cpsr & (1 << 5):
|
||||||
return "armbethumb"
|
return "armbethumb"
|
||||||
else:
|
else:
|
||||||
return "armbe"
|
return "armbe"
|
||||||
else:
|
else:
|
||||||
# check for THUMB mode
|
# check for THUMB mode
|
||||||
if (cpsr & (1 << 5)):
|
if cpsr & (1 << 5):
|
||||||
return "armlethumb"
|
return "armlethumb"
|
||||||
else:
|
else:
|
||||||
return "armle"
|
return "armle"
|
||||||
elif 'mips' in arch:
|
elif "mips" in arch:
|
||||||
if pwndbg.arch.endian == 'little':
|
if pwndbg.arch.endian == "little":
|
||||||
return 'mipsel'
|
return "mipsel"
|
||||||
else:
|
else:
|
||||||
return 'mips'
|
return "mips"
|
||||||
else:
|
else:
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
|
|
||||||
#-----------------------
|
# -----------------------
|
||||||
#---- Dumping functions
|
# ---- Dumping functions
|
||||||
|
|
||||||
|
|
||||||
def dump_arch_info():
|
def dump_arch_info():
|
||||||
arch_info = {}
|
arch_info = {}
|
||||||
@ -110,26 +112,26 @@ def dump_regs():
|
|||||||
for reg in pwndbg.regs.all:
|
for reg in pwndbg.regs.all:
|
||||||
reg_val = pwndbg.regs[reg]
|
reg_val = pwndbg.regs[reg]
|
||||||
# current dumper script looks for register values to be hex strings
|
# current dumper script looks for register values to be hex strings
|
||||||
# reg_str = "0x{:08x}".format(reg_val)
|
# reg_str = "0x{:08x}".format(reg_val)
|
||||||
# if "64" in get_arch():
|
# if "64" in get_arch():
|
||||||
# reg_str = "0x{:016x}".format(reg_val)
|
# reg_str = "0x{:016x}".format(reg_val)
|
||||||
# reg_state[reg.strip().strip('$')] = reg_str
|
# reg_state[reg.strip().strip('$')] = reg_str
|
||||||
reg_state[reg.strip().strip('$')] = reg_val
|
reg_state[reg.strip().strip("$")] = reg_val
|
||||||
return reg_state
|
return reg_state
|
||||||
|
|
||||||
|
|
||||||
def dump_process_memory(output_dir):
|
def dump_process_memory(output_dir):
|
||||||
# Segment information dictionary
|
# Segment information dictionary
|
||||||
final_segment_list = []
|
final_segment_list = []
|
||||||
|
|
||||||
# PWNDBG:
|
# PWNDBG:
|
||||||
vmmap = pwndbg.vmmap.get()
|
vmmap = pwndbg.vmmap.get()
|
||||||
|
|
||||||
# Pointer to end of last dumped memory segment
|
# Pointer to end of last dumped memory segment
|
||||||
segment_last_addr = 0x0;
|
segment_last_addr = 0x0
|
||||||
|
|
||||||
start = None
|
start = None
|
||||||
end = None
|
end = None
|
||||||
|
|
||||||
if not vmmap:
|
if not vmmap:
|
||||||
print("No address mapping information found")
|
print("No address mapping information found")
|
||||||
@ -141,86 +143,107 @@ def dump_process_memory(output_dir):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
start = entry.start
|
start = entry.start
|
||||||
end = entry.end
|
end = entry.end
|
||||||
|
|
||||||
if (segment_last_addr > entry.start): # indicates overlap
|
if segment_last_addr > entry.start: # indicates overlap
|
||||||
if (segment_last_addr > entry.end): # indicates complete overlap, so we skip the segment entirely
|
if (
|
||||||
|
segment_last_addr > entry.end
|
||||||
|
): # indicates complete overlap, so we skip the segment entirely
|
||||||
continue
|
continue
|
||||||
else:
|
else:
|
||||||
start = segment_last_addr
|
start = segment_last_addr
|
||||||
|
|
||||||
|
seg_info = {
|
||||||
seg_info = {'start': start, 'end': end, 'name': entry.objfile, 'permissions': {
|
"start": start,
|
||||||
"r": entry.read,
|
"end": end,
|
||||||
"w": entry.write,
|
"name": entry.objfile,
|
||||||
"x": entry.execute
|
"permissions": {"r": entry.read, "w": entry.write, "x": entry.execute},
|
||||||
}, 'content_file': ''}
|
"content_file": "",
|
||||||
|
}
|
||||||
|
|
||||||
# "(deleted)" may or may not be valid, but don't push it.
|
# "(deleted)" may or may not be valid, but don't push it.
|
||||||
if entry.read and not '(deleted)' in entry.objfile:
|
if entry.read and not "(deleted)" in entry.objfile:
|
||||||
try:
|
try:
|
||||||
# Compress and dump the content to a file
|
# Compress and dump the content to a file
|
||||||
seg_content = pwndbg.memory.read(start, end - start)
|
seg_content = pwndbg.memory.read(start, end - start)
|
||||||
if(seg_content == None):
|
if seg_content == None:
|
||||||
print("Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(entry.start, entry.objfile))
|
print(
|
||||||
|
"Segment empty: @0x{0:016x} (size:UNKNOWN) {1}".format(
|
||||||
|
entry.start, entry.objfile
|
||||||
|
)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
print("Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(entry.start, len(seg_content), entry.objfile, repr(seg_info['permissions'])))
|
print(
|
||||||
|
"Dumping segment @0x{0:016x} (size:0x{1:x}): {2} [{3}]".format(
|
||||||
|
entry.start,
|
||||||
|
len(seg_content),
|
||||||
|
entry.objfile,
|
||||||
|
repr(seg_info["permissions"]),
|
||||||
|
)
|
||||||
|
)
|
||||||
compressed_seg_content = zlib.compress(str(seg_content))
|
compressed_seg_content = zlib.compress(str(seg_content))
|
||||||
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
|
md5_sum = hashlib.md5(compressed_seg_content).hexdigest() + ".bin"
|
||||||
seg_info["content_file"] = md5_sum
|
seg_info["content_file"] = md5_sum
|
||||||
|
|
||||||
# Write the compressed contents to disk
|
# Write the compressed contents to disk
|
||||||
out_file = open(os.path.join(output_dir, md5_sum), 'wb')
|
out_file = open(os.path.join(output_dir, md5_sum), "wb")
|
||||||
out_file.write(compressed_seg_content)
|
out_file.write(compressed_seg_content)
|
||||||
out_file.close()
|
out_file.close()
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
print("Exception reading segment ({}): {}".format(entry.objfile, sys.exc_info()[0]))
|
print(
|
||||||
|
"Exception reading segment ({}): {}".format(
|
||||||
|
entry.objfile, sys.exc_info()[0]
|
||||||
|
)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
print("Skipping segment {0}@0x{1:016x}".format(entry.objfile, entry.start))
|
print("Skipping segment {0}@0x{1:016x}".format(entry.objfile, entry.start))
|
||||||
|
|
||||||
segment_last_addr = end
|
segment_last_addr = end
|
||||||
|
|
||||||
# Add the segment to the list
|
# Add the segment to the list
|
||||||
final_segment_list.append(seg_info)
|
final_segment_list.append(seg_info)
|
||||||
|
|
||||||
|
|
||||||
return final_segment_list
|
return final_segment_list
|
||||||
|
|
||||||
#----------
|
|
||||||
#---- Main
|
# ----------
|
||||||
|
# ---- Main
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
print("----- Unicorn Context Dumper -----")
|
print("----- Unicorn Context Dumper -----")
|
||||||
print("You must be actively debugging before running this!")
|
print("You must be actively debugging before running this!")
|
||||||
print("If it fails, double check that you are actively debugging before running.")
|
print("If it fails, double check that you are actively debugging before running.")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|
||||||
# Create the output directory
|
# Create the output directory
|
||||||
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
|
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime(
|
||||||
|
"%Y%m%d_%H%M%S"
|
||||||
|
)
|
||||||
output_path = "UnicornContext_" + timestamp
|
output_path = "UnicornContext_" + timestamp
|
||||||
if not os.path.exists(output_path):
|
if not os.path.exists(output_path):
|
||||||
os.makedirs(output_path)
|
os.makedirs(output_path)
|
||||||
print("Process context will be output to {}".format(output_path))
|
print("Process context will be output to {}".format(output_path))
|
||||||
|
|
||||||
# Get the context
|
# Get the context
|
||||||
context = {
|
context = {
|
||||||
"arch": dump_arch_info(),
|
"arch": dump_arch_info(),
|
||||||
"regs": dump_regs(),
|
"regs": dump_regs(),
|
||||||
"segments": dump_process_memory(output_path),
|
"segments": dump_process_memory(output_path),
|
||||||
}
|
}
|
||||||
|
|
||||||
# Write the index file
|
# Write the index file
|
||||||
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), 'w')
|
index_file = open(os.path.join(output_path, INDEX_FILE_NAME), "w")
|
||||||
index_file.write(json.dumps(context, indent=4))
|
index_file.write(json.dumps(context, indent=4))
|
||||||
index_file.close()
|
index_file.close()
|
||||||
print("Done.")
|
print("Done.")
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print("!!! ERROR:\n\t{}".format(repr(e)))
|
print("!!! ERROR:\n\t{}".format(repr(e)))
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__" and pwndbg_loaded:
|
if __name__ == "__main__" and pwndbg_loaded:
|
||||||
main()
|
main()
|
||||||
|
|
||||||
|
@ -17,6 +17,6 @@ You shouldn't need to compile simple_target.c since a X86_64 binary version is
|
|||||||
pre-built and shipped in this sample folder. This file documents how the binary
|
pre-built and shipped in this sample folder. This file documents how the binary
|
||||||
was built in case you want to rebuild it or recompile it for any reason.
|
was built in case you want to rebuild it or recompile it for any reason.
|
||||||
|
|
||||||
The pre-built binary (simple_target_x86_64.bin) was built with gcc using -g -O0.
|
The pre-built binary (persistent_target_x86_64) was built with gcc using -g -O0.
|
||||||
|
|
||||||
We then load the binary and execute the main function directly.
|
We then load the binary and execute the main function directly.
|
||||||
|
@ -22,48 +22,81 @@ from unicornafl import *
|
|||||||
from unicornafl.x86_const import *
|
from unicornafl.x86_const import *
|
||||||
|
|
||||||
# Path to the file containing the binary to emulate
|
# Path to the file containing the binary to emulate
|
||||||
BINARY_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'compcov_target.bin')
|
BINARY_FILE = os.path.join(
|
||||||
|
os.path.dirname(os.path.abspath(__file__)), "compcov_target.bin"
|
||||||
|
)
|
||||||
|
|
||||||
# Memory map for the code to be tested
|
# Memory map for the code to be tested
|
||||||
CODE_ADDRESS = 0x00100000 # Arbitrary address where code to test will be loaded
|
CODE_ADDRESS = 0x00100000 # Arbitrary address where code to test will be loaded
|
||||||
CODE_SIZE_MAX = 0x00010000 # Max size for the code (64kb)
|
CODE_SIZE_MAX = 0x00010000 # Max size for the code (64kb)
|
||||||
STACK_ADDRESS = 0x00200000 # Address of the stack (arbitrarily chosen)
|
STACK_ADDRESS = 0x00200000 # Address of the stack (arbitrarily chosen)
|
||||||
STACK_SIZE = 0x00010000 # Size of the stack (arbitrarily chosen)
|
STACK_SIZE = 0x00010000 # Size of the stack (arbitrarily chosen)
|
||||||
DATA_ADDRESS = 0x00300000 # Address where mutated data will be placed
|
DATA_ADDRESS = 0x00300000 # Address where mutated data will be placed
|
||||||
DATA_SIZE_MAX = 0x00010000 # Maximum allowable size of mutated data
|
DATA_SIZE_MAX = 0x00010000 # Maximum allowable size of mutated data
|
||||||
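The constants above lay out three separate 64 KiB regions (code, stack, data). As a quick sanity check, not part of the harness itself, the regions can be verified not to overlap before they are mapped into Unicorn:

```python
# Illustrative check only: every (address, size) pair below comes from the
# constants defined above; sorted regions must not run into each other.
regions = sorted(
    [
        (CODE_ADDRESS, CODE_SIZE_MAX),
        (STACK_ADDRESS, STACK_SIZE),
        (DATA_ADDRESS, DATA_SIZE_MAX),
    ]
)
for (start, size), (next_start, _) in zip(regions, regions[1:]):
    assert start + size <= next_start, "memory regions overlap"
```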
|
|
||||||
try:
|
try:
|
||||||
# If Capstone is installed then we'll dump disassembly, otherwise just dump the binary.
|
# If Capstone is installed then we'll dump disassembly, otherwise just dump the binary.
|
||||||
from capstone import *
|
from capstone import *
|
||||||
|
|
||||||
cs = Cs(CS_ARCH_X86, CS_MODE_64)
|
cs = Cs(CS_ARCH_X86, CS_MODE_64)
|
||||||
|
|
||||||
def unicorn_debug_instruction(uc, address, size, user_data):
|
def unicorn_debug_instruction(uc, address, size, user_data):
|
||||||
mem = uc.mem_read(address, size)
|
mem = uc.mem_read(address, size)
|
||||||
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(bytes(mem), size):
|
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(
|
||||||
|
bytes(mem), size
|
||||||
|
):
|
||||||
print(" Instr: {:#016x}:\t{}\t{}".format(address, cs_mnemonic, cs_opstr))
|
print(" Instr: {:#016x}:\t{}\t{}".format(address, cs_mnemonic, cs_opstr))
|
||||||
|
|
||||||
|
|
||||||
except ImportError:
|
except ImportError:
|
||||||
|
|
||||||
def unicorn_debug_instruction(uc, address, size, user_data):
|
def unicorn_debug_instruction(uc, address, size, user_data):
|
||||||
print(" Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
|
print(" Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
|
||||||
|
|
||||||
|
|
||||||
def unicorn_debug_block(uc, address, size, user_data):
|
def unicorn_debug_block(uc, address, size, user_data):
|
||||||
print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
|
print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
|
||||||
|
|
||||||
|
|
||||||
def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
|
def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
|
||||||
if access == UC_MEM_WRITE:
|
if access == UC_MEM_WRITE:
|
||||||
print(" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
|
print(
|
||||||
|
" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
|
||||||
|
address, size, value
|
||||||
|
)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
print(" >>> Read: addr=0x{0:016x} size={1}".format(address, size))
|
print(" >>> Read: addr=0x{0:016x} size={1}".format(address, size))
|
||||||
|
|
||||||
|
|
||||||
def unicorn_debug_mem_invalid_access(uc, access, address, size, value, user_data):
|
def unicorn_debug_mem_invalid_access(uc, access, address, size, value, user_data):
|
||||||
if access == UC_MEM_WRITE_UNMAPPED:
|
if access == UC_MEM_WRITE_UNMAPPED:
|
||||||
print(" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
|
print(
|
||||||
|
" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
|
||||||
|
address, size, value
|
||||||
|
)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
print(" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size))
|
print(
|
||||||
|
" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description="Test harness for compcov_target.bin")
|
parser = argparse.ArgumentParser(description="Test harness for compcov_target.bin")
|
||||||
parser.add_argument('input_file', type=str, help="Path to the file containing the mutated input to load")
|
parser.add_argument(
|
||||||
parser.add_argument('-t', '--trace', default=False, action="store_true", help="Enables debug tracing")
|
"input_file",
|
||||||
|
type=str,
|
||||||
|
help="Path to the file containing the mutated input to load",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"-t",
|
||||||
|
"--trace",
|
||||||
|
default=False,
|
||||||
|
action="store_true",
|
||||||
|
help="Enables debug tracing",
|
||||||
|
)
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
# Instantiate a MIPS32 big endian Unicorn Engine instance
|
# Instantiate a MIPS32 big endian Unicorn Engine instance
|
||||||
@ -73,13 +106,16 @@ def main():
|
|||||||
uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
|
uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
|
||||||
uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction)
|
uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction)
|
||||||
uc.hook_add(UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, unicorn_debug_mem_access)
|
uc.hook_add(UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, unicorn_debug_mem_access)
|
||||||
uc.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID, unicorn_debug_mem_invalid_access)
|
uc.hook_add(
|
||||||
|
UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID,
|
||||||
|
unicorn_debug_mem_invalid_access,
|
||||||
|
)
|
||||||
|
|
||||||
#---------------------------------------------------
|
# ---------------------------------------------------
|
||||||
# Load the binary to emulate and map it into memory
|
# Load the binary to emulate and map it into memory
|
||||||
|
|
||||||
print("Loading data input from {}".format(args.input_file))
|
print("Loading data input from {}".format(args.input_file))
|
||||||
binary_file = open(BINARY_FILE, 'rb')
|
binary_file = open(BINARY_FILE, "rb")
|
||||||
binary_code = binary_file.read()
|
binary_code = binary_file.read()
|
||||||
binary_file.close()
|
binary_file.close()
|
||||||
|
|
||||||
@ -93,11 +129,11 @@ def main():
|
|||||||
uc.mem_write(CODE_ADDRESS, binary_code)
|
uc.mem_write(CODE_ADDRESS, binary_code)
|
||||||
|
|
||||||
# Set the program counter to the start of the code
|
# Set the program counter to the start of the code
|
||||||
start_address = CODE_ADDRESS # Address of entry point of main()
|
start_address = CODE_ADDRESS # Address of entry point of main()
|
||||||
end_address = CODE_ADDRESS + 0x55 # Address of last instruction in main()
|
end_address = CODE_ADDRESS + 0x55 # Address of last instruction in main()
|
||||||
uc.reg_write(UC_X86_REG_RIP, start_address)
|
uc.reg_write(UC_X86_REG_RIP, start_address)
|
||||||
|
|
||||||
#-----------------
|
# -----------------
|
||||||
# Set up the stack
|
# Set up the stack
|
||||||
|
|
||||||
uc.mem_map(STACK_ADDRESS, STACK_SIZE)
|
uc.mem_map(STACK_ADDRESS, STACK_SIZE)
|
||||||
@ -106,8 +142,7 @@ def main():
|
|||||||
# Mapping a location to write our buffer to
|
# Mapping a location to write our buffer to
|
||||||
uc.mem_map(DATA_ADDRESS, DATA_SIZE_MAX)
|
uc.mem_map(DATA_ADDRESS, DATA_SIZE_MAX)
|
||||||
|
|
||||||
|
# -----------------------------------------------
|
||||||
#-----------------------------------------------
|
|
||||||
# Load the mutated input and map it into memory
|
# Load the mutated input and map it into memory
|
||||||
|
|
||||||
def place_input_callback(uc, input, _, data):
|
def place_input_callback(uc, input, _, data):
|
||||||
@ -121,7 +156,7 @@ def main():
|
|||||||
# Write the mutated command into the data buffer
|
# Write the mutated command into the data buffer
|
||||||
uc.mem_write(DATA_ADDRESS, input)
|
uc.mem_write(DATA_ADDRESS, input)
|
||||||
|
|
||||||
#------------------------------------------------------------
|
# ------------------------------------------------------------
|
||||||
# Emulate the code, allowing it to process the mutated input
|
# Emulate the code, allowing it to process the mutated input
|
||||||
|
|
||||||
print("Starting the AFL fuzz")
|
print("Starting the AFL fuzz")
|
||||||
@ -129,8 +164,9 @@ def main():
|
|||||||
input_file=args.input_file,
|
input_file=args.input_file,
|
||||||
place_input_callback=place_input_callback,
|
place_input_callback=place_input_callback,
|
||||||
exits=[end_address],
|
exits=[end_address],
|
||||||
persistent_iters=1
|
persistent_iters=1,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
|
@ -22,48 +22,81 @@ from unicornafl import *
|
|||||||
from unicornafl.mips_const import *
|
from unicornafl.mips_const import *
|
||||||
|
|
||||||
# Path to the file containing the binary to emulate
|
# Path to the file containing the binary to emulate
|
||||||
BINARY_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'simple_target.bin')
|
BINARY_FILE = os.path.join(
|
||||||
|
os.path.dirname(os.path.abspath(__file__)), "simple_target.bin"
|
||||||
|
)
|
||||||
|
|
||||||
# Memory map for the code to be tested
|
# Memory map for the code to be tested
|
||||||
CODE_ADDRESS = 0x00100000 # Arbitrary address where code to test will be loaded
|
CODE_ADDRESS = 0x00100000 # Arbitrary address where code to test will be loaded
|
||||||
CODE_SIZE_MAX = 0x00010000 # Max size for the code (64kb)
|
CODE_SIZE_MAX = 0x00010000 # Max size for the code (64kb)
|
||||||
STACK_ADDRESS = 0x00200000 # Address of the stack (arbitrarily chosen)
|
STACK_ADDRESS = 0x00200000 # Address of the stack (arbitrarily chosen)
|
||||||
STACK_SIZE = 0x00010000 # Size of the stack (arbitrarily chosen)
|
STACK_SIZE = 0x00010000 # Size of the stack (arbitrarily chosen)
|
||||||
DATA_ADDRESS = 0x00300000 # Address where mutated data will be placed
|
DATA_ADDRESS = 0x00300000 # Address where mutated data will be placed
|
||||||
DATA_SIZE_MAX = 0x00010000 # Maximum allowable size of mutated data
|
DATA_SIZE_MAX = 0x00010000 # Maximum allowable size of mutated data
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# If Capstone is installed then we'll dump disassembly, otherwise just dump the binary.
|
# If Capstone is installed then we'll dump disassembly, otherwise just dump the binary.
|
||||||
from capstone import *
|
from capstone import *
|
||||||
|
|
||||||
cs = Cs(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN)
|
cs = Cs(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN)
|
||||||
|
|
||||||
def unicorn_debug_instruction(uc, address, size, user_data):
|
def unicorn_debug_instruction(uc, address, size, user_data):
|
||||||
mem = uc.mem_read(address, size)
|
mem = uc.mem_read(address, size)
|
||||||
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(bytes(mem), size):
|
for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(
|
||||||
|
bytes(mem), size
|
||||||
|
):
|
||||||
print(" Instr: {:#016x}:\t{}\t{}".format(address, cs_mnemonic, cs_opstr))
|
print(" Instr: {:#016x}:\t{}\t{}".format(address, cs_mnemonic, cs_opstr))
|
||||||
|
|
||||||
|
|
||||||
except ImportError:
|
except ImportError:
|
||||||
|
|
||||||
def unicorn_debug_instruction(uc, address, size, user_data):
|
def unicorn_debug_instruction(uc, address, size, user_data):
|
||||||
print(" Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
|
print(" Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
|
||||||
|
|
||||||
|
|
||||||
def unicorn_debug_block(uc, address, size, user_data):
|
def unicorn_debug_block(uc, address, size, user_data):
|
||||||
print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
|
print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))
|
||||||
|
|
||||||
|
|
||||||
def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
|
def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
|
||||||
if access == UC_MEM_WRITE:
|
if access == UC_MEM_WRITE:
|
||||||
print(" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
|
print(
|
||||||
|
" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
|
||||||
|
address, size, value
|
||||||
|
)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
print(" >>> Read: addr=0x{0:016x} size={1}".format(address, size))
|
print(" >>> Read: addr=0x{0:016x} size={1}".format(address, size))
|
||||||
|
|
||||||
|
|
||||||
 def unicorn_debug_mem_invalid_access(uc, access, address, size, value, user_data):
     if access == UC_MEM_WRITE_UNMAPPED:
-        print(" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
+        print(
+            " >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
+                address, size, value
+            )
+        )
     else:
-        print(" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size))
+        print(
+            " >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size)
+        )


 def main():

     parser = argparse.ArgumentParser(description="Test harness for simple_target.bin")
-    parser.add_argument('input_file', type=str, help="Path to the file containing the mutated input to load")
-    parser.add_argument('-t', '--trace', default=False, action="store_true", help="Enables debug tracing")
+    parser.add_argument(
+        "input_file",
+        type=str,
+        help="Path to the file containing the mutated input to load",
+    )
+    parser.add_argument(
+        "-t",
+        "--trace",
+        default=False,
+        action="store_true",
+        help="Enables debug tracing",
+    )
     args = parser.parse_args()

     # Instantiate a MIPS32 big endian Unicorn Engine instance
@@ -73,13 +106,16 @@ def main():
     uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
     uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction)
     uc.hook_add(UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, unicorn_debug_mem_access)
-    uc.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID, unicorn_debug_mem_invalid_access)
+    uc.hook_add(
+        UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID,
+        unicorn_debug_mem_invalid_access,
+    )

-    #---------------------------------------------------
+    # ---------------------------------------------------
     # Load the binary to emulate and map it into memory

     print("Loading data input from {}".format(args.input_file))
-    binary_file = open(BINARY_FILE, 'rb')
+    binary_file = open(BINARY_FILE, "rb")
     binary_code = binary_file.read()
     binary_file.close()

@@ -93,11 +129,11 @@ def main():
     uc.mem_write(CODE_ADDRESS, binary_code)

     # Set the program counter to the start of the code
     start_address = CODE_ADDRESS # Address of entry point of main()
-    end_address = CODE_ADDRESS + 0xf4 # Address of last instruction in main()
+    end_address = CODE_ADDRESS + 0xF4 # Address of last instruction in main()
     uc.reg_write(UC_MIPS_REG_PC, start_address)

-    #-----------------
+    # -----------------
     # Setup the stack

     uc.mem_map(STACK_ADDRESS, STACK_SIZE)
@@ -106,14 +142,14 @@ def main():
     # reserve some space for data
     uc.mem_map(DATA_ADDRESS, DATA_SIZE_MAX)

-    #-----------------------------------------------------
+    # -----------------------------------------------------
     # Set up a callback to place input data (do little work here, it's called for every single iteration)
     # We did not pass in any data and don't use persistent mode, so we can ignore these params.
     # Be sure to check out the docstrings for the uc.afl_* functions.
     def place_input_callback(uc, input, persistent_round, data):
         # Apply constraints to the mutated input
         if len(input) > DATA_SIZE_MAX:
-            #print("Test input is too long (> {} bytes)")
+            # print("Test input is too long (> {} bytes)")
             return False

         # Write the mutated command into the data buffer
@@ -122,5 +158,6 @@ def main():
     # Start the fuzzer.
     uc.afl_fuzz(args.input_file, place_input_callback, [end_address])

+
 if __name__ == "__main__":
     main()
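
The hunks above only re-wrap the first simple MIPS harness with black; the unicornafl flow itself is unchanged. For orientation, a minimal sketch of that flow follows. It is illustrative only and not part of the diff: the addresses and file names are placeholders, and only uc.afl_fuzz() plus the constants visible above are taken from the harness.

# Minimal sketch of the pattern used by the harness above (placeholder values).
from unicornafl import *
from unicornafl.mips_const import *

uc = Uc(UC_ARCH_MIPS, UC_MODE_MIPS32 + UC_MODE_BIG_ENDIAN)
uc.mem_map(0x00100000, 0x00010000)                       # code region
uc.mem_map(0x00300000, 0x00010000)                       # buffer for mutated input
uc.mem_write(0x00100000, open("target.bin", "rb").read())
uc.reg_write(UC_MIPS_REG_PC, 0x00100000)

def place_input_callback(uc, input, persistent_round, data):
    if len(input) > 0x00010000:
        return False                                     # ask AFL to skip this input
    uc.mem_write(0x00300000, input)

# Starts the fork server and emulates until one of the exit addresses is hit.
uc.afl_fuzz("path/to/seed", place_input_callback, [0x001000F4])
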
@@ -25,50 +25,79 @@ from unicornafl import *
 from unicornafl.mips_const import *

 # Path to the file containing the binary to emulate
-BINARY_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'simple_target.bin')
+BINARY_FILE = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), "simple_target.bin"
+)

 # Memory map for the code to be tested
 CODE_ADDRESS = 0x00100000 # Arbitrary address where code to test will be loaded
 CODE_SIZE_MAX = 0x00010000 # Max size for the code (64kb)
 STACK_ADDRESS = 0x00200000 # Address of the stack (arbitrarily chosen)
 STACK_SIZE = 0x00010000 # Size of the stack (arbitrarily chosen)
 DATA_ADDRESS = 0x00300000 # Address where mutated data will be placed
 DATA_SIZE_MAX = 0x00010000 # Maximum allowable size of mutated data

 try:
     # If Capstone is installed then we'll dump disassembly, otherwise just dump the binary.
     from capstone import *

     cs = Cs(CS_ARCH_MIPS, CS_MODE_MIPS32 + CS_MODE_BIG_ENDIAN)

     def unicorn_debug_instruction(uc, address, size, user_data):
         mem = uc.mem_read(address, size)
-        for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(bytes(mem), size):
+        for (cs_address, cs_size, cs_mnemonic, cs_opstr) in cs.disasm_lite(
+            bytes(mem), size
+        ):
             print(" Instr: {:#016x}:\t{}\t{}".format(address, cs_mnemonic, cs_opstr))


 except ImportError:

     def unicorn_debug_instruction(uc, address, size, user_data):
         print(" Instr: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))


 def unicorn_debug_block(uc, address, size, user_data):
     print("Basic Block: addr=0x{0:016x}, size=0x{1:016x}".format(address, size))


 def unicorn_debug_mem_access(uc, access, address, size, value, user_data):
     if access == UC_MEM_WRITE:
-        print(" >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
+        print(
+            " >>> Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
+                address, size, value
+            )
+        )
     else:
         print(" >>> Read: addr=0x{0:016x} size={1}".format(address, size))


 def unicorn_debug_mem_invalid_access(uc, access, address, size, value, user_data):
     if access == UC_MEM_WRITE_UNMAPPED:
-        print(" >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(address, size, value))
+        print(
+            " >>> INVALID Write: addr=0x{0:016x} size={1} data=0x{2:016x}".format(
+                address, size, value
+            )
+        )
     else:
-        print(" >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size))
+        print(
+            " >>> INVALID Read: addr=0x{0:016x} size={1}".format(address, size)
+        )


 def force_crash(uc_error):
     # This function should be called to indicate to AFL that a crash occurred during emulation.
     # Pass in the exception received from Uc.emu_start()
     mem_errors = [
-        UC_ERR_READ_UNMAPPED, UC_ERR_READ_PROT, UC_ERR_READ_UNALIGNED,
-        UC_ERR_WRITE_UNMAPPED, UC_ERR_WRITE_PROT, UC_ERR_WRITE_UNALIGNED,
-        UC_ERR_FETCH_UNMAPPED, UC_ERR_FETCH_PROT, UC_ERR_FETCH_UNALIGNED,
+        UC_ERR_READ_UNMAPPED,
+        UC_ERR_READ_PROT,
+        UC_ERR_READ_UNALIGNED,
+        UC_ERR_WRITE_UNMAPPED,
+        UC_ERR_WRITE_PROT,
+        UC_ERR_WRITE_UNALIGNED,
+        UC_ERR_FETCH_UNMAPPED,
+        UC_ERR_FETCH_PROT,
+        UC_ERR_FETCH_UNALIGNED,
     ]
     if uc_error.errno in mem_errors:
         # Memory error - throw SIGSEGV
@@ -80,11 +109,22 @@ def force_crash(uc_error):
         # Not sure what happened - throw SIGABRT
         os.kill(os.getpid(), signal.SIGABRT)

+
 def main():

     parser = argparse.ArgumentParser(description="Test harness for simple_target.bin")
-    parser.add_argument('input_file', type=str, help="Path to the file containing the mutated input to load")
-    parser.add_argument('-d', '--debug', default=False, action="store_true", help="Enables debug tracing")
+    parser.add_argument(
+        "input_file",
+        type=str,
+        help="Path to the file containing the mutated input to load",
+    )
+    parser.add_argument(
+        "-d",
+        "--debug",
+        default=False,
+        action="store_true",
+        help="Enables debug tracing",
+    )
     args = parser.parse_args()

     # Instantiate a MIPS32 big endian Unicorn Engine instance
@@ -94,13 +134,16 @@ def main():
     uc.hook_add(UC_HOOK_BLOCK, unicorn_debug_block)
     uc.hook_add(UC_HOOK_CODE, unicorn_debug_instruction)
     uc.hook_add(UC_HOOK_MEM_WRITE | UC_HOOK_MEM_READ, unicorn_debug_mem_access)
-    uc.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID, unicorn_debug_mem_invalid_access)
+    uc.hook_add(
+        UC_HOOK_MEM_WRITE_UNMAPPED | UC_HOOK_MEM_READ_INVALID,
+        unicorn_debug_mem_invalid_access,
+    )

-    #---------------------------------------------------
+    # ---------------------------------------------------
     # Load the binary to emulate and map it into memory

     print("Loading data input from {}".format(args.input_file))
-    binary_file = open(BINARY_FILE, 'rb')
+    binary_file = open(BINARY_FILE, "rb")
     binary_code = binary_file.read()
     binary_file.close()

@@ -114,11 +157,11 @@ def main():
     uc.mem_write(CODE_ADDRESS, binary_code)

     # Set the program counter to the start of the code
     start_address = CODE_ADDRESS # Address of entry point of main()
-    end_address = CODE_ADDRESS + 0xf4 # Address of last instruction in main()
+    end_address = CODE_ADDRESS + 0xF4 # Address of last instruction in main()
     uc.reg_write(UC_MIPS_REG_PC, start_address)

-    #-----------------
+    # -----------------
     # Setup the stack

     uc.mem_map(STACK_ADDRESS, STACK_SIZE)
@@ -127,10 +170,10 @@ def main():
     # reserve some space for data
     uc.mem_map(DATA_ADDRESS, DATA_SIZE_MAX)

-    #-----------------------------------------------------
+    # -----------------------------------------------------
     # Kick off AFL's fork server
     # THIS MUST BE DONE BEFORE LOADING USER DATA!
     # If this isn't done every single run, the AFL fork server
     # will not be started appropriately and you'll get erratic results!

     print("Starting the AFL forkserver")
@@ -142,12 +185,12 @@ def main():
     else:
         out = lambda x, y: print(x.format(y))

-    #-----------------------------------------------
+    # -----------------------------------------------
     # Load the mutated input and map it into memory

     # Load the mutated input from disk
     out("Loading data input from {}", args.input_file)
-    input_file = open(args.input_file, 'rb')
+    input_file = open(args.input_file, "rb")
     input = input_file.read()
     input_file.close()

@@ -159,7 +202,7 @@ def main():
     # Write the mutated command into the data buffer
     uc.mem_write(DATA_ADDRESS, input)

-    #------------------------------------------------------------
+    # ------------------------------------------------------------
     # Emulate the code, allowing it to process the mutated input

     out("Executing until a crash or execution reaches 0x{0:016x}", end_address)
@@ -175,5 +218,6 @@ def main():
     # UC_AFL_RET_FINISHED = 3
     out("Done. AFL Mode is {}", afl_mode)

+
 if __name__ == "__main__":
     main()
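
In the second harness, force_crash() (shown in full above) maps Unicorn memory errors to SIGSEGV and everything else to SIGABRT so that AFL records a crash. The sketch below shows how such a helper is typically driven from inside main() after the setup above; it is illustrative only, and the emu_start()/UcError pair is standard Unicorn API rather than a line taken from this diff.

# Illustrative only: emulate the target and report failures to AFL as crashes.
try:
    uc.emu_start(start_address, end_address, timeout=0, count=0)
except UcError as e:
    print("Execution failed with error: {}".format(e))
    force_crash(e)   # raises SIGSEGV/SIGABRT so the fuzzer registers a crash
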
@@ -256,17 +256,17 @@ def main():
         input_len = len(input)
         # global input_len
         if input_len > INPUT_MAX:
-            #print("Test input is too long (> {} bytes)")
+            # print("Test input is too long (> {} bytes)")
             return False

         # print(f"Placing input: {input} in round {persistent_round}")

         # Make sure the string is always 0-terminated (as it would be "in the wild")
-        input[-1] = b'\0'
+        input[-1] = b"\0"

         # Write the mutated command into the data buffer
         uc.mem_write(INPUT_ADDRESS, input)
-        #uc.reg_write(UC_X86_REG_RIP, main_offset)
+        # uc.reg_write(UC_X86_REG_RIP, main_offset)

     print(f"Starting to fuzz. Running from addr {main_offset} to one of {main_ends}")
     # Start the fuzzer.

@@ -1,5 +1,6 @@
 PREFIX ?= /usr/local
 BIN_PATH = $(PREFIX)/bin
+HELPER_PATH = $(PREFIX)/lib/afl
 DOC_PATH = $(PREFIX)/share/doc/afl

 PROGRAMS = afl-network-client afl-network-server
@@ -31,7 +32,7 @@ afl-network-client: afl-network-client.c
 	$(CC) $(CFLAGS) -I../../include -o afl-network-client afl-network-client.c $(LDFLAGS)

 afl-network-server: afl-network-server.c
-	$(CC) $(CFLAGS) -I../../include -o afl-network-server afl-network-server.c ../../src/afl-forkserver.c ../../src/afl-sharedmem.c ../../src/afl-common.c -DBIN_PATH=\"$(BIN_PATH)\" $(LDFLAGS)
+	$(CC) $(CFLAGS) -I../../include -o afl-network-server afl-network-server.c ../../src/afl-forkserver.c ../../src/afl-sharedmem.c ../../src/afl-common.c -DAFL_PATH=\"$(HELPER_PATH)\" -DBIN_PATH=\"$(BIN_PATH)\" $(LDFLAGS)

 clean:
 	rm -f $(PROGRAMS) *~ core

@@ -11,6 +11,7 @@ import idc
 # See https://www.hex-rays.com/products/ida/support/ida74_idapython_no_bc695_porting_guide.shtml
+
 from os.path import expanduser

 home = expanduser("~")

 patchpoints = set()
@@ -18,7 +19,7 @@ patchpoints = set()
 max_offset = 0
 for seg_ea in idautils.Segments():
     name = idc.get_segm_name(seg_ea)
-    #print("Segment: " + name)
+    # print("Segment: " + name)
     if name != "__text" and name != ".text":
         continue

@@ -26,7 +27,7 @@ for seg_ea in idautils.Segments():
     end = idc.get_segm_end(seg_ea)
     first = 0
     subtract_addr = 0
-    #print("Start: " + hex(start) + " End: " + hex(end))
+    # print("Start: " + hex(start) + " End: " + hex(end))
    for func_ea in idautils.Functions(start, end):
        f = idaapi.get_func(func_ea)
        if not f:
@@ -37,10 +38,10 @@ for seg_ea in idautils.Segments():
                if block.start_ea >= 0x1000:
                    subtract_addr = 0x1000
                    first = 1

                max_offset = max(max_offset, block.start_ea)
                patchpoints.add(block.start_ea - subtract_addr)
-            #else:
+            # else:
             # print("Warning: broken CFG?")

 # Round up max_offset to page size
@@ -52,11 +53,11 @@ if rem != 0:
 print("Writing to " + home + "/Desktop/patches.txt")

 with open(home + "/Desktop/patches.txt", "w") as f:
-    f.write(ida_nalt.get_root_filename() + ':' + hex(size) + '\n')
-    f.write('\n'.join(map(hex, sorted(patchpoints))))
-    f.write('\n')
+    f.write(ida_nalt.get_root_filename() + ":" + hex(size) + "\n")
+    f.write("\n".join(map(hex, sorted(patchpoints))))
+    f.write("\n")

 print("Done, found {} patchpoints".format(len(patchpoints)))

 # For headless script running remove the comment from the next line
-#ida_pro.qexit()
+# ida_pro.qexit()
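
The with-block above fixes the patches.txt layout the IDA script emits: a header line of the form binary-name:hex-size, followed by one hex basic-block offset per line. A small consumer sketch, assuming exactly that layout (the function name is made up for illustration):

# Hypothetical reader for the patches.txt layout written by the IDA script above.
def read_patchpoints(path):
    with open(path) as f:
        name, size = f.readline().rstrip("\n").split(":")
        offsets = [int(line, 16) for line in f if line.strip()]
    return name, int(size, 16), offsets
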
@@ -12,12 +12,13 @@ import random, re, io
 # The XmlMutatorMin class #
 ###########################

+
 class XmlMutatorMin:

     """
     Optionals parameters:
         seed    Seed used by the PRNG (default: "RANDOM")
         verbose Verbosity (default: False)
     """

     def __init__(self, seed="RANDOM", verbose=False):
@@ -41,7 +42,12 @@ class XmlMutatorMin:
         self.tree = None

         # High-level mutators (no database needed)
-        hl_mutators_delete = ["del_node_and_children", "del_node_but_children", "del_attribute", "del_content"] # Delete items
+        hl_mutators_delete = [
+            "del_node_and_children",
+            "del_node_but_children",
+            "del_attribute",
+            "del_content",
+        ] # Delete items
         hl_mutators_fuzz = ["fuzz_attribute"] # Randomly change attribute values

         # Exposed mutators
@@ -74,7 +80,9 @@ class XmlMutatorMin:

         """ Serialize a XML document. Basic wrapper around lxml.tostring() """

-        return ET.tostring(tree, with_tail=False, xml_declaration=True, encoding=tree.docinfo.encoding)
+        return ET.tostring(
+            tree, with_tail=False, xml_declaration=True, encoding=tree.docinfo.encoding
+        )

     def __ver(self, version):

@@ -161,7 +169,7 @@ class XmlMutatorMin:
         # Randomly pick one the function calls
         (func, args) = random.choice(l)
         # Split by "," and randomly pick one of the arguments
-        value = random.choice(args.split(','))
+        value = random.choice(args.split(","))
         # Remove superfluous characters
         unclean_value = value
         value = value.strip(" ").strip("'")
@@ -170,49 +178,49 @@ class XmlMutatorMin:
             value = attrib_value

         # For each type, define some possible replacement values
-        choices_number = ( \
-            "0", \
-            "11111", \
-            "-128", \
-            "2", \
-            "-1", \
-            "1/3", \
-            "42/0", \
-            "1094861636 idiv 1.0", \
-            "-1123329771506872 idiv 3.8", \
-            "17=$numericRTF", \
-            str(3 + random.randrange(0, 100)), \
+        choices_number = (
+            "0",
+            "11111",
+            "-128",
+            "2",
+            "-1",
+            "1/3",
+            "42/0",
+            "1094861636 idiv 1.0",
+            "-1123329771506872 idiv 3.8",
+            "17=$numericRTF",
+            str(3 + random.randrange(0, 100)),
         )

-        choices_letter = ( \
-            "P" * (25 * random.randrange(1, 100)), \
-            "%s%s%s%s%s%s", \
-            "foobar", \
+        choices_letter = (
+            "P" * (25 * random.randrange(1, 100)),
+            "%s%s%s%s%s%s",
+            "foobar",
         )

-        choices_alnum = ( \
-            "Abc123", \
-            "020F0302020204030204", \
-            "020F0302020204030204" * (random.randrange(5, 20)), \
+        choices_alnum = (
+            "Abc123",
+            "020F0302020204030204",
+            "020F0302020204030204" * (random.randrange(5, 20)),
         )

         # Fuzz the value
-        if random.choice((True,False)) and value == "":
+        if random.choice((True, False)) and value == "":

             # Empty
             new_value = value

-        elif random.choice((True,False)) and value.isdigit():
+        elif random.choice((True, False)) and value.isdigit():

             # Numbers
             new_value = random.choice(choices_number)

-        elif random.choice((True,False)) and value.isalpha():
+        elif random.choice((True, False)) and value.isalpha():

             # Letters
             new_value = random.choice(choices_letter)

-        elif random.choice((True,False)) and value.isalnum():
+        elif random.choice((True, False)) and value.isalnum():

             # Alphanumeric
             new_value = random.choice(choices_alnum)
@@ -232,22 +240,25 @@ class XmlMutatorMin:

         # Log something
         if self.verbose:
-            print("Fuzzing attribute #%i '%s' of tag #%i '%s'" % (rand_attrib_id, rand_attrib, rand_elem_id, rand_elem.tag))
+            print(
+                "Fuzzing attribute #%i '%s' of tag #%i '%s'"
+                % (rand_attrib_id, rand_attrib, rand_elem_id, rand_elem.tag)
+            )

         # Modify the attribute
         rand_elem.set(rand_attrib, new_value.decode("utf-8"))

     def __del_node_and_children(self):

-        """ High-level minimizing mutator
-        Delete a random node and its children (i.e. delete a random tree) """
+        """High-level minimizing mutator
+        Delete a random node and its children (i.e. delete a random tree)"""

         self.__del_node(True)

     def __del_node_but_children(self):

-        """ High-level minimizing mutator
-        Delete a random node but its children (i.e. link them to the parent of the deleted node) """
+        """High-level minimizing mutator
+        Delete a random node but its children (i.e. link them to the parent of the deleted node)"""

         self.__del_node(False)

@@ -270,7 +281,10 @@ class XmlMutatorMin:
         # Log something
         if self.verbose:
             but_or_and = "and" if delete_children else "but"
-            print("Deleting tag #%i '%s' %s its children" % (rand_elem_id, rand_elem.tag, but_or_and))
+            print(
+                "Deleting tag #%i '%s' %s its children"
+                % (rand_elem_id, rand_elem.tag, but_or_and)
+            )

         if delete_children is False:
             # Link children of the random (soon to be deleted) node to its parent
@@ -282,8 +296,8 @@ class XmlMutatorMin:

     def __del_content(self):

-        """ High-level minimizing mutator
-        Delete the attributes and children of a random node """
+        """High-level minimizing mutator
+        Delete the attributes and children of a random node"""

         # Select a node to modify
         (rand_elem_id, rand_elem) = self.__pick_element()
@@ -297,8 +311,8 @@ class XmlMutatorMin:

     def __del_attribute(self):

-        """ High-level minimizing mutator
-        Delete a random attribute from a random node """
+        """High-level minimizing mutator
+        Delete a random attribute from a random node"""

         # Select a node to modify
         (rand_elem_id, rand_elem) = self.__pick_element()
@@ -318,7 +332,10 @@ class XmlMutatorMin:

         # Log something
         if self.verbose:
-            print("Deleting attribute #%i '%s' of tag #%i '%s'" % (rand_attrib_id, rand_attrib, rand_elem_id, rand_elem.tag))
+            print(
+                "Deleting attribute #%i '%s' of tag #%i '%s'"
+                % (rand_attrib_id, rand_attrib, rand_elem_id, rand_elem.tag)
+            )

         # Delete the attribute
         rand_elem.attrib.pop(rand_attrib)
@@ -329,4 +346,3 @@ class XmlMutatorMin:

         # High-level mutation
         self.__exec_among(self, self.hl_mutators_all, min, max)
-
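
XmlMutatorMin is only reformatted here; its constructor (seed, verbose) and the init_from_string() entry point used by the wrapper module further below are unchanged. A rough usage sketch follows: mutate() and save_to_string() are assumed method names for illustration and do not appear in the hunks above.

# Sketch only: method names other than __init__ and init_from_string are assumed.
m = XmlMutatorMin(seed=42, verbose=True)
m.init_from_string("<doc a='1'><child>text</child></doc>")
m.mutate(min=1, max=3)      # assumed: applies the high-level mutators listed above
print(m.save_to_string())   # assumed: returns the serialized, mutated XML
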
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # encoding: utf-8
-'''
+"""
 Module containing functions shared between multiple AFL modules

 @author: Christian Holler (:decoder)
@@ -12,7 +12,7 @@ License, v. 2.0. If a copy of the MPL was not distributed with this
 file, You can obtain one at http://mozilla.org/MPL/2.0/.

 @contact: choller@mozilla.com
-'''
+"""

 from __future__ import print_function
 import random
@@ -23,18 +23,18 @@ import re
 def randel(l):
     if not l:
         return None
-    return l[random.randint(0, len(l)-1)]
+    return l[random.randint(0, len(l) - 1)]


 def randel_pop(l):
     if not l:
         return None
-    return l.pop(random.randint(0, len(l)-1))
+    return l.pop(random.randint(0, len(l) - 1))


 def write_exc_example(data, exc):
-    exc_name = re.sub(r'[^a-zA-Z0-9]', '_', repr(exc))
+    exc_name = re.sub(r"[^a-zA-Z0-9]", "_", repr(exc))

     if not os.path.exists(exc_name):
-        with open(exc_name, 'w') as f:
+        with open(exc_name, "w") as f:
             f.write(data)
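
The helpers above are tiny but shared by several custom-mutator modules; a quick illustration (not part of the diff):

pool = [b"GET", b"PUT", b"DEL"]
choice = randel(pool)     # random element, pool is left untouched
taken = randel_pop(pool)  # random element, removed from pool
# write_exc_example(data, exc) stores `data` under a file name derived from repr(exc)
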
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # encoding: utf-8
-'''
+"""
 Example Python Module for AFLFuzz

 @author: Christian Holler (:decoder)
@@ -12,7 +12,7 @@ License, v. 2.0. If a copy of the MPL was not distributed with this
 file, You can obtain one at http://mozilla.org/MPL/2.0/.

 @contact: choller@mozilla.com
-'''
+"""

 import random

@@ -26,12 +26,12 @@ COMMANDS = [


 def init(seed):
-    '''
+    """
     Called once when AFLFuzz starts up. Used to seed our RNG.

     @type seed: int
     @param seed: A 32-bit random value
-    '''
+    """
     random.seed(seed)


@@ -40,7 +40,7 @@ def deinit():


 def fuzz(buf, add_buf, max_size):
-    '''
+    """
     Called per fuzzing iteration.

     @type buf: bytearray
@@ -55,13 +55,14 @@ def fuzz(buf, add_buf, max_size):

     @rtype: bytearray
     @return: A new bytearray containing the mutated data
-    '''
+    """
     ret = bytearray(100)

     ret[:3] = random.choice(COMMANDS)

     return ret

+
 # Uncomment and implement the following methods if you want to use a custom
 # trimming algorithm. See also the documentation for a better API description.
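
The module above is the minimal AFL++ Python custom-mutator example: init() seeds the RNG and fuzz() returns a fresh buffer that begins with a random COMMANDS entry. With the module imported, it can be exercised directly (illustrative only; AFL++ normally calls these hooks through its Python custom-mutator bridge):

init(1234)
mutated = fuzz(bytearray(b"irrelevant"), bytearray(b"also irrelevant"), 4096)
print(bytes(mutated))   # a fresh buffer starting with a randomly chosen COMMANDS entry
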
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # encoding: utf-8
-'''
+"""
 Simple Chunk Cross-Over Replacement Module for AFLFuzz

 @author: Christian Holler (:decoder)
@@ -12,24 +12,24 @@ License, v. 2.0. If a copy of the MPL was not distributed with this
 file, You can obtain one at http://mozilla.org/MPL/2.0/.

 @contact: choller@mozilla.com
-'''
+"""

 import random


 def init(seed):
-    '''
+    """
     Called once when AFLFuzz starts up. Used to seed our RNG.

     @type seed: int
     @param seed: A 32-bit random value
-    '''
+    """
     # Seed our RNG
     random.seed(seed)


 def fuzz(buf, add_buf, max_size):
-    '''
+    """
     Called per fuzzing iteration.

     @type buf: bytearray
@@ -44,7 +44,7 @@ def fuzz(buf, add_buf, max_size):

     @rtype: bytearray
     @return: A new bytearray containing the mutated data
-    '''
+    """
     # Make a copy of our input buffer for returning
     ret = bytearray(buf)

@@ -58,7 +58,9 @@ def fuzz(buf, add_buf, max_size):
     rand_dst_idx = random.randint(0, len(buf))

     # Make the chunk replacement
-    ret[rand_dst_idx:rand_dst_idx + fragment_len] = add_buf[rand_src_idx:rand_src_idx + fragment_len]
+    ret[rand_dst_idx : rand_dst_idx + fragment_len] = add_buf[
+        rand_src_idx : rand_src_idx + fragment_len
+    ]

     # Return data
     return ret
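
fuzz() above copies buf and overwrites one randomly placed chunk with a randomly chosen chunk of add_buf. With the module imported it can be tried standalone (illustrative only):

init(1234)
a = bytearray(b"AAAAAAAAAAAAAAAA")
b = bytearray(b"BBBBBBBBBBBBBBBB")
print(fuzz(a, b, 64))   # a copy of `a` with one chunk replaced by bytes from `b`
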
@@ -27,7 +27,7 @@ def log(text):

 def init(seed):
     """
     Called once when AFL starts up. Seed is used to identify the AFL instance in log files
     """

     global __mutator__
@@ -72,7 +72,10 @@ def fuzz(buf, add_buf, max_size):
     if via_buffer:
         try:
             __mutator__.init_from_string(buf_str)
-            log("fuzz(): Mutator successfully initialized with AFL buffer (%d bytes)" % len(buf_str))
+            log(
+                "fuzz(): Mutator successfully initialized with AFL buffer (%d bytes)"
+                % len(buf_str)
+            )
         except Exception:
             via_buffer = False
             log("fuzz(): Can't initialize mutator with AFL buffer")
@@ -104,7 +107,7 @@ def fuzz(buf, add_buf, max_size):


 # Main (for debug)
-if __name__ == '__main__':
+if __name__ == "__main__":

     __log__ = True
     __log_file__ = "/dev/stdout"
@@ -112,7 +115,9 @@ if __name__ == '__main__':

     init(__seed__)

-    in_1 = bytearray("<foo ddd='eeee'>ffff<a b='c' d='456' eee='ffffff'>zzzzzzzzzzzz</a><b yyy='YYY' zzz='ZZZ'></b></foo>")
+    in_1 = bytearray(
+        "<foo ddd='eeee'>ffff<a b='c' d='456' eee='ffffff'>zzzzzzzzzzzz</a><b yyy='YYY' zzz='ZZZ'></b></foo>"
+    )
     in_2 = bytearray("<abc abc123='456' abcCBA='ppppppppppppppppppppppppppppp'/>")
     out = fuzz(in_1, in_2)
     print(out)

|
|||||||
|
|
||||||
len = __AFL_FUZZ_TESTCASE_LEN; // do not use the macro directly in a call!
|
len = __AFL_FUZZ_TESTCASE_LEN; // do not use the macro directly in a call!
|
||||||
|
|
||||||
fprintf(stderr, "input: %zd \"%s\"\n", len, buf);
|
// fprintf(stderr, "input: %zd \"%s\"\n", len, buf);
|
||||||
|
|
||||||
/* do we have enough data? */
|
/* do we have enough data? */
|
||||||
if (len < 8) continue;
|
if (len < 8) continue;
|
||||||
|