@ -31,7 +31,7 @@ PROGNAME = afl
|
||||
VERSION = $(shell grep '^$(HASH)define VERSION ' ../config.h | cut -d '"' -f2)
|
||||
|
||||
PROGS = afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze
|
||||
SH_PROGS = afl-plot afl-cmin afl-cmin.bash afl-whatsup afl-addseeds afl-system-config afl-persistent-config afl-cc
|
||||
SH_PROGS = afl-plot afl-cmin afl-cmin.bash afl-cmin.py afl-whatsup afl-addseeds afl-system-config afl-persistent-config afl-cc
|
||||
HEADERS = include/afl-fuzz.h include/afl-mutations.h include/afl-persistent-replay.h include/afl-prealloc.h include/afl-record-compat.h include/alloc-inl.h include/android-ashmem.h include/cmplog.h include/common.h include/config.h include/coverage-32.h include/coverage-64.h include/debug.h include/envs.h include/forkserver.h include/hash.h include/list.h include/sharedmem.h include/snapshot-inl.h include/t1ha.h include/t1ha0_ia32aes_b.h include/t1ha_bits.h include/t1ha_selfcheck.h include/types.h include/xxhash.h
|
||||
MANPAGES=$(foreach p, $(PROGS) $(SH_PROGS), $(p).8)
|
||||
ASAN_OPTIONS=detect_leaks=0
|
||||
|
@ -148,7 +148,7 @@ afl-common.o: ./src/afl-common.c
|
||||
$(PASSES): instrumentation/afl-gcc-common.h
|
||||
|
||||
./afl-gcc-pass.so: instrumentation/afl-gcc-pass.so.cc | test_deps
|
||||
$(CXX) $(CXXEFLAGS) $(PLUGIN_FLAGS) -shared $< -o $@
|
||||
$(CXX) $(CXXEFLAGS) $(PLUGIN_FLAGS) -shared $< -o $@ $(LDFLAGS)
|
||||
ln -sf afl-cc afl-gcc-fast
|
||||
ln -sf afl-cc afl-g++-fast
|
||||
ln -sf afl-cc.8 afl-gcc-fast.8
|
||||
|
@ -32,8 +32,8 @@ VERSION = $(shell grep '^ *$(HASH)define VERSION ' ./config.h | cut -d '"' -
|
||||
|
||||
SYS = $(shell uname -s)
|
||||
|
||||
override LLVM_TOO_NEW_DEFAULT := 19
|
||||
override LLVM_TOO_OLD_DEFAULT := 13
|
||||
override LLVM_TOO_NEW_DEFAULT := 20
|
||||
override LLVM_TOO_OLD_DEFAULT := 14
|
||||
|
||||
ifeq "$(SYS)" "OpenBSD"
|
||||
LLVM_CONFIG ?= $(BIN_PATH)/llvm-config
|
||||
@ -470,7 +470,7 @@ endif
|
||||
|
||||
./afl-ld-lto: src/afl-ld-lto.c
|
||||
ifeq "$(LLVM_LTO)" "1"
|
||||
$(CC) $(CFLAGS) $(CPPFLAGS) $< -o $@
|
||||
$(CC) $(CFLAGS) $(CPPFLAGS) $< -o $@ $(LDFLAGS)
|
||||
ifdef IS_IOS
|
||||
@ldid -Sentitlements.plist $@ && echo "[+] Signed $@" || { echo "[-] Failed to sign $@"; }
|
||||
endif
|
||||
|
@ -4,7 +4,7 @@
|
||||
|
||||
Release version: [4.32c](https://github.com/AFLplusplus/AFLplusplus/releases)
|
||||
|
||||
GitHub version: 4.32c
|
||||
GitHub version: 4.33a
|
||||
|
||||
Repository:
|
||||
[https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus)
|
||||
@ -230,7 +230,7 @@ Thank you! (For people sending pull requests - please add yourself to this list
|
||||
fuzzah @intrigus-lgtm
|
||||
Yaakov Saxon Sergej Schumilo
|
||||
Ziqiao Kong Ryan Berger
|
||||
Sangjun Park
|
||||
Sangjun Park Scott Guest
|
||||
```
|
||||
|
||||
</details>
|
||||
|
9
afl-cmin
@ -1,12 +1,19 @@
|
||||
#!/usr/bin/env sh
|
||||
THISPATH=`dirname ${0}`
|
||||
|
||||
# call afl-cmin.py if it can be executed successfully.
|
||||
if $THISPATH/afl-cmin.py --help > /dev/null 2>&1; then
|
||||
exec $THISPATH/afl-cmin.py "$@"
|
||||
fi
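# Example (hypothetical corpus/target paths): the shell wrapper and the Python
# implementation take the same basic options, so either of these invocations
# can be used once afl-cmin.py is runnable:
#   afl-cmin -i corpus/ -o corpus_min/ -- ./target @@
#   afl-cmin.py -i corpus/ -o corpus_min/ -- ./target @@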
|
||||
|
||||
SYS=$(uname -s)
|
||||
test "$SYS" = "Darwin" && {
|
||||
echo Error: afl-cmin does not work on Apple currently. Please use afl-cmin.bash instead.
|
||||
exit 1
|
||||
}
|
||||
|
||||
export AFL_QUIET=1
|
||||
export ASAN_OPTIONS=detect_leaks=0
|
||||
THISPATH=`dirname ${0}`
|
||||
export PATH="${THISPATH}:$PATH"
|
||||
awk -f - -- ${@+"$@"} <<'EOF'
|
||||
#!/usr/bin/awk -f
|
||||
|
760
afl-cmin.py
Executable file
@ -0,0 +1,760 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright 2016-2025 Google Inc.
|
||||
# Copyright 2025 AFLplusplus Project. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import argparse
|
||||
import array
|
||||
import base64
|
||||
import collections
|
||||
import glob
|
||||
import hashlib
|
||||
import itertools
|
||||
import logging
|
||||
import multiprocessing
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
# https://more-itertools.readthedocs.io/en/stable/_modules/more_itertools/recipes.html#batched
|
||||
from sys import hexversion
|
||||
|
||||
def _batched(iterable, n, *, strict=False):
|
||||
"""Batch data into tuples of length *n*. If the number of items in
|
||||
*iterable* is not divisible by *n*:
|
||||
* The last batch will be shorter if *strict* is ``False``.
|
||||
* :exc:`ValueError` will be raised if *strict* is ``True``.
|
||||
|
||||
>>> list(batched('ABCDEFG', 3))
|
||||
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)]
|
||||
|
||||
On Python 3.13 and above, this is an alias for :func:`itertools.batched`.
|
||||
"""
|
||||
if n < 1:
|
||||
raise ValueError('n must be at least one')
|
||||
iterator = iter(iterable)
|
||||
while batch := tuple(itertools.islice(iterator, n)):
|
||||
if strict and len(batch) != n:
|
||||
raise ValueError('batched(): incomplete batch')
|
||||
yield batch
|
||||
|
||||
|
||||
if hexversion >= 0x30D00A2: # pragma: no cover
|
||||
from itertools import batched as itertools_batched
|
||||
def batched(iterable, n, *, strict=False):
|
||||
return itertools_batched(iterable, n, strict=strict)
|
||||
else:
|
||||
batched = _batched
|
||||
|
||||
batched.__doc__ = _batched.__doc__
|
||||
|
||||
try:
|
||||
from tqdm import tqdm
|
||||
except ImportError:
|
||||
print('Hint: install python module "tqdm" to show progress bar')
|
||||
|
||||
class tqdm:
|
||||
|
||||
def __init__(self, data=None, *args, **argd):
|
||||
self.data = data
|
||||
|
||||
def __iter__(self):
|
||||
yield from self.data
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
pass
|
||||
|
||||
def update(self, *args):
|
||||
pass
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
cpu_count = multiprocessing.cpu_count()
|
||||
group = parser.add_argument_group("Required parameters")
|
||||
group.add_argument(
|
||||
"-i",
|
||||
dest="input",
|
||||
action="append",
|
||||
metavar="dir",
|
||||
required=True,
|
||||
help="input directory with the starting corpus",
|
||||
)
|
||||
group.add_argument(
|
||||
"-o",
|
||||
dest="output",
|
||||
metavar="dir",
|
||||
required=True,
|
||||
help="output directory for minimized files",
|
||||
)
|
||||
|
||||
group = parser.add_argument_group("Execution control settings")
|
||||
group.add_argument(
|
||||
"-f",
|
||||
dest="stdin_file",
|
||||
metavar="file",
|
||||
help="location read by the fuzzed program (stdin)",
|
||||
)
|
||||
group.add_argument(
|
||||
"-m",
|
||||
dest="memory_limit",
|
||||
default="none",
|
||||
metavar="megs",
|
||||
type=lambda x: x if x == "none" else int(x),
|
||||
help="memory limit for child process (default: %(default)s)",
|
||||
)
|
||||
group.add_argument(
|
||||
"-t",
|
||||
dest="time_limit",
|
||||
default=5000,
|
||||
metavar="msec",
|
||||
type=lambda x: x if x == "none" else int(x),
|
||||
help="timeout for each run (default: %(default)s)",
|
||||
)
|
||||
group.add_argument(
|
||||
"-O",
|
||||
dest="frida_mode",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="use binary-only instrumentation (FRIDA mode)",
|
||||
)
|
||||
group.add_argument(
|
||||
"-Q",
|
||||
dest="qemu_mode",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="use binary-only instrumentation (QEMU mode)",
|
||||
)
|
||||
group.add_argument(
|
||||
"-U",
|
||||
dest="unicorn_mode",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="use unicorn-based instrumentation (Unicorn mode)",
|
||||
)
|
||||
group.add_argument(
|
||||
"-X", dest="nyx_mode", action="store_true", default=False, help="use Nyx mode"
|
||||
)
|
||||
|
||||
group = parser.add_argument_group("Minimization settings")
|
||||
group.add_argument(
|
||||
"--crash-dir",
|
||||
dest="crash_dir",
|
||||
metavar="dir",
|
||||
default=None,
|
||||
help="move crashes to a separate dir, always deduplicated",
|
||||
)
|
||||
group.add_argument(
|
||||
"-A",
|
||||
dest="allow_any",
|
||||
action="store_true",
|
||||
help="allow crashes and timeouts (not recommended)",
|
||||
)
|
||||
group.add_argument(
|
||||
"-C",
|
||||
dest="crash_only",
|
||||
action="store_true",
|
||||
help="keep crashing inputs, reject everything else",
|
||||
)
|
||||
group.add_argument(
|
||||
"-e",
|
||||
dest="edge_mode",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="solve for edge coverage only, ignore hit counts",
|
||||
)
|
||||
|
||||
group = parser.add_argument_group("Misc")
|
||||
group.add_argument(
|
||||
"-T",
|
||||
dest="workers",
|
||||
type=lambda x: cpu_count if x == "all" else int(x),
|
||||
default=1,
|
||||
help="number of concurrent worker (default: %(default)d)",
|
||||
)
|
||||
group.add_argument(
|
||||
"--as_queue",
|
||||
action="store_true",
|
||||
help='output file name like "id:000000,hash:value"',
|
||||
)
|
||||
group.add_argument(
|
||||
"--no-dedup", action="store_true", help="skip deduplication step for corpus files"
|
||||
)
|
||||
group.add_argument("--debug", action="store_true")
|
||||
|
||||
parser.add_argument("exe", metavar="/path/to/target_app")
|
||||
parser.add_argument("args", nargs="*")
|
||||
|
||||
args = parser.parse_args()
|
||||
logger = None
|
||||
afl_showmap_bin = None
|
||||
tuple_index_type_code = "I"
|
||||
file_index_type_code = None
|
||||
|
||||
|
||||
def init():
|
||||
global logger
|
||||
log_level = logging.DEBUG if args.debug else logging.INFO
|
||||
logging.basicConfig(
|
||||
level=log_level, format="%(asctime)s - %(levelname)s - %(message)s"
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if args.stdin_file and args.workers > 1:
|
||||
logger.error("-f is only supported with one worker (-T 1)")
|
||||
sys.exit(1)
|
||||
|
||||
if args.memory_limit != "none" and args.memory_limit < 5:
|
||||
logger.error("dangerously low memory limit")
|
||||
sys.exit(1)
|
||||
|
||||
if args.time_limit != "none" and args.time_limit < 10:
|
||||
logger.error("dangerously low timeout")
|
||||
sys.exit(1)
|
||||
|
||||
if not os.path.isfile(args.exe):
|
||||
logger.error('binary "%s" not found or not regular file', args.exe)
|
||||
sys.exit(1)
|
||||
|
||||
if not os.environ.get("AFL_SKIP_BIN_CHECK") and not any(
|
||||
[args.qemu_mode, args.frida_mode, args.unicorn_mode, args.nyx_mode]
|
||||
):
|
||||
if b"__AFL_SHM_ID" not in open(args.exe, "rb").read():
|
||||
logger.error("binary '%s' doesn't appear to be instrumented", args.exe)
|
||||
sys.exit(1)
|
||||
|
||||
for dn in args.input:
|
||||
if not os.path.isdir(dn) and not glob.glob(dn):
|
||||
logger.error('directory "%s" not found', dn)
|
||||
sys.exit(1)
|
||||
|
||||
global afl_showmap_bin
|
||||
searches = [
|
||||
None,
|
||||
os.path.dirname(__file__),
|
||||
os.getcwd(),
|
||||
]
|
||||
if os.environ.get("AFL_PATH"):
|
||||
searches.append(os.environ["AFL_PATH"])
|
||||
|
||||
for search in searches:
|
||||
afl_showmap_bin = shutil.which("afl-showmap", path=search)
|
||||
if afl_showmap_bin:
|
||||
break
|
||||
if not afl_showmap_bin:
|
||||
logger.fatal("cannot find afl-showmap, please set AFL_PATH")
|
||||
sys.exit(1)
|
||||
|
||||
trace_dir = os.path.join(args.output, ".traces")
|
||||
shutil.rmtree(trace_dir, ignore_errors=True)
|
||||
try:
|
||||
os.rmdir(args.output)
|
||||
except OSError:
|
||||
pass
|
||||
if os.path.exists(args.output):
|
||||
logger.error(
|
||||
'directory "%s" exists and is not empty - delete it first', args.output
|
||||
)
|
||||
sys.exit(1)
|
||||
if args.crash_dir and not os.path.exists(args.crash_dir):
|
||||
os.makedirs(args.crash_dir)
|
||||
os.makedirs(trace_dir)
|
||||
|
||||
logger.info("use %d workers (-T)", args.workers)
|
||||
|
||||
|
||||
def detect_type_code(size):
|
||||
for type_code in ["B", "H", "I", "L", "Q"]:
|
||||
if 256 ** array.array(type_code).itemsize > size:
|
||||
return type_code
|
||||
|
||||
|
||||
def afl_showmap(input_path=None, batch=None, afl_map_size=None, first=False):
|
||||
assert input_path or batch
|
||||
# yapf: disable
|
||||
cmd = [
|
||||
afl_showmap_bin,
|
||||
'-m', str(args.memory_limit),
|
||||
'-t', str(args.time_limit),
|
||||
'-Z', # cmin mode
|
||||
]
|
||||
# yapf: enable
|
||||
found_atat = False
|
||||
for arg in args.args:
|
||||
if "@@" in arg:
|
||||
found_atat = True
|
||||
|
||||
if args.stdin_file:
|
||||
assert args.workers == 1
|
||||
input_from_file = True
|
||||
stdin_file = args.stdin_file
|
||||
cmd += ["-H", stdin_file]
|
||||
elif found_atat:
|
||||
input_from_file = True
|
||||
stdin_file = os.path.join(args.output, f".input.{os.getpid()}")
|
||||
cmd += ["-H", stdin_file]
|
||||
else:
|
||||
input_from_file = False
|
||||
|
||||
if batch:
|
||||
input_from_file = True
|
||||
filelist = os.path.join(args.output, f".filelist.{os.getpid()}")
|
||||
with open(filelist, "w") as f:
|
||||
for _, path in batch:
|
||||
f.write(path + "\n")
|
||||
cmd += ["-I", filelist]
|
||||
output_path = os.path.join(args.output, f".showmap.{os.getpid()}")
|
||||
cmd += ["-o", output_path]
|
||||
else:
|
||||
if input_from_file:
|
||||
shutil.copy(input_path, stdin_file)
|
||||
cmd += ["-o", "-"]
|
||||
|
||||
if args.frida_mode:
|
||||
cmd += ["-O"]
|
||||
if args.qemu_mode:
|
||||
cmd += ["-Q"]
|
||||
if args.unicorn_mode:
|
||||
cmd += ["-U"]
|
||||
if args.nyx_mode:
|
||||
cmd += ["-X"]
|
||||
if args.edge_mode:
|
||||
cmd += ["-e"]
|
||||
cmd += ["--", args.exe] + args.args
|
||||
|
||||
env = os.environ.copy()
|
||||
env["AFL_QUIET"] = "1"
|
||||
env["ASAN_OPTIONS"] = "detect_leaks=0"
|
||||
if first:
|
||||
logger.debug("run command line: %s", subprocess.list2cmdline(cmd))
|
||||
env["AFL_CMIN_ALLOW_ANY"] = "1"
|
||||
if afl_map_size:
|
||||
env["AFL_MAP_SIZE"] = str(afl_map_size)
|
||||
if args.crash_only:
|
||||
env["AFL_CMIN_CRASHES_ONLY"] = "1"
|
||||
if args.allow_any:
|
||||
env["AFL_CMIN_ALLOW_ANY"] = "1"
|
||||
|
||||
if input_from_file:
|
||||
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env, bufsize=1048576)
|
||||
else:
|
||||
p = subprocess.Popen(
|
||||
cmd,
|
||||
stdin=open(input_path, "rb"),
|
||||
stdout=subprocess.PIPE,
|
||||
env=env,
|
||||
bufsize=1048576,
|
||||
)
|
||||
out = p.stdout.read()
|
||||
p.wait()
|
||||
|
||||
if batch:
|
||||
result = []
|
||||
for idx, input_path in batch:
|
||||
basename = os.path.basename(input_path)
|
||||
values = []
|
||||
try:
|
||||
trace_file = os.path.join(output_path, basename)
|
||||
with open(trace_file, "r") as f:
|
||||
values = list(map(int, f))
|
||||
crashed = len(values) == 0
|
||||
os.unlink(trace_file)
|
||||
except FileNotFoundError:
|
||||
a = None
|
||||
crashed = True
|
||||
values = [(t // 1000) * 9 + t % 1000 for t in values]
|
||||
a = array.array(tuple_index_type_code, values)
|
||||
result.append((idx, a, crashed))
|
||||
os.unlink(filelist)
|
||||
os.rmdir(output_path)
|
||||
return result
|
||||
else:
|
||||
values = []
|
||||
for line in out.split():
|
||||
if not line.isdigit():
|
||||
continue
|
||||
values.append(int(line))
|
||||
values = [(t // 1000) * 9 + t % 1000 for t in values]
|
||||
a = array.array(tuple_index_type_code, values)
|
||||
crashed = p.returncode in [2, 3]
|
||||
if input_from_file and stdin_file != args.stdin_file:
|
||||
os.unlink(stdin_file)
|
||||
return a, crashed
|
||||
|
||||
|
||||
class JobDispatcher(multiprocessing.Process):
|
||||
|
||||
def __init__(self, job_queue, jobs):
|
||||
super().__init__()
|
||||
self.job_queue = job_queue
|
||||
self.jobs = jobs
|
||||
|
||||
def run(self):
|
||||
for job in self.jobs:
|
||||
self.job_queue.put(job)
|
||||
self.job_queue.close()
|
||||
|
||||
|
||||
class Worker(multiprocessing.Process):
|
||||
|
||||
def __init__(self, idx, afl_map_size, q_in, p_out, r_out):
|
||||
super().__init__()
|
||||
self.idx = idx
|
||||
self.afl_map_size = afl_map_size
|
||||
self.q_in = q_in
|
||||
self.p_out = p_out
|
||||
self.r_out = r_out
|
||||
|
||||
def run(self):
|
||||
map_size = self.afl_map_size or 65536
|
||||
max_tuple = map_size * 9
|
||||
max_file_index = 256 ** array.array(file_index_type_code).itemsize - 1
|
||||
m = array.array(file_index_type_code, [max_file_index] * max_tuple)
|
||||
counter = collections.Counter()
|
||||
crashes = []
|
||||
|
||||
pack_name = os.path.join(args.output, ".traces", f"{self.idx}.pack")
|
||||
pack_pos = 0
|
||||
with open(pack_name, "wb") as trace_pack:
|
||||
while True:
|
||||
batch = self.q_in.get()
|
||||
if batch is None:
|
||||
break
|
||||
|
||||
for idx, r, crash in afl_showmap(
|
||||
batch=batch, afl_map_size=self.afl_map_size
|
||||
):
|
||||
counter.update(r)
|
||||
|
||||
used = False
|
||||
|
||||
if crash:
|
||||
crashes.append(idx)
|
||||
|
||||
# If we aren't saving crashes to a separate dir, handle them
|
||||
# the same as other inputs. However, unless AFL_CMIN_ALLOW_ANY=1,
|
||||
# afl_showmap will not return any coverage for crashes so they will
|
||||
# never be retained.
|
||||
if not crash or not args.crash_dir:
|
||||
for t in r:
|
||||
if idx < m[t]:
|
||||
m[t] = idx
|
||||
used = True
|
||||
|
||||
if used:
|
||||
tuple_count = len(r)
|
||||
r.tofile(trace_pack)
|
||||
self.p_out.put((idx, self.idx, pack_pos, tuple_count))
|
||||
pack_pos += tuple_count * r.itemsize
|
||||
else:
|
||||
self.p_out.put(None)
|
||||
|
||||
self.r_out.put((self.idx, m, counter, crashes))
|
||||
|
||||
|
||||
class CombineTraceWorker(multiprocessing.Process):
|
||||
|
||||
def __init__(self, pack_name, jobs, r_out):
|
||||
super().__init__()
|
||||
self.pack_name = pack_name
|
||||
self.jobs = jobs
|
||||
self.r_out = r_out
|
||||
|
||||
def run(self):
|
||||
already_have = set()
|
||||
with open(self.pack_name, "rb") as f:
|
||||
for pos, tuple_count in self.jobs:
|
||||
f.seek(pos)
|
||||
result = array.array(tuple_index_type_code)
|
||||
result.fromfile(f, tuple_count)
|
||||
already_have.update(result)
|
||||
self.r_out.put(already_have)
|
||||
|
||||
|
||||
def hash_file(path):
|
||||
m = hashlib.sha1()
|
||||
with open(path, "rb") as f:
|
||||
m.update(f.read())
|
||||
return m.digest()
|
||||
|
||||
|
||||
def dedup(files):
|
||||
with multiprocessing.Pool(args.workers) as pool:
|
||||
seen_hash = set()
|
||||
result = []
|
||||
hash_list = []
|
||||
# use large chunksize to reduce multiprocessing overhead
|
||||
chunksize = max(1, min(256, len(files) // args.workers))
|
||||
for i, h in enumerate(
|
||||
tqdm(
|
||||
pool.imap(hash_file, files, chunksize),
|
||||
desc="dedup",
|
||||
total=len(files),
|
||||
ncols=0,
|
||||
leave=(len(files) > 100000),
|
||||
)
|
||||
):
|
||||
if h in seen_hash:
|
||||
continue
|
||||
seen_hash.add(h)
|
||||
result.append(files[i])
|
||||
hash_list.append(h)
|
||||
return result, hash_list
|
||||
|
||||
|
||||
def is_afl_dir(dirnames, filenames):
|
||||
return (
|
||||
"queue" in dirnames
|
||||
and "hangs" in dirnames
|
||||
and "crashes" in dirnames
|
||||
and "fuzzer_setup" in filenames
|
||||
)
|
||||
|
||||
|
||||
def collect_files(input_paths):
|
||||
paths = []
|
||||
for s in input_paths:
|
||||
paths += glob.glob(s)
|
||||
|
||||
files = []
|
||||
with tqdm(desc="search", unit=" files", ncols=0) as pbar:
|
||||
for path in paths:
|
||||
for root, dirnames, filenames in os.walk(path, followlinks=True):
|
||||
for dirname in dirnames:
|
||||
if dirname.startswith("."):
|
||||
dirnames.remove(dirname)
|
||||
|
||||
if not args.crash_only and is_afl_dir(dirnames, filenames):
|
||||
continue
|
||||
|
||||
for filename in filenames:
|
||||
if filename.startswith("."):
|
||||
continue
|
||||
pbar.update(1)
|
||||
files.append(os.path.join(root, filename))
|
||||
return files
|
||||
|
||||
|
||||
def main():
|
||||
init()
|
||||
|
||||
files = collect_files(args.input)
|
||||
if len(files) == 0:
|
||||
logger.error("no inputs in the target directory - nothing to be done")
|
||||
sys.exit(1)
|
||||
logger.info("Found %d input files in %d directories", len(files), len(args.input))
|
||||
|
||||
if not args.no_dedup:
|
||||
files, hash_list = dedup(files)
|
||||
logger.info("Remain %d files after dedup", len(files))
|
||||
else:
|
||||
logger.info("Skipping file deduplication.")
|
||||
|
||||
global file_index_type_code
|
||||
file_index_type_code = detect_type_code(len(files))
|
||||
|
||||
logger.info("Sorting files.")
|
||||
with multiprocessing.Pool(args.workers) as pool:
|
||||
chunksize = max(1, min(512, len(files) // args.workers))
|
||||
size_list = list(pool.map(os.path.getsize, files, chunksize))
|
||||
idxes = sorted(range(len(files)), key=lambda x: size_list[x])
|
||||
files = [files[idx] for idx in idxes]
|
||||
hash_list = [hash_list[idx] for idx in idxes]
|
||||
|
||||
afl_map_size = None
|
||||
if b"AFL_DUMP_MAP_SIZE" in open(args.exe, "rb").read():
|
||||
output = subprocess.run(
|
||||
[args.exe], capture_output=True, env={"AFL_DUMP_MAP_SIZE": "1"}
|
||||
).stdout
|
||||
afl_map_size = int(output)
|
||||
logger.info("Setting AFL_MAP_SIZE=%d", afl_map_size)
|
||||
|
||||
global tuple_index_type_code
|
||||
tuple_index_type_code = detect_type_code(afl_map_size * 9)
|
||||
|
||||
logger.info("Testing the target binary")
|
||||
tuples, _ = afl_showmap(files[0], afl_map_size=afl_map_size, first=True)
|
||||
if tuples:
|
||||
logger.info("ok, %d tuples recorded", len(tuples))
|
||||
else:
|
||||
logger.error("no instrumentation output detected")
|
||||
sys.exit(1)
|
||||
|
||||
job_queue = multiprocessing.Queue()
|
||||
progress_queue = multiprocessing.Queue()
|
||||
result_queue = multiprocessing.Queue()
|
||||
|
||||
workers = []
|
||||
for i in range(args.workers):
|
||||
p = Worker(i, afl_map_size, job_queue, progress_queue, result_queue)
|
||||
p.start()
|
||||
workers.append(p)
|
||||
|
||||
chunk = max(1, min(128, len(files) // args.workers))
|
||||
jobs = list(batched(enumerate(files), chunk))
|
||||
jobs += [None] * args.workers # sentinel
|
||||
|
||||
dispatcher = JobDispatcher(job_queue, jobs)
|
||||
dispatcher.start()
|
||||
|
||||
logger.info("Processing traces")
|
||||
effective = 0
|
||||
trace_info = {}
|
||||
for _ in tqdm(files, ncols=0, smoothing=0.01):
|
||||
r = progress_queue.get()
|
||||
if r is not None:
|
||||
idx, worker_idx, pos, tuple_count = r
|
||||
trace_info[idx] = worker_idx, pos, tuple_count
|
||||
effective += 1
|
||||
dispatcher.join()
|
||||
|
||||
logger.info("Obtaining trace results")
|
||||
ms = []
|
||||
crashes = []
|
||||
counter = collections.Counter()
|
||||
for _ in tqdm(range(args.workers), ncols=0):
|
||||
idx, m, c, crs = result_queue.get()
|
||||
ms.append(m)
|
||||
counter.update(c)
|
||||
crashes.extend(crs)
|
||||
workers[idx].join()
|
||||
best_idxes = list(map(min, zip(*ms)))
|
||||
|
||||
if not args.crash_dir:
|
||||
logger.info(
|
||||
"Found %d unique tuples across %d files (%d effective)",
|
||||
len(counter),
|
||||
len(files),
|
||||
effective,
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
"Found %d unique tuples across %d files (%d effective, %d crashes)",
|
||||
len(counter),
|
||||
len(files),
|
||||
effective,
|
||||
len(crashes),
|
||||
)
|
||||
all_unique = counter.most_common()
|
||||
|
||||
logger.info("Processing candidates and writing output")
|
||||
already_have = set()
|
||||
count = 0
|
||||
|
||||
def save_file(idx):
|
||||
input_path = files[idx]
|
||||
fn = (
|
||||
base64.b16encode(hash_list[idx]).decode("utf8").lower()
|
||||
if not args.no_dedup
|
||||
else os.path.basename(input_path)
|
||||
)
|
||||
if args.as_queue:
|
||||
if args.no_dedup:
|
||||
fn = "id:%06d,orig:%s" % (count, fn)
|
||||
else:
|
||||
fn = "id:%06d,hash:%s" % (count, fn)
|
||||
output_path = os.path.join(args.output, fn)
|
||||
try:
|
||||
os.link(input_path, output_path)
|
||||
except OSError:
|
||||
shutil.copy(input_path, output_path)
|
||||
|
||||
jobs = [[] for i in range(args.workers)]
|
||||
saved = set()
|
||||
for t, c in all_unique:
|
||||
if c != 1:
|
||||
continue
|
||||
idx = best_idxes[t]
|
||||
if idx in saved:
|
||||
continue
|
||||
save_file(idx)
|
||||
saved.add(idx)
|
||||
count += 1
|
||||
|
||||
worker_idx, pos, tuple_count = trace_info[idx]
|
||||
job = (pos, tuple_count)
|
||||
jobs[worker_idx].append(job)
|
||||
|
||||
trace_packs = []
|
||||
workers = []
|
||||
for i in range(args.workers):
|
||||
pack_name = os.path.join(args.output, ".traces", f"{i}.pack")
|
||||
trace_f = open(pack_name, "rb")
|
||||
trace_packs.append(trace_f)
|
||||
|
||||
p = CombineTraceWorker(pack_name, jobs[i], result_queue)
|
||||
p.start()
|
||||
workers.append(p)
|
||||
|
||||
for _ in range(args.workers):
|
||||
result = result_queue.get()
|
||||
already_have.update(result)
|
||||
|
||||
for t, c in tqdm(list(reversed(all_unique)), ncols=0):
|
||||
if t in already_have:
|
||||
continue
|
||||
|
||||
idx = best_idxes[t]
|
||||
save_file(idx)
|
||||
count += 1
|
||||
|
||||
worker_idx, pos, tuple_count = trace_info[idx]
|
||||
trace_pack = trace_packs[worker_idx]
|
||||
trace_pack.seek(pos)
|
||||
result = array.array(tuple_index_type_code)
|
||||
result.fromfile(trace_pack, tuple_count)
|
||||
|
||||
already_have.update(result)
|
||||
|
||||
for f in trace_packs:
|
||||
f.close()
|
||||
|
||||
if args.crash_dir:
|
||||
logger.info("Saving crashes to %s", args.crash_dir)
|
||||
crash_files = [files[c] for c in crashes]
|
||||
|
||||
if args.no_dedup:
|
||||
# Unless we deduped previously, we have to dedup the crash files
|
||||
# now.
|
||||
crash_files, hash_list = dedup(crash_files)
|
||||
|
||||
for idx, crash_path in enumerate(crash_files):
|
||||
fn = base64.b16encode(hash_list[idx]).decode("utf8").lower()
|
||||
output_path = os.path.join(args.crash_dir, fn)
|
||||
try:
|
||||
os.link(crash_path, output_path)
|
||||
except OSError:
|
||||
try:
|
||||
shutil.copy(crash_path, output_path)
|
||||
except shutil.Error:
|
||||
# This error happens when src and dest are hardlinks of the
|
||||
# same file. We have nothing to do in this case, but handle
|
||||
# it gracefully.
|
||||
pass
|
||||
|
||||
if count == 1:
|
||||
logger.warning("all test cases had the same traces, check syntax!")
|
||||
logger.info('narrowed down to %s files, saved in "%s"', count, args.output)
|
||||
if not os.environ.get("AFL_KEEP_TRACES"):
|
||||
logger.info("Deleting trace files")
|
||||
trace_dir = os.path.join(args.output, ".traces")
|
||||
shutil.rmtree(trace_dir, ignore_errors=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -4,7 +4,7 @@ CFLAGS = -O3 -funroll-loops -fPIC
|
||||
all: aflpp-standalone
|
||||
|
||||
aflpp-standalone: aflpp-standalone.c
|
||||
$(CC) $(CFLAGS) -DBIN_PATH=\"foo\" -I../../../include -I. -o aflpp-standalone aflpp-standalone.c ../../../src/afl-performance.c ../../../src/afl-fuzz-extras.c ../../../src/afl-common.c
|
||||
$(CC) $(CFLAGS) -w -DBIN_PATH=\"foo\" -I../../../include -I. -o aflpp-standalone aflpp-standalone.c ../../../src/afl-performance.c ../../../src/afl-fuzz-extras.c ../../../src/afl-common.c
|
||||
|
||||
clean:
|
||||
rm -f *.o *~ aflpp-standalone core
|
||||
|
@ -1,19 +1,20 @@
|
||||
|
||||
CFLAGS = -g -O3 -funroll-loops -fPIC -D_STANDALONE_MODULE=1 -Wno-implicit-function-declaration
|
||||
CFLAGS = -g -O3 -funroll-loops -fPIC -D_STANDALONE_MODULE=1 -Wno-pointer-sign
|
||||
CXXFLAGS= -g -O3 -funroll-loops -fPIC -D_STANDALONE_MODULE=1
|
||||
|
||||
all: autotokens-standalone
|
||||
|
||||
autotokens.o: ../autotokens.cpp
|
||||
$(CXX) $(CXXFLAGS) -I../../../include -I. -I../.. -c ../autotokens.cpp
|
||||
$(CXX) $(CXXFLAGS) -g -I../../../include -I. -I../.. -c ../autotokens.cpp
|
||||
|
||||
autotokens-standalone: autotokens-standalone.c autotokens.o
|
||||
$(CC) $(CFLAGS) -DBIN_PATH=\"foo\" -I../../../include -I. -c autotokens-standalone.c
|
||||
$(CC) $(CFLAGS) -DBIN_PATH=\"foo\" -I../../../include -I. -c ../../../src/afl-performance.c
|
||||
$(CC) $(CFLAGS) -DBIN_PATH=\"foo\" -I../../../include -I. -c ../../../src/afl-fuzz-extras.c
|
||||
$(CC) $(CFLAGS) -DBIN_PATH=\"foo\" -I../../../include -I. -c ../../../src/afl-fuzz-queue.c
|
||||
$(CC) $(CFLAGS) -DBIN_PATH=\"foo\" -I../../../include -I. -c ../../../src/afl-common.c
|
||||
$(CXX) $(CFLAGS) -DBIN_PATH=\"foo\" -I../../../include -I. -o autotokens-standalone *.o
|
||||
$(CC) $(CFLAGS) -g -DBIN_PATH=\"foo\" -I../../../include -I. -c autotokens-standalone.c
|
||||
$(CC) $(CFLAGS) -g -DBIN_PATH=\"foo\" -I../../../include -I. -c ../../../src/afl-performance.c
|
||||
$(CC) $(CFLAGS) -g -DBIN_PATH=\"foo\" -I../../../include -I. -c ../../../src/afl-fuzz-extras.c
|
||||
$(CC) $(CFLAGS) -g -DBIN_PATH=\"foo\" -I../../../include -I. -c ../../../src/afl-fuzz-queue.c
|
||||
$(CC) $(CFLAGS) -g -DBIN_PATH=\"foo\" -I../../../include -I. -c ../../../src/afl-common.c
|
||||
$(CXX) $(CFLAGS) -g -DBIN_PATH=\"foo\" -I../../../include -I. -o autotokens-standalone *.o
|
||||
@rm -f ../../../src/afl-common.o ../../../src/afl-fuzz-queue.o ../../../src/afl-fuzz-extras.o ../../../src/afl-performance.o
|
||||
|
||||
clean:
|
||||
rm -f *.o *~ autotokens-standalone core
|
||||
|
@ -1,15 +1,28 @@
|
||||
#include "afl-fuzz.h"
|
||||
#include "afl-mutations.h"
|
||||
#include "forkserver.h"
|
||||
|
||||
#include <unistd.h>
|
||||
#include <getopt.h>
|
||||
|
||||
static int max_havoc = 16, verbose;
|
||||
static unsigned char *dict, *mh = "16";
|
||||
static char _mh[4] = "16";
|
||||
static char *dict, *mh = _mh;
|
||||
|
||||
extern int module_disabled;
|
||||
|
||||
void *afl_custom_init(afl_state_t *, unsigned int);
|
||||
u8 afl_custom_queue_get(void *data, const u8 *filename);
|
||||
size_t afl_custom_fuzz(void *data, u8 *buf, size_t buf_size, u8 **out_buf,
|
||||
u8 *add_buf, size_t add_buf_size, size_t max_size);
|
||||
|
||||
u32 write_to_testcase(afl_state_t *afl, void **mem, u32 a, u32 b) {
|
||||
return 0;
|
||||
}
|
||||
fsrv_run_result_t fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv,
|
||||
u32 i) {
|
||||
return FSRV_RUN_OK;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
|
||||
@ -144,7 +157,7 @@ int main(int argc, char *argv[]) {
|
||||
|
||||
if (dict) {
|
||||
|
||||
load_extras(afl, dict);
|
||||
load_extras(afl, (u8*)dict);
|
||||
if (verbose)
|
||||
fprintf(stderr, "Loaded dictionary: %s (%u entries)\n", dict,
|
||||
afl->extras_cnt);
|
||||
|
@ -3,6 +3,23 @@
|
||||
This is the list of all noteworthy changes made in every public
|
||||
release of the tool. See README.md for the general instruction manual.
|
||||
|
||||
|
||||
### Version ++4.33a (dev)
|
||||
- afl-fuzz:
|
||||
- Use `AFL_PRELOAD_DISCRIMINATE_FORKSERVER_PARENT` if you use AFL_PRELOAD
|
||||
to disable fork, see docs (thanks to @alexandredoyen29)
|
||||
- Fix for FAST power schedules (introduced in 4.32c) (thanks to @kcwu)
|
||||
- Colors for NO_UI output (thanks to @smoelius)
|
||||
- More 64-bit architecture support by @maribu
|
||||
- afl-cc:
|
||||
- Fix to make AFL_SAN_NO_INST work with gcc_plugin
|
||||
- qemuafl:
|
||||
- better MIPS persistent mode support
|
||||
- afl-cmin:
|
||||
- new afl-cmin.py which is much faster; it is invoked by default via
afl-cmin if it executes successfully (thanks to @kcwu!)
|
||||
|
||||
|
||||
### Version ++4.32c (release)
|
||||
- Fixed a bug where after a fast restart of a full fuzzed corpus afl-fuzz
|
||||
terminates with "need at least one valid input seed that does not crash"
|
||||
@ -22,7 +39,6 @@
|
||||
- frida_mode:
|
||||
- fixes for new MacOS + M4 hardware
|
||||
|
||||
|
||||
### Version ++4.31c (release)
|
||||
- SAND mode added (docs/SAND.md) for more efficient fuzzing with sanitizers
|
||||
(thanks to @wtdcode !)
|
||||
|
12
docs/FAQ.md
@ -11,7 +11,7 @@ If you find an interesting or important question missing, submit it via
|
||||
AFL++ is a superior fork to Google's AFL - more speed, more and better
|
||||
mutations, more and better instrumentation, custom module support, etc.
|
||||
|
||||
American Fuzzy Lop (AFL) was developed by Michał "lcamtuf" Zalewski starting
|
||||
American Fuzzy Lop (AFL) was developed by Michal "lcamtuf" Zalewski starting
|
||||
in 2013/2014, and when he left Google end of 2017 he stopped developing it.
|
||||
|
||||
At the end of 2019, the Google fuzzing team took over maintenance of AFL,
|
||||
@ -284,14 +284,14 @@ If you find an interesting or important question missing, submit it via
|
||||
afl-cc/afl-clang-fast/afl-clang-lto:
|
||||
|
||||
```
|
||||
/prg/tmp/llvm-project/build/bin/clang-13: symbol lookup error: /usr/local/bin/../lib/afl//cmplog-instructions-pass.so: undefined symbol: _ZNK4llvm8TypeSizecvmEv
|
||||
clang-13: error: unable to execute command: No such file or directory
|
||||
clang-13: error: clang frontend command failed due to signal (use -v to see invocation)
|
||||
clang version 13.0.0 (https://github.com/llvm/llvm-project 1d7cf550721c51030144f3cd295c5789d51c4aad)
|
||||
/prg/tmp/llvm-project/build/bin/clang-18: symbol lookup error: /usr/local/bin/../lib/afl//cmplog-instructions-pass.so: undefined symbol: _ZNK4llvm8TypeSizecvmEv
|
||||
clang-18: error: unable to execute command: No such file or directory
|
||||
clang-18: error: clang frontend command failed due to signal (use -v to see invocation)
|
||||
clang version 18.0.0 (https://github.com/llvm/llvm-project 1d7cf550721c51030144f3cd295c5789d51c4aad)
|
||||
Target: x86_64-unknown-linux-gnu
|
||||
Thread model: posix
|
||||
InstalledDir: /prg/tmp/llvm-project/build/bin
|
||||
clang-13: note: diagnostic msg:
|
||||
clang-18: note: diagnostic msg:
|
||||
********************
|
||||
```
|
||||
|
||||
|
@ -21,7 +21,7 @@ If you want to build AFL++ yourself, you have many options. The easiest choice
|
||||
is to build and install everything:
|
||||
|
||||
NOTE: depending on your Debian/Ubuntu/Kali/... release, replace `-14` with
|
||||
whatever llvm version is available. We recommend llvm 13 or newer.
|
||||
whatever llvm version is available. We recommend llvm 14 or newer.
|
||||
|
||||
```shell
|
||||
sudo apt-get update
|
||||
|
@ -21,7 +21,7 @@ For a normal fuzzing workflow, we have:
|
||||
For SAND fuzzing workflow, this is slightly different:
|
||||
|
||||
1. Build target project _without_ any sanitizers to get `target_native`, which we will define as a "native binary". It is usually done by using `afl-clang-fast/lto(++)` to compile your project _without_ `AFL_USE_ASAN/UBSAN/MSAN`.
|
||||
2. Build target project with AFL_USE_ASAN=1 AFL_SAN_NO_INST=1 to get `target_asan`. Do note this step can be repeated for multiple sanitizers, like MSAN, UBSAN etc. It is also possible to have ASAN and UBSAN to build together.
|
||||
2. Build the target project with AFL_USE_ASAN=1 AFL_LLVM_ONLY_FSRV=1 to get `target_asan`. Do note this step can be repeated for multiple sanitizers, like MSAN, UBSAN etc. It is also possible to build ASAN and UBSAN together.
|
||||
3. Fuzz the target with `afl-fuzz -i seeds -o out -w ./target_asan -- ./target_native`. Note `-w` can be specified multiple times.
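A minimal sketch of steps 1 and 2, assuming a hypothetical single-file target `project.c`; step 3's invocation is the `afl-fuzz -w` command shown above:

```bash
# step 1: native, coverage-instrumented binary
afl-clang-fast -o target_native project.c
# step 2: ASAN oracle with forkserver only (repeat per sanitizer as needed)
AFL_USE_ASAN=1 AFL_LLVM_ONLY_FSRV=1 afl-clang-fast -o target_asan project.c
```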
|
||||
|
||||
Then you get:
|
||||
@ -44,11 +44,11 @@ Just like the normal building process, except using `afl-clang-fast`
|
||||
2. Build the sanitizers-enabled binaries.
|
||||
|
||||
```bash
|
||||
AFL_SAN_NO_INST=1 AFL_USE_UBSAN=1 AFL_USE_ASAN=1 afl-clang-fast test-instr.c -o ./asanubsan
|
||||
AFL_SAN_NO_INST=1 AFL_USE_MSAN=1 afl-clang-fast test-instr.c -o ./msan
|
||||
AFL_LLVM_ONLY_FSRV=1 AFL_USE_UBSAN=1 AFL_USE_ASAN=1 afl-clang-fast test-instr.c -o ./asanubsan
|
||||
AFL_LLVM_ONLY_FSRV=1 AFL_USE_MSAN=1 afl-clang-fast test-instr.c -o ./msan
|
||||
```
|
||||
|
||||
Do note `AFL_SAN_NO_INST=1` is crucial, this enables forkservers but disables pc instrumentation. Do not reuse sanitizers-enabled binaries built _without_ `AFL_SAN_NO_INST=1`. This will mess up SAND execution pattern.
|
||||
Do note `AFL_LLVM_ONLY_FSRV=1` is crucial: it enables the forkserver but disables pc instrumentation. You are allowed to reuse sanitizers-enabled binaries, i.e. binaries built _without_ `AFL_LLVM_ONLY_FSRV=1`, at the cost of reduced speed.
|
||||
|
||||
3. Start fuzzing
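A sketch of the invocation, assuming the native binary from step 1 was built as `./test-instr` and reusing the sanitizer binaries from step 2 (the `seeds/` corpus is hypothetical):

```bash
afl-fuzz -i seeds -o out -w ./asanubsan -w ./msan -- ./test-instr
```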
|
||||
|
||||
|
@ -111,6 +111,10 @@ fairly broad use of environment variables instead:
|
||||
|
||||
- Note: both `AFL_CFISAN_VERBOSE=1` and `AFL_UBSAN_VERBOSE=1` are disabled by default as verbose output can significantly slow down fuzzing performance. Use these options only during debugging or when additional crash diagnostics are required
|
||||
|
||||
- `AFL_LLVM_ONLY_FSRV`/`AFL_GCC_ONLY_FSRV` will inject the forkserver but not pc instrumentation. Please note this is different from `AFL_LLVM_DISABLE_INSTRUMENTATION`, which disables the forkserver implementation entirely. This env is pretty useful in two cases (see the sketch after this list):
  - [SAND](./SAND.md). In this case, the binaries built in this way serve as extra oracles. Check the corresponding document for details.
  - Compatibility with the LibAFL ForkserverExecutor implementation, which is faster for repeated runs than a simple CommandExecutor.
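A minimal usage sketch for the LLVM variant (target and output names are hypothetical):

```bash
# the forkserver and shared memory setup are compiled in, but no coverage instrumentation
AFL_LLVM_ONLY_FSRV=1 afl-cc -o target_fsrv_only target.c
```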
|
||||
|
||||
- `TMPDIR` is used by afl-as for temporary files; if this variable is not set,
|
||||
the tool defaults to /tmp.
|
||||
|
||||
@ -664,6 +668,24 @@ checks or alter some of the more exotic semantics of the tool:
|
||||
Note that will not be exact and with slow targets it can take seconds
|
||||
until there is a slice for the time test.
|
||||
|
||||
- When using `AFL_PRELOAD` with a preload that disables `fork()` calls in
the target, the forkserver becomes unable to fork.
To overcome this issue, `AFL_PRELOAD_DISCRIMINATE_FORKSERVER_PARENT`
lets the preloaded library check whether the environment variable
`AFL_FORKSERVER_PARENT` is set, so that the vanilla `fork()` can be used
in the forkserver and the placeholder in the target.
Here is a POC:
|
||||
```C
|
||||
// AFL_PRELOAD_DISCRIMINATE_FORKSERVER_PARENT=1 afl-fuzz ...
|
||||
pid_t fork(void)
|
||||
{
|
||||
if (getenv("AFL_FORKSERVER_PARENT") == NULL)
|
||||
return 0; // We are in the target
|
||||
else
|
||||
return real_fork(); // We are in the forkserver
|
||||
}
|
||||
```
|
||||
|
||||
## 6) Settings for afl-qemu-trace
|
||||
|
||||
The QEMU wrapper used to instrument binary-only code supports several settings:
|
||||
|
@ -45,7 +45,7 @@ E. CmpLog is our enhanced
|
||||
implementation, see
|
||||
[instrumentation/README.cmplog.md](../instrumentation/README.cmplog.md)
|
||||
|
||||
F. Similar and compatible to clang 13+ sancov sanitize-coverage-allow/deny but
|
||||
F. Similar and compatible to clang 14+ sancov sanitize-coverage-allow/deny but
|
||||
for all llvm versions and all our compile modes, only instrument what should
|
||||
be instrumented, for more speed, directed fuzzing and less instability; see
|
||||
[instrumentation/README.instrument_list.md](../instrumentation/README.instrument_list.md)
|
||||
|
@ -132,11 +132,15 @@ options are available:
|
||||
locations. This technique is very fast and good - if the target does not
|
||||
transform input data before comparison. Therefore, this technique is called
|
||||
`input to state` or `redqueen`. If you want to use this technique, then you
|
||||
have to compile the target twice, once specifically with/for this mode by
|
||||
setting `AFL_LLVM_CMPLOG=1`, and pass this binary to afl-fuzz via the `-c`
|
||||
parameter. Note that you can compile also just a cmplog binary and use that
|
||||
for both, however, there will be a performance penalty. You can read more
|
||||
about this in
|
||||
have to compile the target with `AFL_LLVM_CMPLOG=1`.
|
||||
You could use the resulting binary for both normal fuzzing and `-c` CMPLOG
mode (with `-c 0`); however, this will result in a performance loss of about
20%.
It is therefore better to compile a specific CMPLOG target with
`AFL_LLVM_ONLY_FSRV=1 AFL_LLVM_CMPLOG=1`, pass this binary name via
`-c cmplog-fuzzing-target`, compile the target again normally with `afl-cc`,
and use that as the fuzzing target as usual.
|
||||
You can read more about this in
|
||||
[instrumentation/README.cmplog.md](../instrumentation/README.cmplog.md).
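A sketch of that recommended setup; file names and the `seeds/` corpus are hypothetical:

```bash
# dedicated CMPLOG binary: forkserver plus cmplog instrumentation only
AFL_LLVM_ONLY_FSRV=1 AFL_LLVM_CMPLOG=1 afl-cc -o target.cmplog target.c
# normally instrumented binary used as the main fuzzing target
afl-cc -o target target.c
afl-fuzz -i seeds -o out -c ./target.cmplog -- ./target @@
```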
|
||||
|
||||
If you use LTO, LLVM, or GCC_PLUGIN mode
|
||||
|
@ -770,6 +770,7 @@ typedef struct afl_state {
|
||||
#define FOREIGN_SYNCS_MAX 32U
|
||||
u8 foreign_sync_cnt;
|
||||
struct foreign_sync foreign_syncs[FOREIGN_SYNCS_MAX];
|
||||
char *foreign_file;
|
||||
|
||||
#ifdef _AFL_DOCUMENT_MUTATIONS
|
||||
u8 do_document;
|
||||
@ -1180,7 +1181,7 @@ u8 havoc_mutation_probability_py(void *);
|
||||
u8 queue_get_py(void *, const u8 *);
|
||||
const char *introspection_py(void *);
|
||||
u8 queue_new_entry_py(void *, const u8 *, const u8 *);
|
||||
void splice_optout(void *);
|
||||
void splice_optout_py(void *);
|
||||
void deinit_py(void *);
|
||||
|
||||
#endif
|
||||
@ -1278,7 +1279,6 @@ u8 fuzz_one(afl_state_t *);
|
||||
#ifdef HAVE_AFFINITY
|
||||
void bind_to_free_cpu(afl_state_t *);
|
||||
#endif
|
||||
void setup_post(afl_state_t *);
|
||||
void read_testcases(afl_state_t *, u8 *);
|
||||
void perform_dry_run(afl_state_t *);
|
||||
void pivot_inputs(afl_state_t *);
|
||||
|
@ -26,7 +26,7 @@
|
||||
/* Version string: */
|
||||
|
||||
// c = release, a = volatile github dev, e = experimental branch
|
||||
#define VERSION "++4.32c"
|
||||
#define VERSION "++4.33a"
|
||||
|
||||
/******************************************************
|
||||
* *
|
||||
@ -172,7 +172,8 @@
|
||||
|
||||
/* 64bit arch MACRO */
|
||||
#if (defined(__x86_64__) || defined(__arm64__) || defined(__aarch64__) || \
|
||||
(defined(__riscv) && __riscv_xlen == 64))
|
||||
(defined(__riscv) && __riscv_xlen == 64) || defined(__powerpc64le__) || \
|
||||
defined(__s390x__) || defined(__loongarch64))
|
||||
#define WORD_SIZE_64 1
|
||||
#endif
|
||||
|
||||
|
@ -10,6 +10,7 @@ static char *afl_environment_deprecated[] = {
|
||||
"AFL_DEFER_FORKSRV",
|
||||
"AFL_POST_LIBRARY",
|
||||
"AFL_PERSISTENT",
|
||||
"AFL_SAN_NO_INST",
|
||||
NULL
|
||||
|
||||
};
|
||||
@ -118,7 +119,8 @@ static char *afl_environment_variables[] = {
|
||||
"AFL_CFISAN_VERBOSE", "AFL_USE_LSAN", "AFL_WINE_PATH", "AFL_NO_SNAPSHOT",
|
||||
"AFL_EXPAND_HAVOC_NOW", "AFL_USE_FASAN", "AFL_USE_QASAN",
|
||||
"AFL_PRINT_FILENAMES", "AFL_PIZZA_MODE", "AFL_NO_FASTRESUME",
|
||||
"AFL_SAN_ABSTRACTION", "AFL_SAN_NO_INST", "AFL_SAN_RECOVER", NULL};
|
||||
"AFL_SAN_ABSTRACTION", "AFL_LLVM_ONLY_FSRV", "AFL_GCC_ONLY_FRSV",
|
||||
"AFL_SAN_RECOVER", "AFL_PRELOAD_DISCRIMINATE_FORKSERVER_PARENT", NULL};
|
||||
|
||||
extern char *afl_environment_variables[];
|
||||
|
||||
|
@ -159,6 +159,8 @@ typedef struct afl_forkserver {
|
||||
|
||||
u8 uses_asan; /* Target uses ASAN/LSAN/MSAN? (bit 0/1/2 respectively) */
|
||||
|
||||
bool setenv; /* setenv() to discriminate the forkserver? */
|
||||
|
||||
bool debug; /* debug mode? */
|
||||
|
||||
u8 san_but_not_instrumented; /* Is it sanitizer enabled but not instrumented?
|
||||
|
@ -2,18 +2,6 @@
|
||||
american fuzzy lop++ - hashing function
|
||||
---------------------------------------
|
||||
|
||||
The hash32() function is a variant of MurmurHash3, a good
|
||||
non-cryptosafe hashing function developed by Austin Appleby.
|
||||
|
||||
For simplicity, this variant does *NOT* accept buffer lengths
|
||||
that are not divisible by 8 bytes. The 32-bit version is otherwise
|
||||
similar to the original; the 64-bit one is a custom hack with
|
||||
mostly-unproven properties.
|
||||
|
||||
Austin's original code is public domain.
|
||||
|
||||
Other code written by Michal Zalewski
|
||||
|
||||
Copyright 2016 Google Inc. All rights reserved.
|
||||
Copyright 2019-2024 AFLplusplus Project. All rights reserved.
|
||||
|
||||
@ -33,82 +21,5 @@
|
||||
u32 hash32(u8 *key, u32 len, u32 seed);
|
||||
u64 hash64(u8 *key, u32 len, u64 seed);
|
||||
|
||||
#if 0
|
||||
|
||||
The following code is disabled because xxh3 is 30% faster
|
||||
|
||||
#ifdef __x86_64__
|
||||
|
||||
#define ROL64(_x, _r) ((((u64)(_x)) << (_r)) | (((u64)(_x)) >> (64 - (_r))))
|
||||
|
||||
static inline u32 hash32(u8 *key, u32 len, u32 seed) {
|
||||
|
||||
const u64 *data = (u64 *)key;
|
||||
u64 h1 = seed ^ len;
|
||||
|
||||
len >>= 3;
|
||||
|
||||
while (len--) {
|
||||
|
||||
u64 k1 = *data++;
|
||||
|
||||
k1 *= 0x87c37b91114253d5ULL;
|
||||
k1 = ROL64(k1, 31);
|
||||
k1 *= 0x4cf5ad432745937fULL;
|
||||
|
||||
h1 ^= k1;
|
||||
h1 = ROL64(h1, 27);
|
||||
h1 = h1 * 5 + 0x52dce729;
|
||||
|
||||
}
|
||||
|
||||
h1 ^= h1 >> 33;
|
||||
h1 *= 0xff51afd7ed558ccdULL;
|
||||
h1 ^= h1 >> 33;
|
||||
h1 *= 0xc4ceb9fe1a85ec53ULL;
|
||||
h1 ^= h1 >> 33;
|
||||
|
||||
return h1;
|
||||
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#define ROL32(_x, _r) ((((u32)(_x)) << (_r)) | (((u32)(_x)) >> (32 - (_r))))
|
||||
|
||||
static inline u32 hash32(const void *key, u32 len, u32 seed) {
|
||||
|
||||
const u32 *data = (u32 *)key;
|
||||
u32 h1 = seed ^ len;
|
||||
|
||||
len >>= 2;
|
||||
|
||||
while (len--) {
|
||||
|
||||
u32 k1 = *data++;
|
||||
|
||||
k1 *= 0xcc9e2d51;
|
||||
k1 = ROL32(k1, 15);
|
||||
k1 *= 0x1b873593;
|
||||
|
||||
h1 ^= k1;
|
||||
h1 = ROL32(h1, 13);
|
||||
h1 = h1 * 5 + 0xe6546b64;
|
||||
|
||||
}
|
||||
|
||||
h1 ^= h1 >> 16;
|
||||
h1 *= 0x85ebca6b;
|
||||
h1 ^= h1 >> 13;
|
||||
h1 *= 0xc2b2ae35;
|
||||
h1 ^= h1 >> 16;
|
||||
|
||||
return h1;
|
||||
|
||||
}
|
||||
|
||||
#endif /* ^__x86_64__ */
|
||||
#endif
|
||||
|
||||
#endif /* !_HAVE_HASH_H */
|
||||
|
||||
|
@ -327,7 +327,7 @@ class ModuleSanitizerCoverageLTOLegacyPass : public ModulePass {
|
||||
|
||||
};
|
||||
|
||||
if (!getenv("AFL_SAN_NO_INST")) {
|
||||
if (!getenv("AFL_LLVM_ONLY_FSRV")) {
|
||||
|
||||
return ModuleSancov.instrumentModule(M, DTCallback, PDTCallback);
|
||||
|
||||
@ -389,7 +389,7 @@ PreservedAnalyses ModuleSanitizerCoverageLTO::run(Module &M,
|
||||
|
||||
};
|
||||
|
||||
if (!getenv("AFL_SAN_NO_INST")) {
|
||||
if (!getenv("AFL_LLVM_ONLY_FSRV")) {
|
||||
|
||||
if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback))
|
||||
return PreservedAnalyses::none();
|
||||
|
@ -272,7 +272,7 @@ PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M,
|
||||
// TODO: Support LTO or llvm classic?
|
||||
// Note we still need afl-compiler-rt so we just disable the instrumentation
|
||||
// here.
|
||||
if (!getenv("AFL_SAN_NO_INST")) {
|
||||
if (!getenv("AFL_LLVM_ONLY_FSRV")) {
|
||||
|
||||
if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback))
|
||||
return PreservedAnalyses::none();
|
||||
|
@ -119,6 +119,8 @@ u64 __afl_map_addr;
|
||||
u32 __afl_first_final_loc;
|
||||
u32 __afl_old_forkserver;
|
||||
|
||||
u8 __afl_forkserver_setenv = 0;
|
||||
|
||||
#ifdef __AFL_CODE_COVERAGE
|
||||
typedef struct afl_module_info_t afl_module_info_t;
|
||||
|
||||
@ -888,6 +890,12 @@ static void __afl_start_forkserver(void) {
|
||||
|
||||
}
|
||||
|
||||
if (getenv("AFL_PRELOAD_DISCRIMINATE_FORKSERVER_PARENT") != NULL) {
|
||||
|
||||
__afl_forkserver_setenv = 1;
|
||||
|
||||
}
|
||||
|
||||
/* Phone home and tell the parent that we're OK. If parent isn't there,
|
||||
assume we're not running in forkserver mode and just execute program. */
|
||||
|
||||
@ -1054,6 +1062,13 @@ static void __afl_start_forkserver(void) {
|
||||
|
||||
close(FORKSRV_FD);
|
||||
close(FORKSRV_FD + 1);
|
||||
|
||||
if (unlikely(__afl_forkserver_setenv)) {
|
||||
|
||||
unsetenv("AFL_FORKSERVER_PARENT");
|
||||
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
}
|
||||
@ -1226,6 +1241,15 @@ void __afl_manual_init(void) {
|
||||
|
||||
}
|
||||
|
||||
if (getenv("AFL_LLVM_ONLY_FSRV") || getenv("AFL_GCC_ONLY_FRSV")) {
|
||||
|
||||
fprintf(stderr,
|
||||
"DEBUG: Overwrite area_ptr to dummy due to "
|
||||
"AFL_LLVM_ONLY_FSRV/AFL_GCC_ONLY_FRSV\n");
|
||||
__afl_area_ptr = __afl_area_ptr_dummy;
|
||||
|
||||
}
|
||||
|
||||
if (!init_done) {
|
||||
|
||||
__afl_start_forkserver();
|
||||
|
@ -462,6 +462,7 @@ static struct plugin_info afl_plugin = {
|
||||
.help = G_("AFL gcc plugin\n\
|
||||
\n\
|
||||
Set AFL_QUIET in the environment to silence it.\n\
|
||||
Set AFL_GCC_ONLY_FRSV in the environment to disable instrumentation.\n\
|
||||
\n\
|
||||
Set AFL_INST_RATIO in the environment to a number from 0 to 100\n\
|
||||
to control how likely a block will be chosen for instrumentation.\n\
|
||||
@ -502,9 +503,10 @@ int plugin_init(struct plugin_name_args *info,
|
||||
case it was specified in the command line's -frandom-seed for
|
||||
reproducible instrumentation. */
|
||||
srandom(get_random_seed(false));
|
||||
bool fsrv_only = !!getenv("AFL_GCC_ONLY_FRSV");
|
||||
|
||||
const char *name = info->base_name;
|
||||
register_callback(name, PLUGIN_INFO, NULL, &afl_plugin);
|
||||
if (!fsrv_only) { register_callback(name, PLUGIN_INFO, NULL, &afl_plugin); }
|
||||
|
||||
afl_pass *aflp = new afl_pass(quiet, inst_ratio);
|
||||
struct register_pass_info pass_info = {
|
||||
@ -516,14 +518,20 @@ int plugin_init(struct plugin_name_args *info,
|
||||
|
||||
};
|
||||
|
||||
if (!fsrv_only) {
|
||||
|
||||
register_callback(name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pass_info);
|
||||
register_callback(name, PLUGIN_FINISH, afl_pass::plugin_finalize,
|
||||
pass_info.pass);
|
||||
|
||||
}
|
||||
|
||||
if (!quiet)
|
||||
ACTF(G_("%s instrumentation at ratio of %u%% in %s mode."),
|
||||
aflp->out_of_line ? G_("Call-based") : G_("Inline"), inst_ratio,
|
||||
getenv("AFL_HARDEN") ? G_("hardened") : G_("non-hardened"));
|
||||
else if (fsrv_only)
|
||||
ACTF("Instrumentation disabled due to AFL_GCC_ONLY_FRSV");
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -225,7 +225,7 @@ bool AFLCoverage::runOnModule(Module &M) {
|
||||
if (getenv("AFL_DEBUG")) debug = 1;
|
||||
|
||||
#if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */
|
||||
if (getenv("AFL_SAN_NO_INST")) {
|
||||
if (getenv("AFL_LLVM_ONLY_FSRV")) {
|
||||
|
||||
if (debug) { fprintf(stderr, "Instrument disabled\n"); }
|
||||
return PreservedAnalyses::all();
|
||||
@ -233,7 +233,7 @@ bool AFLCoverage::runOnModule(Module &M) {
|
||||
}
|
||||
|
||||
#else
|
||||
if (getenv("AFL_SAN_NO_INST")) {
|
||||
if (getenv("AFL_LLVM_ONLY_FSRV")) {
|
||||
|
||||
if (debug) { fprintf(stderr, "Instrument disabled\n"); }
|
||||
return true;
|
||||
|
@ -1 +1 @@
|
||||
ef1cd9a8cb
|
||||
c43dd6e036
|
||||
|
Submodule qemu_mode/qemuafl updated: ef1cd9a8cb...c43dd6e036
13
src/afl-cc.c
@ -244,7 +244,7 @@ static inline void insert_object(aflcc_state_t *aflcc, u8 *obj, u8 *fmt,
|
||||
/* Insert params into the new argv, make clang load the pass. */
|
||||
static inline void load_llvm_pass(aflcc_state_t *aflcc, u8 *pass) {
|
||||
|
||||
if (getenv("AFL_SAN_NO_INST")) {
|
||||
if (getenv("AFL_LLVM_ONLY_FSRV")) {
|
||||
|
||||
if (!be_quiet) { DEBUGF("SAND: Coverage instrumentation disabled\n"); }
|
||||
return;
|
||||
@ -2097,7 +2097,7 @@ void add_native_pcguard(aflcc_state_t *aflcc) {
|
||||
* anyway.
|
||||
*/
|
||||
if (aflcc->have_rust_asanrt) { return; }
|
||||
if (getenv("AFL_SAN_NO_INST")) {
|
||||
if (getenv("AFL_LLVM_ONLY_FSRV")) {
|
||||
|
||||
if (!be_quiet) { DEBUGF("SAND: Coverage instrumentation disabled\n"); }
|
||||
return;
|
||||
@ -2138,7 +2138,7 @@ void add_native_pcguard(aflcc_state_t *aflcc) {
|
||||
*/
|
||||
void add_optimized_pcguard(aflcc_state_t *aflcc) {
|
||||
|
||||
if (getenv("AFL_SAN_NO_INST")) {
|
||||
if (getenv("AFL_LLVM_ONLY_FSRV")) {
|
||||
|
||||
if (!be_quiet) { DEBUGF("SAND: Coverage instrumentation disabled\n"); }
|
||||
return;
|
||||
@ -2600,6 +2600,13 @@ void add_assembler(aflcc_state_t *aflcc) {
|
||||
/* Add params to launch the gcc plugins for instrumentation. */
|
||||
void add_gcc_plugin(aflcc_state_t *aflcc) {
|
||||
|
||||
if (getenv("AFL_GCC_ONLY_FSRV")) {
|
||||
|
||||
if (!be_quiet) { DEBUGF("SAND: Coverage instrumentation disabled\n"); }
|
||||
return;
|
||||
|
||||
}
|
||||
|
||||
if (aflcc->cmplog_mode) {
|
||||
|
||||
insert_object(aflcc, "afl-gcc-cmplog-pass.so", "-fplugin=%s", 0);
|
||||
|
@ -179,7 +179,7 @@ u32 check_binary_signatures(u8 *fn) {
|
||||
if (f_data == MAP_FAILED) { PFATAL("Unable to mmap file '%s'", fn); }
|
||||
close(fd);
|
||||
|
||||
if (afl_memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) {
|
||||
if (afl_memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG))) {
|
||||
|
||||
if (!be_quiet) { OKF(cPIN "Persistent mode binary detected."); }
|
||||
setenv(PERSIST_ENV_VAR, "1", 1);
|
||||
@ -204,7 +204,7 @@ u32 check_binary_signatures(u8 *fn) {
|
||||
|
||||
}
|
||||
|
||||
if (afl_memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) {
|
||||
if (afl_memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG))) {
|
||||
|
||||
if (!be_quiet) { OKF(cPIN "Deferred forkserver binary detected."); }
|
||||
setenv(DEFER_ENV_VAR, "1", 1);
|
||||
@ -819,8 +819,22 @@ void check_environment_vars(char **envp) {
|
||||
|
||||
WARNF("AFL environment variable %s is deprecated!",
|
||||
afl_environment_deprecated[i]);
|
||||
|
||||
if (strncmp(afl_environment_deprecated[i], "AFL_SAN_NO_INST",
|
||||
strlen(afl_environment_deprecated[i])) == 0) {
|
||||
|
||||
WARNF(
|
||||
"AFL_LLVM_ONLY_FSRV/AFL_GCC_ONLY_FSRV is induced and set "
|
||||
"instead.");
|
||||
setenv("AFL_GCC_ONLY_FSRV", "1", 0);
|
||||
setenv("AFL_LLVM_ONLY_FSRV", "1", 0);
|
||||
|
||||
} else {
|
||||
|
||||
issue_detected = 1;
|
||||
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
i++;
|
||||
|
@ -140,7 +140,7 @@ nyx_plugin_handler_t *afl_load_libnyx_plugin(u8 *libnyx_binary) {
|
||||
if (plugin->nyx_get_target_hash64 == NULL) { goto fail; }
|
||||
|
||||
plugin->nyx_config_free = dlsym(handle, "nyx_config_free");
|
||||
if (plugin->nyx_get_target_hash64 == NULL) { goto fail; }
|
||||
if (plugin->nyx_config_free == NULL) { goto fail; }
|
||||
|
||||
OKF("libnyx plugin is ready!");
|
||||
return plugin;
|
||||
@ -250,6 +250,16 @@ void afl_fsrv_init(afl_forkserver_t *fsrv) {
|
||||
fsrv->child_kill_signal = SIGKILL;
|
||||
fsrv->max_length = MAX_FILE;
|
||||
|
||||
if (getenv("AFL_PRELOAD_DISCRIMINATE_FORKSERVER_PARENT") != NULL) {
|
||||
|
||||
fsrv->setenv = 1;
|
||||
|
||||
} else {
|
||||
|
||||
fsrv->setenv = 0;
|
||||
|
||||
}
|
||||
|
||||
/* exec related stuff */
|
||||
fsrv->child_pid = -1;
|
||||
fsrv->map_size = get_map_size();
|
||||
@ -878,6 +888,8 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
|
||||
|
||||
/* CHILD PROCESS */
|
||||
|
||||
if (unlikely(fsrv->setenv)) { setenv("AFL_FORKSERVER_PARENT", "1", 0); }
|
||||
|
||||
// enable terminating on sigpipe in the children
|
||||
struct sigaction sa;
|
||||
memset((char *)&sa, 0, sizeof(sa));
|
||||
|
@ -317,8 +317,16 @@ u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) {

if (unlikely(afl->syncing_party)) {

if (unlikely(afl->foreign_file)) {

sprintf(ret, "sync:%s,src:%.20s", afl->syncing_party, afl->foreign_file);

} else {

sprintf(ret, "sync:%s,src:%06u", afl->syncing_party, afl->syncing_case);

}

} else {

sprintf(ret, "src:%06u", afl->current_entry);
@ -469,8 +477,6 @@ void write_crash_readme(afl_state_t *afl) {
u8 __attribute__((hot)) save_if_interesting(afl_state_t *afl, void *mem,
u32 len, u8 fault) {

u8 classified = 0;

if (unlikely(len == 0)) { return 0; }

if (unlikely(fault == FSRV_RUN_TMOUT && afl->afl_env.afl_ignore_timeouts)) {
@ -479,7 +485,6 @@ u8 __attribute__((hot)) save_if_interesting(afl_state_t *afl, void *mem,

classify_counts(&afl->fsrv);
u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
classified = 1;

// Saturated increment
if (likely(afl->n_fuzz[cksum % N_FUZZ_SIZE] < 0xFFFFFFFF))
@ -494,12 +499,11 @@ u8 __attribute__((hot)) save_if_interesting(afl_state_t *afl, void *mem,
u8 fn[PATH_MAX];
u8 *queue_fn = "";
u8 new_bits = 0, keeping = 0, res, is_timeout = 0, need_hash = 1;
u8 classified = 0;
u8 san_fault = 0, san_idx = 0, feed_san = 0;
s32 fd;
u64 cksum = 0;
u32 cksum_simplified = 0, cksum_unique = 0;
u8 san_fault = 0;
u8 san_idx = 0;
u8 feed_san = 0;

afl->san_case_status = 0;

@ -510,6 +514,7 @@ u8 __attribute__((hot)) save_if_interesting(afl_state_t *afl, void *mem,
if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE)) {

classify_counts(&afl->fsrv);
classified = 1;
need_hash = 0;

cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
@ -546,8 +551,17 @@ u8 __attribute__((hot)) save_if_interesting(afl_state_t *afl, void *mem,
if (unlikely(afl->san_binary_length) &&
unlikely(afl->san_abstraction == COVERAGE_INCREASE)) {

/* Check if the input increase the coverage */
if (classified) {

/* We could have classified the bits in SAND with COVERAGE_INCREASE */
new_bits = has_new_bits(afl, afl->virgin_bits);

} else {

new_bits = has_new_bits_unclassified(afl, afl->virgin_bits);
classified = 1;

}

if (unlikely(new_bits)) { feed_san = 1; }

@ -636,6 +650,7 @@ u8 __attribute__((hot)) save_if_interesting(afl_state_t *afl, void *mem,
} else {

new_bits = has_new_bits_unclassified(afl, afl->virgin_bits);
classified = 1;

}

@ -859,7 +874,12 @@ may_save_fault:
}

new_fault = fuzz_run_target(afl, &afl->fsrv, afl->hang_tmout);
if (!classified) {

classify_counts(&afl->fsrv);
classified = 1;

}

/* A corner case that one user reported bumping into: increasing the
timeout actually uncovers a crash. Make sure we don't discard it if

@ -589,8 +589,6 @@ void read_foreign_testcases(afl_state_t *afl, int first) {
u8 *fn2 =
alloc_printf("%s/%s", afl->foreign_syncs[iter].dir, nl[i]->d_name);

free(nl[i]); /* not tracked */

if (unlikely(lstat(fn2, &st) || access(fn2, R_OK))) {

if (first) PFATAL("Unable to access '%s'", fn2);
@ -653,32 +651,38 @@ void read_foreign_testcases(afl_state_t *afl, int first) {
u32 len = write_to_testcase(afl, (void **)&mem, st.st_size, 1);
fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);
afl->syncing_party = foreign_name;
afl->foreign_file = nl[i]->d_name;
afl->queued_imported += save_if_interesting(afl, mem, len, fault);
afl->syncing_party = 0;

munmap(mem, st.st_size);
close(fd);

if (st.st_mtime > mtime_max) {

mtime_max = st.st_mtime;
if (st.st_mtime > mtime_max) { mtime_max = st.st_mtime; }
show_stats(afl);

}

}

if (mtime_max > afl->foreign_syncs[iter].mtime) {

afl->foreign_syncs[iter].mtime = mtime_max;

}

for (i = 0; i < (u32)nl_cnt; ++i) {

free(nl[i]); /* not tracked */

}

free(nl); /* not tracked */

}

}

afl->foreign_file = NULL;
afl->syncing_party = 0;

if (first) {

afl->last_find_time = 0;
@ -3116,7 +3120,7 @@ void check_binary(afl_state_t *afl, u8 *fname) {
!afl->fsrv.nyx_mode &&
#endif
!afl->fsrv.cs_mode && !afl->non_instrumented_mode &&
!afl_memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
!afl_memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR))) {

SAYF("\n" cLRD "[-] " cRST
"Looks like the target binary is not instrumented! The fuzzer depends "
@ -3147,7 +3151,7 @@ void check_binary(afl_state_t *afl, u8 *fname) {
}

if ((afl->fsrv.cs_mode || afl->fsrv.qemu_mode || afl->fsrv.frida_mode) &&
afl_memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) {
afl_memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR))) {

SAYF("\n" cLRD "[-] " cRST
"This program appears to be instrumented with AFL++ compilers, but is "
@ -3182,7 +3186,7 @@ void check_binary(afl_state_t *afl, u8 *fname) {

/* Detect persistent & deferred init signatures in the binary. */

if (afl_memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) {
if (afl_memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG))) {

OKF(cPIN "Persistent mode binary detected.");
setenv(PERSIST_ENV_VAR, "1", 1);
@ -3209,7 +3213,7 @@ void check_binary(afl_state_t *afl, u8 *fname) {
}

if (afl->fsrv.frida_mode ||
afl_memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) {
afl_memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG))) {

OKF(cPIN "Deferred forkserver binary detected.");
setenv(DEFER_ENV_VAR, "1", 1);

@ -411,11 +411,12 @@ u8 fuzz_one_original(afl_state_t *afl) {
u_simplestring_time_diff(time_tmp, afl->prev_run_time + get_cur_time(),
afl->start_time);
ACTF(
"Fuzzing test case #%u (%u total, %llu crashes saved, state: %s, "
"Fuzzing test case #%u (%u total, %s%llu crashes saved%s, state: %s, "
"mode=%s, "
"perf_score=%0.0f, weight=%0.0f, favorite=%u, was_fuzzed=%u, "
"exec_us=%llu, hits=%u, map=%u, ascii=%u, run_time=%s)...",
afl->current_entry, afl->queued_items, afl->saved_crashes,
afl->current_entry, afl->queued_items,
afl->saved_crashes != 0 ? cRED : "", afl->saved_crashes, cRST,
get_fuzzing_state(afl), afl->fuzz_mode ? "exploit" : "explore",
afl->queue_cur->perf_score, afl->queue_cur->weight,
afl->queue_cur->favored, afl->queue_cur->was_fuzzed,

@ -39,6 +9 @@ void sanfuzz_exec_child(afl_forkserver_t *fsrv, char **argv) {

}

// In case users provide the normally instrumented binaries, this servers as
// the last resort to avoid collecting incorrect coverage.
setenv("AFL_LLVM_ONLY_FSRV", "1", 0);
execv(fsrv->target_path, argv);

}

@ -28,8 +13 @@
#include "envs.h"
#include <limits.h>

static char fuzzing_state[4][12] = {"started :-)", "in progress", "final phase",
"finished..."};
// 7 is the number of characters in a color control code
// 11 is the number of characters in the fuzzing state itself
// 5 is the number of characters in `cRST`
// 1 is for the null character
static char fuzzing_state[4][7 + 11 + 5 + 1] = {

"started :-)", "in progress", "final phase", cRED "finished..." cRST};

char *get_fuzzing_state(afl_state_t *afl) {

@ -54,13 +59,13 @@ char *get_fuzzing_state(afl_state_t *afl) {
u64 percent_cur = last_find_100 / cur_run_time;
u64 percent_total = last_find_100 / cur_total_run_time;

if (unlikely(percent_cur >= 80 && percent_total >= 80)) {
if (unlikely(percent_cur >= 75 && percent_total >= 75)) {

if (unlikely(afl->afl_env.afl_exit_when_done)) { afl->stop_soon = 2; }

return fuzzing_state[3];

} else if (unlikely(percent_cur >= 55 && percent_total >= 55)) {
} else if (unlikely(percent_cur >= 50 && percent_total >= 50)) {

return fuzzing_state[2];

@ -1745,7 +1745,7 @@ int main(int argc, char **argv_orig, char **envp) {

if (afl->cycle_schedules) {

afl->top_rated_candidates = ck_alloc(map_size * sizeof(u32));
afl->top_rated_candidates = ck_alloc(map_size * sizeof(u32 *));

}

@ -2307,11 +2307,7 @@ int main(int argc, char **argv_orig, char **envp) {
u64 target_hash = get_binary_hash(afl->fsrv.target_path);
#endif

if ((!target_hash || prev_target_hash != target_hash)
#ifdef __linux__
|| (afl->fsrv.nyx_mode && target_hash == 0)
#endif
) {
if (!target_hash || prev_target_hash != target_hash) {

ACTF("Target binary is different, cannot perform FAST RESUME!");

@ -1589,7 +1589,7 @@ int main(int argc, char **argv_orig, char **envp) {

// only reinitialize when it makes sense
if (map_size < new_map_size ||
(new_map_size > map_size && new_map_size - map_size > MAP_SIZE)) {
(new_map_size < map_size && map_size - new_map_size > MAP_SIZE)) {

if (!be_quiet)
ACTF("Acquired new map size for target: %u bytes\n", new_map_size);

@ -49,7 +11 @@ int main(int argc, char **argv) {
if ((cnt = read(fd, buf, sizeof(buf) - 1)) < 1) {

printf("Hum?\n");
#ifdef EXIT_AT_END
exit(1);
#else
return 1;
#endif

}

@ -77,6 +81,10 @@ int main(int argc, char **argv) {

}

#ifdef EXIT_AT_END
exit(0);
#endif

return 0;

}

@ -16,7 +8 @@ test -z "$AFL_CC" && {
test -e ../afl-qemu-trace && {
cc -pie -fPIE -o test-instr ../test-instr.c
cc -o test-compcov test-compcov.c
test -e test-instr -a -e test-compcov && {
cc -pie -fPIE -o test-instr-exit-at-end -DEXIT_AT_END ../test-instr.c
test -e test-instr -a -e test-compcov -a -e test-instr-exit-at-end && {
{
mkdir -p in
echo 00000 > in/in
@ -149,11 +150,63 @@ test -e ../afl-qemu-trace && {
$ECHO "$RED[!] afl-fuzz is not working correctly with persistent qemu_mode"
CODE=1
}
rm -rf in out errors
rm -rf out errors
} || {
$ECHO "$YELLOW[-] not an intel or arm platform, cannot test persistent qemu_mode"
}

test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc" -o "$SYS" = "aarch64" -o ! "${SYS%%arm*}" && {
$ECHO "$GREY[*] running afl-fuzz for persistent qemu_mode with AFL_QEMU_PERSISTENT_EXITS, this will take approx 10 seconds"
{
IS_STATIC=""
file test-instr-exit-at-end | grep -q 'statically linked' && IS_STATIC=1
test -z "$IS_STATIC" && {
if file test-instr-exit-at-end | grep -q "32-bit"; then
# for 32-bit reduce 8 nibbles to the lower 7 nibbles
ADDR_LOWER_PART=`nm test-instr-exit-at-end | grep "T main" | awk '{print $1}' | sed 's/^.//'`
else
# for 64-bit reduce 16 nibbles to the lower 9 nibbles
ADDR_LOWER_PART=`nm test-instr-exit-at-end | grep "T main" | awk '{print $1}' | sed 's/^.......//'`
fi
export AFL_QEMU_PERSISTENT_ADDR=`expr 0x4${ADDR_LOWER_PART}`
}
test -n "$IS_STATIC" && {
export AFL_QEMU_PERSISTENT_ADDR=0x`nm test-instr-exit-at-end | grep "T main" | awk '{print $1}'`
}
export AFL_QEMU_PERSISTENT_GPR=1
$ECHO "Info: AFL_QEMU_PERSISTENT_ADDR=$AFL_QEMU_PERSISTENT_ADDR <= $(nm test-instr-exit-at-end | grep "T main" | awk '{print $1}')"
export AFL_QEMU_PERSISTENT_EXITS=1
../afl-fuzz -m ${MEM_LIMIT} -V07 -Q -i in -o out -- ./test-instr-exit-at-end
echo status "$?"
unset AFL_QEMU_PERSISTENT_ADDR
unset AFL_QEMU_PERSISTENT_GPR
unset AFL_QEMU_PERSISTENT_EXITS
} >>errors 2>&1
test -n "$( ls out/default/queue/id:000000* 2>/dev/null )" && {
$ECHO "$GREEN[+] afl-fuzz is working correctly with persistent qemu_mode and AFL_QEMU_PERSISTENT_EXITS"
RUNTIMEP_EXIT=`grep execs_done out/default/fuzzer_stats | awk '{print$3}'`
test -n "$RUNTIME" -a -n "$RUNTIMEP_EXIT" && {
DIFF=`expr $RUNTIMEP_EXIT / $RUNTIME`
test "$DIFF" -gt 1 && { # must be at least twice as fast
$ECHO "$GREEN[+] persistent qemu_mode with AFL_QEMU_PERSISTENT_EXITS was noticeable faster than standard qemu_mode"
} || {
$ECHO "$YELLOW[-] persistent qemu_mode with AFL_QEMU_PERSISTENT_EXITS was not noticeable faster than standard qemu_mode"
}
} || {
$ECHO "$YELLOW[-] we got no data on executions performed? weird!"
}
} || {
echo CUT------------------------------------------------------------------CUT
cat errors
echo CUT------------------------------------------------------------------CUT
$ECHO "$RED[!] afl-fuzz is not working correctly with persistent qemu_mode and AFL_QEMU_PERSISTENT_EXITS"
CODE=1
}
rm -rf in out errors
} || {
$ECHO "$YELLOW[-] not an intel or arm platform, cannot test persistent qemu_mode with AFL_QEMU_PERSISTENT_EXITS"
}

test -e ../qemu_mode/unsigaction/unsigaction32.so && {
${AFL_CC} -o test-unsigaction32 -m32 test-unsigaction.c >> errors 2>&1 && {
./test-unsigaction32
@ -212,7 +265,7 @@ test -e ../afl-qemu-trace && {
CODE=1
}

rm -f test-instr test-compcov
rm -f test-instr test-compcov test-instr-exit-at-end
} || {
$ECHO "$YELLOW[-] qemu_mode is not compiled, cannot test"
INCOMPLETE=1

@ -18,7 +8 @@ ifneq "" "$(LLVM_BINDIR)"
endif
endif

CFLAGS := -O3 -funroll-loops -g -fPIC
CFLAGS := -O3 -funroll-loops -g -fPIC -fno-lto
AR ?= ar

ifdef IOS_SDK_PATH
CFLAGS += -isysroot $(IOS_SDK_PATH)
@ -30,7 +31,7 @@ aflpp_driver.o: aflpp_driver.c
-$(CC) -I. -I../../include $(CFLAGS) -c aflpp_driver.c

libAFLDriver.a: aflpp_driver.o
@ar rc libAFLDriver.a aflpp_driver.o
@$(AR) rc libAFLDriver.a aflpp_driver.o
@cp -vf libAFLDriver.a ../../

debug:
@ -38,13 +39,13 @@ debug:
$(CC) -I../../include -D_DEBUG=\"1\" -g -funroll-loops -c aflpp_driver.c
#$(CC) -S -emit-llvm -Wno-deprecated -I../../include $(CFLAGS) -D_DEBUG=\"1\" -c -o afl-performance.ll ../../src/afl-performance.c
#$(CC) -S -emit-llvm -I../../include -D_DEBUG=\"1\" -g -funroll-loops -c aflpp_driver.c
ar rc libAFLDriver.a afl-performance.o aflpp_driver.o
$(AR) rc libAFLDriver.a afl-performance.o aflpp_driver.o

aflpp_qemu_driver.o: aflpp_qemu_driver.c
-$(CC) $(CFLAGS) -O0 -funroll-loops -c aflpp_qemu_driver.c

libAFLQemuDriver.a: aflpp_qemu_driver.o
@-ar rc libAFLQemuDriver.a aflpp_qemu_driver.o
@-$(AR) rc libAFLQemuDriver.a aflpp_qemu_driver.o
@-cp -vf libAFLQemuDriver.a ../../

aflpp_qemu_driver_hook.so: aflpp_qemu_driver_hook.o

@ -19,11 +19,11 @@ HELPER_PATH = $(PREFIX)/lib/afl
VERSION = $(shell grep '^\#define VERSION ' ../../config.h | cut -d '"' -f2)

CFLAGS ?= -O3 -funroll-loops -D_FORTIFY_SOURCE=2
CFLAGS += -I ../../include/ -Wall -g -Wno-pointer-sign
override CFLAGS += -I ../../include/ -Wall -g -Wno-pointer-sign

CFLAGS_ADD=$(USEHUGEPAGE:1=-DUSEHUGEPAGE)
CFLAGS_ADD += $(USENAMEDPAGE:1=-DUSENAMEDPAGE)
CFLAGS += $(CFLAGS_ADD)
override CFLAGS += $(CFLAGS_ADD)

all: libdislocator.so

@ -41,4 +41,3 @@ install: all
install -m 755 -d $${DESTDIR}$(HELPER_PATH)
install -m 755 ../../libdislocator.so $${DESTDIR}$(HELPER_PATH)
install -m 644 -T README.md $${DESTDIR}$(HELPER_PATH)/README.dislocator.md
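The final hunk switches the libdislocator Makefile to `override CFLAGS +=`. A small sketch of the effect, assuming GNU make (the fragment and its target are hypothetical, not part of the diff): without `override`, a user running `make CFLAGS=-O3` replaces the whole variable and the appended include path and warning flags are lost; with `override ... +=` the additions survive a command-line CFLAGS.

# Hypothetical Makefile fragment (illustration only, assumes GNU make).
# Without "override", `make CFLAGS=-O3` would drop the appended flags.
CFLAGS ?= -O2
override CFLAGS += -I ../../include/ -Wall

show:
	@echo "CFLAGS = $(CFLAGS)"

# make show             ->  CFLAGS = -O2 -I ../../include/ -Wall
# make show CFLAGS=-O3  ->  CFLAGS = -O3 -I ../../include/ -Wall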