mirror of https://github.com/microsoft/onefuzz.git
synced 2025-06-14 02:58:10 +00:00

Commit: initial public release

.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file)
@@ -0,0 +1,26 @@
---
name: Bug report
about: Report errors or unexpected behavior
labels: Issue-Bug
---

<!-- Please read our Rules of Conduct: https://opensource.microsoft.com/codeofconduct/ -->
<!-- Please search existing issues to avoid creating duplicates. -->

## Information

- Onefuzz version:
- OS:

## Provide detailed reproduction steps (if any)

1.
2.
3.

### Expected result

_What is the expected result of the above steps?_

### Actual result

_What is the actual result of the above steps?_

.github/ISSUE_TEMPLATE/config.yml (vendored, new file)
@@ -0,0 +1 @@
blank_issues_enabled: false

.github/ISSUE_TEMPLATE/documentation-issue.md (vendored, new file)
@@ -0,0 +1,14 @@
---
name: Documentation Issue
about: Report issues in our documentation
title: ''
labels: Issue-Docs
assignees: ''

---

<!-- Briefly describe which document needs to be corrected and why. -->

## Provide a description of requested docs changes

_What is the purpose and what should be changed?_

.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file)
@@ -0,0 +1,8 @@
---
name: Feature request
about: Suggest an idea for this project
---
<!-- Please read our Rules of Conduct: https://opensource.microsoft.com/codeofconduct/ -->
<!-- Please search existing issues to avoid creating duplicates. -->

<!-- Describe the feature you'd like. -->

.github/pull_request_template.md (vendored, new file)
@@ -0,0 +1,18 @@
## Summary of the Pull Request

_What is this about?_

## PR Checklist
* [ ] Applies to work item: #xxx
* [ ] CLA signed. If not, go over [here](https://cla.opensource.microsoft.com/microsoft/onefuzz) and sign the CLA.
* [ ] Tests added/passed
* [ ] Requires documentation to be updated
* [ ] I've discussed this with core contributors already. If not checked, I'm ready to accept this work might be rejected in favor of a different grand plan. Issue number where discussion took place: #xxx

## Info on Pull Request

_What does this include?_

## Validation Steps Performed

_How does someone test & validate?_

.github/workflows/ci.yml (vendored, new file)
@@ -0,0 +1,236 @@
name: Build
on: [push, pull_request]

env:
  CARGO_TERM_COLOR: always

jobs:
  agent:
    runs-on: "${{ matrix.os }}"
    steps:
      - uses: actions/checkout@v2
      - uses: actions/cache@v2
        env:
          cache-name: cache-rust-target
        with:
          path: |
            src/agent/target
            ~/.cargo/registry
            ~/.cargo/git
            ~/.cargo/bin
            ~/.cache/sccache
          key: agent-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            agent-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}
            agent-${{ runner.os }}
      - name: Linux Prereqs
        run: |
          sudo apt-get -y update
          sudo apt-get -y install libssl1.0-dev libunwind-dev
        if: "${{ runner.os == 'Linux' }}"
      - run: src/ci/agent.sh
        shell: bash
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: build-artifacts
          path: artifacts
    strategy:
      matrix:
        os:
          - ubuntu-18.04
          - windows-2019
  azcopy:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - run: src/ci/azcopy.sh
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: build-artifacts
          path: artifacts
  cli:
    needs:
      - onefuzztypes
    runs-on: windows-2019
    steps:
      - uses: actions/checkout@v2
      - run: src/ci/set-versions.sh
      - uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - uses: actions/download-artifact@v2.0.5
        with:
          name: build-artifacts
          path: artifacts
      - name: Build
        shell: bash
        run: |
          set -ex
          ls artifacts
          cd src/cli
          pip install -r requirements-dev.txt
          pip-licenses -uf json > onefuzz/data/licenses.json
          python setup.py sdist bdist_wheel
          pip install -r ./requirements.txt ../../artifacts/sdk/*.whl
          pip install six
          pyinstaller onefuzz/__main__.py --onefile --name onefuzz --additional-hooks-dir extra/pyinstaller --hidden-import='pkg_resources.py2_warn' --exclude-module tkinter --exclude-module PySide2 --exclude-module PIL.ImageDraw --exclude-module Pillow
          ./dist/onefuzz.exe --version
          mkdir -p ${GITHUB_WORKSPACE}/artifacts/windows-cli/
          mkdir -p ${GITHUB_WORKSPACE}/artifacts/sdk/
          cp dist/*.tar.gz dist/*.whl ${GITHUB_WORKSPACE}/artifacts/sdk/
          cp dist/onefuzz.exe ${GITHUB_WORKSPACE}/artifacts/windows-cli/
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: build-artifacts
          path: artifacts
      - name: lint
        run: |
          cd src/cli
          pip install -r requirements-lint.txt
          flake8 .
          isort --profile black ./onefuzz ./examples/ ./tests/ --check
          mypy . --ignore-missing-imports
          pytest -v tests

          # set a minimum confidence to ignore known false positives
          vulture --min-confidence 61 onefuzz
  onefuzztypes:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - run: src/ci/set-versions.sh
      - uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - run: src/ci/onefuzztypes.sh
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: build-artifacts
          path: artifacts
  proxy:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/cache@v2
        env:
          cache-name: cache-rust-proxy
        with:
          path: |
            src/proxy-manager/target
            ~/.cargo/registry
            ~/.cargo/git
            ~/.cargo/bin
            ~/.cache/sccache
          key: proxy-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            proxy-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }}
            proxy-${{ runner.os }}
      - run: src/ci/proxy.sh
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: build-artifacts
          path: artifacts
  service:
    needs:
      - onefuzztypes
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - run: src/ci/set-versions.sh
      - uses: actions/setup-python@v2
        with:
          python-version: 3.7
      - uses: actions/download-artifact@v2.0.5
        with:
          name: build-artifacts
          path: artifacts
      - name: Build Service
        run: |
          cd src/api-service/__app__
          echo ${GITHUB_RUN_ID} | tee onefuzzlib/build.id
          echo ${GITHUB_SHA} | tee onefuzzlib/git.version
          pip install --target="./.python_packages/lib/site-packages" -r ./requirements.txt ${GITHUB_WORKSPACE}/artifacts/sdk/onefuzztypes-*.whl
          zip -r api-service.zip .
          mkdir -p ${GITHUB_WORKSPACE}/artifacts/service
          cp api-service.zip ${GITHUB_WORKSPACE}/artifacts/service
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: build-artifacts
          path: artifacts
      - name: lint
        run: |
          cd src/api-service
          rm -rf __app__/.python_packages
          pip install ${GITHUB_WORKSPACE}/artifacts/sdk/onefuzztypes-*.whl
          pip install -r __app__/requirements.txt
          pip install -r requirements-dev.txt
          pytest
          flake8 .
          isort --profile black ./__app__/ ./tests --check
          mypy __app__

          # set a minimum confidence to ignore known false positives
          vulture --min-confidence 61 __app__
  afl:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - run: src/ci/afl.sh
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: build-artifacts
          path: artifacts
  radamsa-linux:
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - run: src/ci/radamsa-linux.sh
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: build-artifacts
          path: artifacts
  package:
    needs:
      - agent
      - azcopy
      - cli
      - onefuzztypes
      - proxy
      - service
      - afl
      - radamsa-linux
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/download-artifact@v2.0.5
        with:
          name: build-artifacts
          path: artifacts
      - name: Package Onefuzz
        run: |
          set -ex
          find artifacts
          mkdir release-artifacts
          cp -r src/runtime-tools src/deployment/tools
          cp artifacts/azcopy/azcopy artifacts/azcopy/ThirdPartyNotice.txt src/deployment/tools/linux
          cp artifacts/azcopy/azcopy.exe artifacts/azcopy/ThirdPartyNotice.txt src/deployment/tools/win64
          cp artifacts/agent/onefuzz-supervisor.exe src/deployment/tools/win64/
          cp artifacts/agent/onefuzz-agent.exe src/deployment/tools/win64/
          cp artifacts/agent/onefuzz-supervisor src/deployment/tools/linux/
          cp artifacts/agent/onefuzz-agent src/deployment/tools/linux/
          cp artifacts/proxy/onefuzz-proxy-manager src/deployment/tools/linux/
          cp artifacts/service/api-service.zip src/deployment
          cp -r artifacts/third-party src/deployment
          cp -r src/agent/script/linux/libfuzzer-coverage src/deployment/tools/linux/libfuzzer-coverage
          cp -r src/agent/script/win64/libfuzzer-coverage src/deployment/tools/win64/libfuzzer-coverage
          echo $GITHUB_RUN_ID | tee src/deployment/.build.id
          echo $GITHUB_SHA | tee src/deployment/.sha
          cp CURRENT_VERSION src/deployment/VERSION
          (cd src/deployment ; zip -r onefuzz-deployment-$(cat VERSION).zip . )
          cp src/deployment/onefuzz-deployment*zip release-artifacts
          cp -r artifacts/sdk release-artifacts
          cp -r artifacts/windows-cli/onefuzz.exe release-artifacts/onefuzz-cli-$(cat CURRENT_VERSION).exe
      - uses: actions/upload-artifact@v2.1.4
        with:
          name: release-artifacts
          path: release-artifacts

.gitignore (vendored, modified)
@@ -1,10 +1,6 @@
-# Generated by Cargo
-# will have compiled files and executables
-/target/
-
-# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
-# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
-Cargo.lock
-
-# These are backup files generated by rustfmt
-**/*.rs.bk
+.env
+.vscode
+.mypy_cache/
+.idea
+**/.direnv
+**/.envrc

CHANGELOG.md (new file)
@@ -0,0 +1,9 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## 1.0.0
### Added
* Initial public release

CURRENT_VERSION (new file)
@@ -0,0 +1 @@
1.0.0

README.md (modified)
@@ -1,30 +1,85 @@
-# OneFuzz - A self-hosted Fuzzing-As-A-Service platform
+# OneFuzz
+
+## A self-hosted Fuzzing-As-A-Service platform

Project OneFuzz enables continuous developer-driven fuzzing to proactively
-harden software prior to release. With a single command, which can be baked
-into CICD, developers can launch fuzz jobs from a few virtual machines to
-thousands of cores.
+harden software prior to release. With a [single
+command](docs/getting-started.md#launching-a-job), which can be [baked into
+CICD](https://github.com/microsoft/onefuzz-samples), developers can launch
+fuzz jobs from a few virtual machines to thousands of cores.

-## Announcement
+## Build Status

-Our source code will drop in sync with [our public presentation at CppCon
-2020](https://cppcon2020.sched.com/event/e7C0/introducing-microsofts-new-open-source-fuzzing-platform)
-on September 18th, 2020.
+![Build Onefuzz](https://github.com/microsoft/onefuzz/workflows/Build/badge.svg)

-## Privacy & Cookies
+## Features

-[Microsoft Privacy Statement](https://go.microsoft.com/fwlink/?LinkId=521839)
+* **Composable fuzzing workflows**: Open source allows users to onboard their own
+  fuzzers, [swap instrumentation](docs/custom-analysis.md), and manage seed inputs.
+* **Built-in ensemble fuzzing**: By default, fuzzers work as a team to share strengths,
+  swapping inputs of interest between fuzzing technologies.
+* **Programmatic triage and result de-duplication**: It provides unique flaw cases that
+  always reproduce.
+* **On-demand live-debugging of found crashes**: It lets you summon a live debugging
+  session on-demand or from your build system.
+* **Observable and Debug-able**: Transparent design allows introspection into every
+  stage.
+* **Fuzz on Windows and Linux**: Multi-platform by design. Fuzz using your own [OS
+  build](docs/custom-images.md), kernel, or nested hypervisor.
+* **Crash reporting notification callbacks**: Including [Azure DevOps Work
+  Items](docs/notifications/ado.md) and [Microsoft Teams
+  messages](docs/notifications/teams.md)
+
+For information, check out some of our guides:
+* [Terminology](docs/terminology.md)
+* [Getting Started](docs/getting-started.md)
+* [Supported Platforms](docs/supported-platforms.md)
+* [More documentation](docs)
+
+Are you a Microsoft employee and interested in fuzzing? Join us on Teams at [Fuzzing @ Microsoft](https://aka.ms/fuzzingatmicrosoft).

## Contributing

-This project welcomes contributions and suggestions. Most contributions require you to agree to a
-Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
-the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
+This project welcomes contributions and suggestions. Most contributions require
+you to agree to a Contributor License Agreement (CLA) declaring that you have
+the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [https://cla.microsoft.com](https://cla.microsoft.com).

-When you submit a pull request, a CLA bot will automatically determine whether you need to provide
-a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
-provided by the bot. You will only need to do this once across all repos using our CLA.
+When you submit a pull request, a CLA-bot will automatically determine whether
+you need to provide a CLA and decorate the PR appropriately (e.g., label,
+comment). Simply follow the instructions provided by the bot. You will only
+need to do this once across all repositories using our CLA.

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
-For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
-contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
+or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
+additional questions or comments.
+
+## Data Collection
+
+The software may collect information about you and your use of the software and
+send it to Microsoft. Microsoft may use this information to provide services
+and improve our products and services. You may [turn off the telemetry as
+described in the
+repository](docs/telemetry.md#how-to-disable-sending-telemetry-to-microsoft).
+There are also some features in the software that may enable you and Microsoft
+to collect data from users of your applications. If you use these features, you
+must comply with applicable law, including providing appropriate notices to
+users of your applications together with a copy of Microsoft's privacy
+statement. Our privacy statement is located at
+https://go.microsoft.com/fwlink/?LinkID=824704. You can learn more about data
+collection and use in the help documentation and our privacy statement. Your
+use of the software operates as your consent to these practices.
+
+For more information:
+* [Onefuzz Telemetry Details](docs/telemetry.md)
+
+## Reporting Security Issues
+
+Security issues and bugs should be reported privately, via email, to the
+Microsoft Security Response Center (MSRC) at
+[secure@microsoft.com](mailto:secure@microsoft.com). You should receive a
+response within 24 hours. If for some reason you do not, please follow up via
+email to ensure we received your original message. Further information,
+including the [MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155)
+key, can be found in the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).

docs/FAQ.md (new file)
@@ -0,0 +1,23 @@
# Frequently Asked Questions

## Results sometimes show up before tasks are "running"

We use VM Scale Sets. Often, some of the VMs in the set provision faster than
others. Rather than wait for the entire set to begin, the agent starts on each
VM as soon as the VM is up.

## Debugging issues on scalesets

You can use `az vmss run-command` to launch commands in your VMs. As an example,
the following command in bash will recursively list `c:\onefuzz` for a given task:

```
az vmss list-instances --subscription SUBSCRIPTION -n TASK_ID -g RESOURCE_GROUP \
    --query [].id --output tsv | az vmss run-command invoke --ids @- \
    --command-id RunPowerShellScript --scripts 'Get-ChildItem -Path c:\onefuzz -Recurse'
```

On Linux VMs, use RunShellScript. On Windows VMs, use RunPowerShellScript. Note
that you will only see the last 4096 bytes of output. See
[here](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/run-command#restrictions)
for all restrictions on run-command.

docs/command-replacements.md (new file)
@@ -0,0 +1,44 @@
# Commandline Injection

The following values are replaced with the specific values at runtime.

* `{input}`: Path to the input file being processed in the current event
* `{crashes}`: Path to write crashes
* `{input_corpus}`: Path to the input corpus directory
* `{generated_inputs}`: Path to the generated inputs directory
* `{target_exe}`: Path to the target binary
* `{target_options}`: Target options (recursively expanded)
* `{output_dir}`: Path to the output directory as defined by the task
* `{input_file_name}`: the input file name with the extension (available
  wherever `input` is available)
* `{input_file_name_no_ext}`: the input file name without the extension
  (available wherever `input` is available)
* `{runtime_dir}`: Path to the runtime directory for the task
* `{tools_dir}`: Path to the task specific `tools` directory

## Example

Assume the following:

* `supervisor_options` is: "a", "{target_options}", "b"
* `target_options` is: "c", "{target_exe}"
* `target_exe` is: "d"

The resulting `supervisor_options` is: "a", "c", "d", "b"
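
The expansion is recursive: a replacement may itself contain placeholders, which
are expanded in turn. The following is a minimal illustrative sketch of that
behavior, not OneFuzz's actual implementation:

```python
# Illustrative sketch of recursive placeholder expansion (not the actual
# OneFuzz implementation).

def expand(args, values):
    # Expand {placeholder} entries; a placeholder mapping to a list of
    # arguments is spliced in and expanded again recursively.
    result = []
    for arg in args:
        key = arg[1:-1] if arg.startswith("{") and arg.endswith("}") else None
        if key in values:
            replacement = values[key]
            if isinstance(replacement, list):
                result.extend(expand(replacement, values))
            else:
                result.append(replacement)
        else:
            result.append(arg)
    return result

values = {"target_options": ["c", "{target_exe}"], "target_exe": "d"}
print(expand(["a", "{target_options}", "b"], values))
# ['a', 'c', 'd', 'b']
```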

## Uses

These are currently used in the following tasks:

* libfuzzer_fuzz: `target_exe`, `target_options`, `input_corpus`, `crashes`
* libfuzzer_crash_report: `target_exe`, `target_options`, `input`
* libfuzzer_merge: `target_exe`, `target_options`, `input_corpus`
* libfuzzer_coverage: NONE (see work item #129626)
* generic_analysis: `input`, `target_exe`, `target_options`, `analyzer_exe`,
  `analyzer_options`, `output_dir`, `tools_dir`
* generic_generator: `generated_inputs`, `input_corpus`, `tools_dir`,
  `generator_exe`, `generator_options`, `target_exe`, `target_options`, `input`
* generic_supervisor: `crashes`, `runtime_dir`, `target_exe`, `target_options`,
  `input_corpus`, `input`, `supervisor_exe`, `supervisor_options`, `tools_dir`
* generic_merge: `input`, `input_corpus`, `output_dir`, `target_exe`,
  `target_options`, `supervisor_exe`, `supervisor_options`, `tools_dir`

docs/comms-channels.md (new file)
@@ -0,0 +1,29 @@
# Various internal comms channels used in Onefuzz

Storage Queues:

* Per pool queue. These are used to queue Worksets to nodes in a given pool.
  (location: `<pool_id>` on `fuzz` storage account)

* Per task queue. These are used for tasks that use an input queue, such as "new
  crashing inputs" (location: `<task_id>` on `fuzz` storage account)

* Per instance 'Heartbeat' queue. Agents send status 'heartbeat' messages to the
  API service (location: `heartbeats` on `func` storage account)

* Oauth2 enabled `Backchannel` HTTP endpoint. Agents send & receive messages via
  this endpoint:

  * location: `agent/commands` on `azure functions` instance
  * location: `agent/events` on `azure functions` instance
  * location: `agent/register` on `azure functions` instance

* Per instance file change notification queue. Event Grid posts file-change
  notifications for the `fuzz` storage account (location: `file-changes` on
  `func` storage account)

* Per instance proxy queue. Scaleset proxy agents send status updates to the API
  service (location: `proxy` on `func` storage account)

* Per instance update queue. The API service uses this to queue future updates
  (location: `update-queue` on `func` storage account)

docs/containers.md (new file)
@@ -0,0 +1,68 @@
# Containers in OneFuzz

OneFuzz stores task inputs and outputs in
[Azure Blob Storage Containers](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction).
Each fuzzing task has a set of required and potentially optional containers that
are used in a specific context.

As an example, the libFuzzer fuzzing task uses the following:

* `setup`: A container with the libFuzzer target executable and any prerequisites
  (shared objects, DLLs, config files, etc)
* `crashes`: A container used to store any crashing input
* `inputs`: A container of an initial corpus of seeding input for the libFuzzer
  target. Any newly discovered inputs are also saved to this container. All
  files saved in the `inputs` container are bidirectionally synced with the blob
  store.
* `readonly_inputs`: An arbitrary set of additional input seed corpus containers.
  This container automatically pulls new files _from_ the blob store, but
  nothing saved to these containers on the fuzzing VM is synced _to_ the
  container.

Tasks can target a container for an input queue. As an example, the crash
reporting tasks queue off of specified `crashes` containers, processing files
iteratively from the queue.

## Considerations on naming Containers

Users can create arbitrary containers (see
[Container Name Requirements](https://docs.microsoft.com/en-us/rest/api/storageservices/Naming-and-Referencing-Containers--Blobs--and-Metadata#container-names)),
including the ability to set
[arbitrary metadata](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-and-retrieving-properties-and-metadata-for-blob-resources)
for a container.

Templates build container names from the context in which they are used (such
as setup) and a namespaced GUID, to enable automatic re-use of containers
across multiple builds of the same target. NOTE: A helper utility is available
to craft namespaced GUIDs: `onefuzz utils namespaced_guid`.

As an example, setup containers are namespaced with the `project`, `name`,
`build`, and `platform` (Linux or Windows). All other containers (inputs,
crashes, coverage, etc) use `project` and `name`.
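
A minimal sketch of how such a deterministic, namespaced container name could be
derived is below. The exact hashing scheme OneFuzz uses may differ; the helper
utility above is the authoritative source.

```python
# Sketch only: derive a stable, namespaced container name from task
# metadata. The exact scheme OneFuzz uses may differ; see
# `onefuzz utils namespaced_guid` for the real helper.
import hashlib

def namespaced_container(prefix, *fields):
    # Hash the identifying fields into a stable hex suffix so that the
    # same project/name (plus build/platform, for setup) always maps to
    # the same container.
    digest = hashlib.md5("|".join(fields).encode()).hexdigest()
    return f"oft-{prefix}-{digest}"

# Setup containers include build and platform; the others do not, so
# they are shared across builds of the same target.
print(namespaced_container("setup", "myproject", "mytarget", "build_1", "linux"))
print(namespaced_container("inputs", "myproject", "mytarget"))
```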

Example:

The `libfuzzer` template with the project 'myproject', the name of 'mytarget',
and build of 'build_1' on the Linux platform uses the following:

* oft-setup-fd4addc373f3551caf780e80abaaa658
* oft-inputs-d532156b72765c21be5a29f73718af7e
* oft-crashes-d532156b72765c21be5a29f73718af7e
* oft-reports-d532156b72765c21be5a29f73718af7e
* oft-unique-reports-d532156b72765c21be5a29f73718af7e
* oft-no-repro-d532156b72765c21be5a29f73718af7e
* oft-coverage-d532156b72765c21be5a29f73718af7e

The same target, but build_2, uses the following containers:

* oft-setup-270ee492f18c5f71a0a3e1cffcb98f77
* oft-inputs-d532156b72765c21be5a29f73718af7e
* oft-crashes-d532156b72765c21be5a29f73718af7e
* oft-reports-d532156b72765c21be5a29f73718af7e
* oft-unique-reports-d532156b72765c21be5a29f73718af7e
* oft-no-repro-d532156b72765c21be5a29f73718af7e
* oft-coverage-d532156b72765c21be5a29f73718af7e

The only difference is a unique `oft-setup` container.

In these examples, `oft` stands for *O*ne*F*uzz *T*emplate.

docs/custom-analysis.md (new file)
@@ -0,0 +1,28 @@
# Custom Analysis Tasks

OneFuzz supports the ability to create user-defined analysis tasks, enabling
custom triage of crashes.

## Example use case

Users can automatically record the output of
[!analyze](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/using-the--analyze-extension)
for each crash using a `generic_analysis` task with an `analyzer_exe` of `cdb`
and `analyzer_options` of

```
[
    "-c", "!analyze;q", "-logo", "{output_dir}\\{input_file_name_no_ext}.report",
    "{target_exe}", "{target_options}"
]
```

For a crash named `mycrash.txt`, this will create `mycrash.report` in the
`analysis` container.

This can be seen in the [radamsa](../src/cli/onefuzz/templates/radamsa.py)
template for any Windows targets.

See also:

* [Command Replacements](command-replacements.md)

docs/custom-images.md (new file)
@@ -0,0 +1,23 @@
# Fuzzing using Custom OS Images

In order to use custom OS images in OneFuzz, the image _must_ run the
[Azure VM Agent](https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/overview).

Building custom images can be automated using the
[Linux](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/image-builder)
or
[Windows](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/image-builder)
image builders for Azure.

If you have a custom Windows VHD, you should follow the
[Guide to prepare a VHD for Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/prepare-for-upload-vhd-image).

From there, rather than using Image SKUs such as
`Canonical:UbuntuServer:18.04-LTS:latest`, use the full resource ID of the
shared image, such as
`/subscriptions/MYSUBSCRIPTION/resourceGroups/MYGROUP/providers/Microsoft.Compute/galleries/MYGALLERY/images/MYDEFINITION/versions/MYVERSION`.

The images must be hosted in a
[Shared Image Gallery](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/shared-image-galleries).
The Service Principal for the OneFuzz instance must have RBAC to the shared
image gallery sufficient to deploy the images.

docs/getting-started.md (new file)
@@ -0,0 +1,211 @@
# Getting started using Onefuzz

If you have access to an existing Onefuzz instance, skip ahead to "Deploying Jobs".

## Choosing a subscription

An instance of OneFuzz is a collection of Azure resources contained within a single Azure resource group.
If it doesn't already exist, this resource group will be created for you when running the deploy script.
However, the subscription itself must exist and have the following Azure
[Resource Providers](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider)
registered:
- `Microsoft.EventGrid`
- `Microsoft.Network`
- `Microsoft.Compute`

## Deploying an instance of Onefuzz

From the [Latest Release of
Onefuzz](https://github.com/microsoft/onefuzz/releases) download the
`onefuzz-deployment` package.

On a host with the [Azure CLI logged
in](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest),
do the following:

```
unzip onefuzz-deployment-$VERSION.zip
pip install -r requirements.txt
./deploy.py $REGION $RESOURCE_GROUP_NAME $ONEFUZZ_INSTANCE_NAME $CONTACT_EMAIL_ADDRESS
```

When running `deploy.py` the first time for an instance, you will be prompted
to follow a manual step to initialize your CLI config.

## Install the CLI

Download the Python SDK (make sure to download both `onefuzz` and `onefuzztypes`)
from the [Latest Release of Onefuzz](https://github.com/microsoft/onefuzz/releases).

If you're using the SDK, install via:

```
pip install ./onefuzz*.whl
```

### Connecting to your instance

Use the `onefuzz config` command to specify your instance of Onefuzz.
This example uses the MSR hosted playground instance (only available
to Microsoft employees).

```
$ onefuzz config --endpoint https://onefuzz-playground.azurewebsites.net
$ onefuzz versions check --exact
"compatible"
$
```

From here, you can use Onefuzz.

#### Using Onefuzz Playground

Microsoft employees are welcome to use MSR's Playground instance of OneFuzz for
experimentation purposes, but we reserve the right to stop or limit fuzzing
jobs. [Come talk to us](https://aka.ms/fuzzingatmicrosoft) about running OneFuzz
for your group!

## Creating Worker Pools

Onefuzz distributes tasks to pools of workers, and manages workers using [VM Scalesets](https://azure.microsoft.com/en-us/services/virtual-machine-scale-sets/).

To create a pool:

```
$ onefuzz pools create my-pool linux --query pool_id
"9e779388-a9c2-4934-9fa2-6ed6f6a7792a"
$
```

To create a managed scaleset of Ubuntu 18.04 VMs using a [general purpose
Azure VM](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) that
belongs to the pool:

```
$ onefuzz scalesets create my-pool $VM_COUNT
{
    "image": "Canonical:UbuntuServer:18.04-LTS:latest",
    "pool_name": "my-pool",
    "region": "eastus",
    "scaleset_id": "eb1e9602-4acf-40b8-9216-a5d598d27195",
    "size": 3,
    "spot_instances": false,
    "state": "init",
    "tags": {},
    "vm_sku": "Standard_DS1_v2"
}
$
```

## Deploying Jobs

Users can deploy fuzzing jobs using customized fuzzing pipelines or using
pre-built templates.

For most use cases, pre-built templates are the best choice.

### Building a Libfuzzer Target

Building your first target to run in OneFuzz:

```
$ git clone -q https://github.com/microsoft/onefuzz-samples
$ cd onefuzz-samples/examples/simple-libfuzzer
$ make
clang -g3 -fsanitize=fuzzer -fsanitize=address fuzz.c -o fuzz.exe
$
```

### Launching a Job

With a built fuzzing target, launching a libFuzzer-based job can be done in
a single command:

```
$ onefuzz template libfuzzer basic my-project my-target build-1 my-pool --target_exe fuzz.exe
INFO:onefuzz:creating libfuzzer from template
INFO:onefuzz:creating job (runtime: 24 hours)
INFO:onefuzz:created job: a6eda06f-d2e3-4a50-8754-1c1de5c6ea23
INFO:onefuzz:using container: oft-setup-f83f5d9b34305bf98ee56a5944fb5fa3
INFO:onefuzz:using container: oft-inputs-14b8ea05ca635426bd9ccf3ee71b2e45
INFO:onefuzz:using container: oft-crashes-14b8ea05ca635426bd9ccf3ee71b2e45
INFO:onefuzz:using container: oft-reports-14b8ea05ca635426bd9ccf3ee71b2e45
INFO:onefuzz:using container: oft-unique-reports-14b8ea05ca635426bd9ccf3ee71b2e45
INFO:onefuzz:using container: oft-no-repro-14b8ea05ca635426bd9ccf3ee71b2e45
INFO:onefuzz:using container: oft-coverage-14b8ea05ca635426bd9ccf3ee71b2e45
INFO:onefuzz:uploading target exe `fuzz.exe`
INFO:onefuzz:creating libfuzzer task
INFO:onefuzz:creating libfuzzer_coverage task
INFO:onefuzz:creating libfuzzer_crash_report task
INFO:onefuzz:done creating tasks
$
```

### Launching a job from the SDK

Every action from the CLI is exposed in the SDK. Launching the same template as above
can be done with the Python SDK:

```
$ python
>>> from onefuzz.api import Onefuzz
>>> Onefuzz().template.libfuzzer.basic('my-project', 'my-target', 'build-1', 'my-pool', target_exe="fuzz.exe")
>>>
```

## Investigating Crashes

Enabling [notifications](notifications.md) provides automatic reporting of identified
crashes. The CLI can be used to manually monitor for crash reports:

```
$ onefuzz jobs containers list a6eda06f-d2e3-4a50-8754-1c1de5c6ea23 --container_type unique_reports
{
    "oft-unique-reports-05ca06fd172b5db6a862a38e95c83730": [
        "972a371a291ed5668a77576368ead0c46c2bac9f9a16b7fa7c0b48aec5b059b1.json"
    ]
}
$
```

Then view the results of a crash report with [jq](https://github.com/stedolan/jq):

```
$ onefuzz containers files get oft-unique-reports-05ca06fd172b5db6a862a38e95c83730 972a371a291ed5668a77576368ead0c46c2bac9f9a16b7fa7c0b48aec5b059b1.json > report.json
$ jq .call_stack report.json
[
  "#0 0x4fd706 in LLVMFuzzerTestOneInput /home/vsts/work/1/s/sample-target.c:50:83",
  "#1 0x43b271 in fuzzer::Fuzzer::ExecuteCallback(unsigned char const*, unsigned long) (/onefuzz/setup/fuzz.exe+0x43b271)",
  "#2 0x423767 in fuzzer::RunOneTest(fuzzer::Fuzzer*, char const*, unsigned long) (/onefuzz/setup/fuzz.exe+0x423767)",
  "#3 0x429741 in fuzzer::FuzzerDriver(int*, char***, int (*)(unsigned char const*, unsigned long)) (/onefuzz/setup/fuzz.exe+0x429741)",
  "#4 0x4557a2 in main (/onefuzz/setup/fuzz.exe+0x4557a2)",
  "#5 0x7ffff6a9bb96 in __libc_start_main /build/glibc-2ORdQG/glibc-2.27/csu/../csu/libc-start.c:310",
  "#6 0x41db59 in _start (/onefuzz/setup/fuzz.exe+0x41db59)"
]
$
```

### Live debugging of a crash sample

Using the crash report, Onefuzz can enable live remote debugging of the crash
using a platform-appropriate debugger (gdb for Linux and cdb for Windows):

```
$ onefuzz repro create_and_connect get oft-unique-reports-05ca06fd172b5db6a862a38e95c83730 972a371a291ed5668a77576368ead0c46c2bac9f9a16b7fa7c0b48aec5b059b1.json
INFO:onefuzz:creating repro vm: get oft-unique-reports-05ca06fd172b5db6a862a38e95c83730 972a371a291ed5668a77576368ead0c46c2bac9f9a16b7fa7c0b48aec5b059b1.json (24 hours)
INFO:onefuzz:connecting to reproduction VM: c6525b82-7269-45ee-8a62-2d9d61d1e269
- launching reproducing vm. current state: extensions_launch
Remote debugging using :1337
Reading /onefuzz/setup/fuzz.exe from remote target...
warning: File transfers from remote targets can be slow. Use "set sysroot" to access files locally instead.
Reading /onefuzz/setup/fuzz.exe from remote target...
Reading symbols from target:/onefuzz/setup/fuzz.exe...done.
Reading /lib64/ld-linux-x86-64.so.2 from remote target...
Reading /lib64/ld-linux-x86-64.so.2 from remote target...
Reading symbols from target:/lib64/ld-linux-x86-64.so.2...Reading /lib64/ld-2.27.so from remote target...
Reading /lib64/.debug/ld-2.27.so from remote target...
(no debugging symbols found)...done.
0x00007ffff7dd6090 in ?? () from target:/lib64/ld-linux-x86-64.so.2
(gdb) info reg rip
rip            0x7ffff7dd6090      0x7ffff7dd6090
(gdb)
```

docs/how-to/add-edit-workbook.md (new file)
@@ -0,0 +1,83 @@
# Add or Edit an Azure Monitor Workbook

## About

Azure Monitor Workbooks are a way to provide query-driven, lightweight reporting
from within the Azure Portal. You can read more about them
[here][workbooks-overview].

Workbooks can be deployed via ARM templates, and OneFuzz ships some out of the
box.

[workbooks-overview]:
https://docs.microsoft.com/en-us/azure/azure-monitor/platform/workbooks-overview

## Steps

1. Create or edit a workbook in the Azure Portal

   Create a new workbook instance, or open an existing instance in the Azure
   Portal. Add parameters, queries, charts, and other elements, as desired.

2. Extract the `serializedData` that describes the workbook

   While viewing an open workbook instance:

   > 1. Click the "Edit" button.
   >
   > 2. Click the Advanced Editor button (uses the `</>` "code" icon)
   >
   > 3. Click the "ARM Template" tab.
   >
   > 4. In the displayed JSON, copy the string value of the
   >    `resources.properties.serializedData` field. Be sure to include the outer
   >    double quotes, so the copied value is a serialized JSON object.

3. Update `workbook-data.json`

   Each workbook is stored as a serialized JSON string value in
   `deployments/workbook-data.json`.

   The serialized workbook data will be referenced in `azuredeploy.json` using the
   property in `workbook-data.json`.

   The value must be the exact string you copied from the example ARM Template in
   the Advanced Editor view.

   If adding a new workbook, add a new property and value. If editing a workbook,
   overwrite the existing value.
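
   This copy-paste can also be scripted. Below is a minimal sketch, assuming you
   saved the copied `serializedData` string (outer quotes included) to a file;
   the property name `myWorkbook` is hypothetical:

   ```python
   # Sketch: insert the serializedData copied in step 2 into
   # deployments/workbook-data.json. The property name "myWorkbook" is
   # hypothetical; use the name that azuredeploy.json will reference.
   import json

   with open("copied-serialized-data.txt") as f:
       # The file holds the quoted JSON string copied from the ARM
       # Template tab; json.load() turns it back into a plain string.
       serialized_data = json.load(f)

   with open("deployments/workbook-data.json") as f:
       workbooks = json.load(f)

   workbooks["myWorkbook"] = serialized_data

   with open("deployments/workbook-data.json", "w") as f:
       json.dump(workbooks, f, indent=4)
   ```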

4. Ensure the resource is deployed in `azuredeploy.json`

   To actually deploy a workbook instance, you must include it as a resource in
   `azuredeploy.json`.

   It should be a child resource of the Log Analytics workspace resource
   (`Microsoft.Insights/components` component).

   Example:

   ```json
   {
       "name": "<uuid>",
       "type": "microsoft.insights/workbooks",
       "location": "[resourceGroup().location]",
       "apiVersion": "2018-06-17-preview",
       "dependsOn": [
           "[resourceId('Microsoft.Insights/components', parameters('name'))]"
       ],
       "kind": "shared",
       "properties": {
           "displayName": "<display-name>",
           "serializedData": "[parameters('workbookData').<workbook-property>]",
           "version": "1.0",
           "sourceId": "[resourceId('Microsoft.Insights/components', parameters('name'))]",
           "category": "tsg"
       }
   }
   ```

   In the above, `<uuid>` is any unique UUID of your choosing. The `<display-name>`
   value is the workbook display name, and `<workbook-property>` should be the
   property name in `workbook-data.json` that maps to the `serializedData` for your
   workbook.

docs/known-issues.md (new file)
@@ -0,0 +1,34 @@
# Known Issues

## Initial Deployments

1. A website with `myname` already exists

   This means someone is already using `myname.azurewebsites.net`. You'll need
   to pick a different name for your onefuzz instance.

1. The workspace name 'logs-wb-XXX' is not unique

   This means the workbook created by the onefuzz deployment is already
   allocated in a different resource group, even if said resource group has
   been deleted. Either:

   1. Use a new resource group name
   1. Delete the referenced workbook manually following [Migrating Regions](migrating-regions.md)
   1. Wait a few weeks for Azure to automatically delete the deleted workbook.

1. PrincipalNotFound: Principal XXX does not exist in the directory YYY

   This means you encountered a race condition from the System allocated
   service principal for the function app deployment. You should be able
   to rerun the deploy script without issue.

1. Azure.Functions.Cli.Arm.ArmResourceNotFoundException: Can't find app with
   name "XXX"

   The resources for the onefuzz instance were deployed, but the SCM component
   of Azure Functions was not available yet. This race condition is caused by
   ARM reporting the deployment as finished too early. Retry the deployment and
   the error should be corrected automatically.

   Work item: #122629

docs/managed-identities.md (new file)
@@ -0,0 +1,23 @@
# Managed Identities in OneFuzz

Onefuzz makes use of
[Managed identities](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview)
both in the API service as well as the managed VMs.

There are currently two uses of Managed Identities within Onefuzz:

1. The API service manages the full lifecycle of VMs, VM Scalesets, and Networks
   in use in OneFuzz. In order to enable this, the service must have appropriate
   role assignments to manage these resources. At the moment, the
   role assignments granted to the Onefuzz API are:

   1. [Virtual Machine Contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#virtual-machine-contributor)
   1. [Network Contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#network-contributor)
   1. [Log Analytics Contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#log-analytics-contributor)

   See [azuredeploy.json](../src/deployment/azuredeploy.json) for the specific
   implementation of these role assignments.

1. VMs created by Onefuzz are created using Managed Identities without roles
   assigned, in order to enable the Onefuzz agent running in the VMs to
   authenticate to the service itself.

docs/migrating-regions.md (new file)
@@ -0,0 +1,57 @@
# Instructions

To migrate an instance to a new region, do the following:

1. Manually hard-delete the Azure Monitor workbook / Log Analytics Workspace.
   (See instructions below)
1. Delete the resource group. (Example: `az group delete -y GROUP_NAME`)
1. Delete the
   [RBAC entry](https://ms.portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps)
1. Deploy the instance using the new region name

If you try deleting the resource group and moving to a different region without
the above procedure, you'll get an error like this:

`"The workspace name 'logs-wb-XXXX' is not unique"`

## Hard-deleting a Log Analytics Workspace

Full, official instructions for deleting a Log Analytics Workspace can be found
[here](https://docs.microsoft.com/en-us/azure/azure-monitor/platform/delete-workspace#permanent-workspace-delete).
Review this page before continuing.

To summarize, you need your subscription ID, resource group name, Log Analytics
workspace name, and a valid bearer token to authenticate a REST API (only!)
DELETE request.

To quickly, manually get a bearer token, you can go to any Azure REST API
documentation page with a green "Try It" button. For example,
[this one](https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/list).

Click "Try It", authenticate, and you will find a valid `Authorization` header
in the "Preview" section at the bottom of the "REST API Try It" pane. Copy the
_entire_ header value (i.e. `Authorization: Bearer <base64-bearer-token-data>`).
Remember, this is a credential (and will expire), so do not log it, track it in
version control, etc.

You can then edit the following bash script template (or equivalent) to
permanently delete the workspace:

```bash
#!/usr/bin/env bash

# Set to the value copy-pasted from the Try It pane, like:
# "Authorization: Bearer ${BEARER}"
AUTH_H='<copied-authorization-header>'

# Set these variables using values from your resource group.
SUBSCRIPTION='<subscription-id>'
RESOURCE_GROUP='<resource-group-name>'
WORKSPACE_NAME='<log-analytics-workspace-name>'

# Does not need to be edited.
URL="https://management.azure.com/subscriptions/${SUBSCRIPTION}/resourcegroups/${RESOURCE_GROUP}/providers/Microsoft.OperationalInsights/workspaces/${WORKSPACE_NAME}?api-version=2015-11-01-preview&force=true"

# Requires the cURL command.
curl -X DELETE -H "${AUTH_H}" "${URL}"
```

docs/notifications.md (new file)
@@ -0,0 +1,41 @@
# Notifications in Onefuzz

Onefuzz supports built-in [container monitoring](containers.md) and reporting
via Notifications. Onefuzz notifications monitor user-specified containers
for changes and send notifications upon new file creation.

## Features

* Arbitrary notification integrations per container
* Integration is tied to the containers, not tasks, enabling monitoring of
  container use outside of Onefuzz

## Implementation

Notifications can be created via the CLI:

`onefuzz notifications create <CONTAINER> <CONFIG>`: Create a notification using a JSON config (See [onefuzztypes.models.NotificationConfig](../src/pytypes/onefuzztypes/models.py)
for syntax)

Existing notifications can be viewed via:

`onefuzz notifications list`

Existing notifications can be deleted via:

`onefuzz notifications delete <CONTAINER> <NOTIFICATION_ID>`

NOTE: While notifications are tied to containers, not tasks, the job templates
support creating notifications during execution. Example:

```
onefuzz template libfuzzer basic my-project target-1 build-1 --notification_config @./notifications.json
```

You can specify a path to a file using `@/path/to/file` syntax, or specify the
JSON via a string, such as `'{"config":{...}}'`
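
Since every CLI action is also exposed in the Python SDK, a notification can be
created programmatically as well. A minimal sketch follows; the exact method
signature is an assumption, and the CLI commands above are the documented
interface:

```python
# Sketch: create a notification from the Python SDK. The method
# signature shown here is an assumption; see the SDK source for the
# authoritative interface.
from onefuzz.api import Onefuzz

config = {
    "config": {
        # e.g. a Teams incoming webhook (see notifications/teams.md)
        "url": "<INCOMING_WEBHOOK_HERE>",
    }
}

Onefuzz().notifications.create("<CONTAINER>", config)
```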

## Supported integrations

* [Microsoft Teams](notifications/teams.md)
* [Azure Devops Work Items](notifications/ado.md)

docs/notifications/ado.md (new file)
@@ -0,0 +1,149 @@
# Azure Devops Work Item creation

Automatic creation of ADO Work Items from OneFuzz allows the user to
customize any field using [jinja2](https://jinja.palletsprojects.com/)
templates.

Multiple Python objects are provided via the template engine, so that any
arbitrary component of them can be used to flesh out the configuration:

* task (See [TaskConfig](../../src/pytypes/onefuzztypes/models.py))
* report (See [Report](../../src/pytypes/onefuzztypes/models.py))
* job (See [JobConfig](../../src/pytypes/onefuzztypes/models.py))

Using these objects allows dynamic configuration. As an example, the `project`
could be specified directly, or dynamically pulled from a template:

```json
{
    "project": "{{ task.tags['project'] }}"
}
```

There are additional values that can be used in any template:

* report_url: This will link to an authenticated download link for the report
* input_url: This will link to an authenticated download link for the crashing input
* target_url: This will link to an authenticated download link for the target
  executable
* repro_cmd: This will give an example command to initiate a live reproduction
  of a crash
* report_container: This will give the name of the report storage container
* report_filename: This will give the container-relative path to the report

Note: _most_, but not all, fields in ADO accept HTML. If you want a URL to be
clickable, make it a link.

# Example Config

```json
{
    "config": {
        "base_url": "https://dev.azure.com/org_name",
        "auth_token": "ADO_AUTH_TOKEN",
        "type": "Bug",
        "project": "Project Name",
        "ado_fields": {
            "System.AreaPath": "Area Path Here",
            "Microsoft.VSTS.Scheduling.StoryPoints": "1",
            "System.IterationPath": "Iteration\\Path\\Here",
            "System.Title": "{{ report.crash_site }} - {{ report.executable }}",
            "Microsoft.VSTS.TCM.ReproSteps": "This is my call stack: <ul> {% for item in report.call_stack %} <li> {{ item }} </li> {% endfor %} </ul>"
        },
        "comment": "This is my comment. {{ report.input_sha256 }} {{ input_url }} <br> <pre>{{ repro_cmd }}</pre>",
        "unique_fields": ["System.Title", "System.AreaPath"],
        "on_duplicate": {
            "comment": "Another <a href='{{ input_url }}'>POC</a> was found in <a href='{{ target_url }}'>target</a>. <br> <pre>{{ repro_cmd }}</pre>",
            "set_state": { "Resolved": "Active" },
            "ado_fields": {
                "System.IterationPath": "Iteration\\Path\\Here2"
            },
            "increment": ["Microsoft.VSTS.Scheduling.StoryPoints"]
        }
    }
}
```

# How to uniquely identify work items

The `unique_fields` entry is used as a tuple to uniquely identify an ADO work item.
Given a report crash site of "example" and an executable of "fuzz.exe", the above
configuration evaluates to the following
[Wiql](https://docs.microsoft.com/en-us/azure/devops/boards/queries/wiql-syntax?view=azure-devops)
query:

```
select [System.Id] from WorkItems where
[System.Title] = "example - fuzz.exe" AND
[System.AreaPath] = "Area Path Here"
```

This allows for customized ADO work item de-duplication.

_NOTE_: While work items are created serially, ADO work item creation has enough
latency that created work items do not always immediately show up in these
queries. This may cause spurious duplicate work items when duplicate crash
reports occur in rapid succession.

# On creating a new work item

If no existing work items match the aforementioned tuple, a new work item is
created.

1. Define arbitrary rendered fields to be created.
2. Optionally provide a rendered comment. To not comment on the new work item,
   remove the `comment` field.

# On identifying duplicate work items

There are multiple configurable actions that can be performed upon finding a
duplicate work item.

1. Add a rendered comment to the original work item. To not comment on
   duplicate work items, remove the `comment` field within `on_duplicate`.
2. Replace any field with a rendered value. In the above example,
   `System.IterationPath` is replaced with `Iteration\\Path\\Here2` whenever a
   duplicate bug is found.
3. Increment any number of arbitrary fields. In the above example,
   `Microsoft.VSTS.Scheduling.StoryPoints` is initially set to 1 and incremented
   each time a duplicate crash report is found. To not increment any field, set
   it to an empty array.

# To provide no change on duplicate work items

To do nothing on duplicate reports, use the following `on_duplicate` entries:

```json
"on_duplicate": {
    "comment": null,
    "set_state": {},
    "fields": {},
    "increment": []
}
```

In the CLI, don't provide any of the `--on_dup_*` arguments.

# Example CLI usage

To create a similar configuration monitoring the container
oft-my-demo-job-reports, use the following command:

```bash
onefuzz notifications create_ado oft-my-demo-job-reports \
    "Project Name" https://dev.azure.com/org_name \
    ADO_AUTH_TOKEN Bug System.Title System.AreaPath \
    --fields \
        System.AreaPath=OneFuzz-Ado-Integration \
        Microsoft.VSTS.Scheduling.StoryPoints=1 \
        "System.IterationPath=Iteration\\Path\\Here" \
        "System.Title={{ report.crash_site }} - {{ report.executable }}" \
        "Microsoft.VSTS.TCM.ReproSteps=This is my call stack: <ul> {% for item in report.call_stack %} <li> {{ item }} </li> {% endfor %} </ul>" \
    --comment "This is my comment. {{ report.input_sha256 }} {{ input_url }}" \
    --on_dup_comment "Another <a href='{{ input_url }}'>POC</a> was found in <a href='{{ target_url }}'>target</a>" \
    --on_dup_set_state Resolved=Active \
    --on_dup_fields "System.IterationPath=Iteration\\Path\\Here2" \
    --on_dup_increment Microsoft.VSTS.Scheduling.StoryPoints
```

docs/notifications/teams-message.png (new executable file, binary, 57 KiB; not shown)

39
docs/notifications/teams.md
Normal file
39
docs/notifications/teams.md
Normal file
@ -0,0 +1,39 @@

# Notifications via Microsoft Teams

Onefuzz can send notifications to [Microsoft Teams](https://docs.microsoft.com/en-us/microsoftteams/) channels upon new file creation within Onefuzz managed [containers](../containers.md).

## Notifications

Upon creation of new crash reports (see [onefuzztypes.models.Report](../../src/pytypes/onefuzztypes/models.py))
in monitored containers, a detailed notification is sent, including crash
details and download links to the report, executable, and crashing input.

Upon creation of any other file, a trimmed down notification is sent with the
name of the file and a download link.

For this purpose, most users will want to monitor `reports` or `unique_reports` rather than `crashes` containers.

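As an illustration, these are some of the fields of a crash report (abridged
from the report shown in the screencast docs; paths and GUIDs will differ per
instance):

```json
{
  "executable": "setup/fuzz.exe",
  "crash_type": "heap-use-after-free",
  "crash_site": "AddressSanitizer: heap-use-after-free fuzz.c:45:51 in LLVMFuzzerTestOneInput",
  "task_id": "077ef033-a7a3-4d0f-9b0d-e9f8aa517a03",
  "job_id": "9b0f7765-d9f2-4906-9e98-1678da9b929a"
}
```
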
## Integration

1. Create an [incoming webhook in Teams](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/connectors-using#setting-up-a-custom-incoming-webhook).
1. Add a notification to your Onefuzz instance.

```bash
onefuzz notifications create <CONTAINER> @./config.json
```

Until the integration is deleted, whenever a crash report is written to the
indicated container, the webhook will be used to send a notification of the
crash.

## Example Config

```json
{
    "config": {
        "url": "<INCOMING_WEBHOOK_HERE>"
    }
}
```
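
To verify the webhook outside of OneFuzz, you can post a test message to it
directly (a sketch; Teams incoming webhooks accept a simple JSON text payload):

```bash
curl -H 'Content-Type: application/json' \
    -d '{"text": "OneFuzz webhook test"}' \
    '<INCOMING_WEBHOOK_HERE>'
```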

## Screenshot

![Teams message](teams-message.png)
12
docs/screencasts/README.md
Normal file
@ -0,0 +1,12 @@

# Screencasts of using Onefuzz

## Launching a Job

![launching a job](launching-job.gif)

## Live debugging a crash

![live debugging](live-debugging.gif)

Note: Launching the VM is sped up in this screencast. Launching VMs for crash
reproduction currently takes approximately 2 minutes for Linux and 7 minutes
for Windows.
177
docs/screencasts/launching-job.cast
Normal file
@ -0,0 +1,177 @@
{"version": 2, "width": 144, "height": 44, "timestamp": 1600442785, "env": {"SHELL": "/bin/bash", "TERM": "xterm-256color"}}
[asciinema v2 event stream elided: the recording builds a libFuzzer target with
clang, launches a job via `onefuzz template libfuzzer basic`, watches progress
with `onefuzz status top`, and fetches the resulting crash report with
`onefuzz containers files get ... | jq`]
BIN
docs/screencasts/launching-job.gif
Normal file
Binary file not shown.
After Width: | Height: | Size: 2.3 MiB |
109
docs/screencasts/live-debugging.cast
Normal file
@ -0,0 +1,109 @@
{"version": 2, "width": 138, "height": 40, "timestamp": 1600440726, "env": {"SHELL": "/bin/bash", "TERM": "xterm-256color"}}
[asciinema v2 event stream elided: the recording runs
`onefuzz repro create_and_connect` to launch a repro VM, attaches gdb to
/onefuzz/setup/fuzz.exe over SSH, and walks the backtrace of the SIGSEGV at
fuzz.c:20]
BIN
docs/screencasts/live-debugging.gif
Normal file
Binary file not shown.
After Width: | Height: | Size: 3.8 MiB |
20
docs/ssh-config.md
Normal file
@ -0,0 +1,20 @@

# SSH within OneFuzz

Onefuzz enables automatically connecting to fuzzing & crash repro nodes via SSH.
Each VM and VM scale set has its own SSH key pair.

On Linux VMs, the public key is written to `~onefuzz/.ssh/authorized_keys`.

For Windows VMs, the public key is written to
`$env:ProgramData\ssh\administrators_authorized_keys` following
[Windows OpenSSH server guides](https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_server_configuration).

## OneFuzz CLI key handling

When using any of the SSH enabled components of the onefuzz CLI, the CLI will
automatically fetch the key pair for a VM as needed. The private key is written
to a temporary directory and removed upon completion of the SSH command.

NOTE: As VMs and VM scale sets are intended to be short-lived and ephemeral, the
onefuzz CLI configures SSH to not write to the user's known hosts file and to
ignore host key checking.
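
This behavior corresponds to standard OpenSSH options (a sketch, not the CLI's
exact invocation; the key path and address are placeholders):

```bash
# Connect as the note above describes: no known_hosts entry is written,
# and host key checking is skipped.
ssh -i /tmp/onefuzz-keys/id_rsa \
    -o StrictHostKeyChecking=no \
    -o UserKnownHostsFile=/dev/null \
    onefuzz@<NODE_IP>
```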
32
docs/supported-platforms.md
Normal file
@ -0,0 +1,32 @@

## Supported Platforms

OneFuzz is cross-platform, and the actively supported platforms vary by component.

### CLI

We continuously test the CLI on Windows 10 Pro and Ubuntu 18.04 LTS, both on the
x64 architecture. The CLI client is written in Python 3, and targets Python 3.7
and up. We distribute a self-contained executable CLI build for Windows which
bundles a Python interpreter.

### Virtual Machine Scale Sets

OneFuzz deploys targets into Azure Virtual Machine Scale Sets for fuzzing (and
supporting tasks). OneFuzz permits arbitrary choice of VM SKU and OS Image,
including custom images. We continuously test on Windows 10 Pro x64 (using the
Azure OS image URN `MicrosoftWindowsDesktop:Windows-10:rs5-pro:latest`)
and Ubuntu 18.04 LTS x64 (using the Azure OS image URN
`Canonical:UbuntuServer:18.04-LTS:latest`).

### LibFuzzer Compilation

LibFuzzer targets are built by linking the libFuzzer runtime to a test function,
tied together with compiler-provided static instrumentation (sanitizers).
The resulting executable has runtime options and output that can vary with
the compiler and libFuzzer runtime used.
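
As a concrete sketch, the screencast docs build such a target as follows (the
file name `fuzz.c` is illustrative):

```bash
# Link the libFuzzer runtime and ASAN instrumentation into a test function
clang -g3 -fsanitize=fuzzer -fsanitize=address fuzz.c -o fuzz.exe
```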

We actively support libFuzzer targets produced using the following compiler
toolchains:

* LLVM 8 and up, Windows and Linux, x86 and x64
* MSVC 16.8 and later versions that support x64 ASAN instrumentation
69
docs/tasks.md
Normal file
@ -0,0 +1,69 @@

# Understanding Tasks

Tasks, the unit of work that executes on a node (typically,
[Azure VM Scalesets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview)),
are made up of a handful of components, primarily including:

1. An application under test
1. Containers for use in specified contexts
   1. All tasks should have a `setup` container, which contains the application
      under test and optionally a `setup.sh` or `setup.ps1` to customize the VM
      prior to fuzzing
   1. Input containers
   1. Output containers
1. Optionally, a managed
   [Azure Storage Queue](https://docs.microsoft.com/en-us/azure/storage/queues/storage-queues-introduction)
   of new inputs to process (used for coverage, crash reporting, etc.)

The current task types available are:

* libfuzzer_fuzz: fuzz with a libFuzzer target
* libfuzzer_coverage: extract coverage from a libFuzzer target with the seeds
* libfuzzer_crash_report: execute the target with crashing inputs, attempting to
  generate an informational report for each discovered crash
* libfuzzer_merge: merge newly discovered inputs with an input corpus using
  corpus minimization
* generic_analysis: perform [custom analysis](custom-analysis.md) on every
  crashing input
* generic_supervisor: fuzz using user-provided supervisors (such as AFL)
* generic_merge: merge newly discovered inputs with an input corpus using a
  user-provided supervisor (such as afl-merge)
* generic_generator: use a generator to craft inputs and call the application
  under test iteratively to process them
* generic_crash_report: use a built-in debugging tool (debugapi or ptrace based)
  to rerun the crashing input, attempting to generate an informational report
  for each discovered crash

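For example, the basic libFuzzer template shown in the screencast docs creates a
job wiring together the `libfuzzer_fuzz`, `libfuzzer_coverage`, and
`libfuzzer_crash_report` tasks:

```bash
onefuzz template libfuzzer basic my-project my-target build-1 ubuntu \
    --target_exe ./fuzz.exe --inputs ./seeds
```
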
Each type of task has a unique set of configuration options available; these
include:

* target_exe: the application under test
* target_env: User specified environment variables for the target
* target_options: User specified command line options for the target under test
* target_workers: User specified number of workers to launch on a given VM (at
  this time, only used for `libfuzzer` fuzzing tasks)
* target_options_merge: Enable merging supervisor and target arguments in
  supervisor based merge tasks
* analyzer_exe: User specified analysis tool (see
  [Custom Analysis Tasks](custom-analysis.md))
* analyzer_env: User specified environment variables for the analysis tool
* analyzer_options: User specified command line options for the analysis tool
* generator_exe: User specified generator (such as radamsa.exe). The generator
  tool must exist in the task specified `generator` container
* generator_env: User specified environment variables for the generator tool
* generator_options: User specified command line options for the generator tool
* supervisor_exe: User specified supervisor (such as afl)
* supervisor_env: User specified environment variables for the supervisor
* supervisor_options: User specified command line options for the supervisor
* supervisor_input_marker: Marker to specify the path to the filename for
  supervisors (example: for AFL, this should be '@@')
* stats_file: Path to the fuzzer's stats file
* stats_format: Format of the fuzzer's stats file
* input_queue_from_container: Container name to monitor for new changes
* rename_output: Rename generated inputs to the sha256 of the input (used during
  generator tasks)
* wait_for_files: For supervisor tasks (such as AFL), do not execute the
  supervisor until input files are available in the `inputs` container
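
For illustration only, a few of these options in a hypothetical task
configuration might look like the following (the authoritative schema is in the
task definitions linked below):

```json
{
  "task": {
    "type": "libfuzzer_fuzz",
    "target_exe": "setup/fuzz.exe",
    "target_env": {},
    "target_options": [],
    "target_workers": 1
  }
}
```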

See [task definitions](../src/api-service/__app__/onefuzzlib/tasks/defs.py) for
implementation level details on the types of tasks available.
181
docs/telemetry.md
Normal file
@ -0,0 +1,181 @@

# Telemetry

Onefuzz reports two types of telemetry, both via
[AppInsights](https://docs.microsoft.com/en-us/azure/azure-monitor/app/app-insights-overview).

1. Onefuzz records fully featured attributable data to a user-owned
   [AppInsights](https://docs.microsoft.com/en-us/azure/azure-monitor/app/app-insights-overview)
   instance. The goal of this information is to enable users to perform
   detailed analysis of their fuzzing tasks.
1. Onefuzz reports a non-attributable, minimal set of runtime statistics to
   Microsoft via a Microsoft managed
   [AppInsights](https://docs.microsoft.com/en-us/azure/azure-monitor/app/app-insights-overview)
   instance. The goal is to provide insight into the efficacy of OneFuzz and
   the fuzzing engines used in OneFuzz. Information regarding the users of a
   OneFuzz instance, any applications under test, or any bug details found via
   fuzzing is not intended to be recorded in this telemetry.

## Who owns OneFuzz Resources

For the purposes of this document, a "OneFuzz instance" is a user-deployed
install of OneFuzz in that user's
[Azure Subscription](https://docs.microsoft.com/en-us/azure/cloud-adoption-framework/decision-guides/subscriptions/).

The user owns and manages all resources used for OneFuzz, including the fuzzing
nodes. Onefuzz supports both "managed" nodes, where OneFuzz orchestrates the
lifecycle of the fuzzing nodes via
[Azure VM Scale Sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview),
and "unmanaged" nodes, where users provide compute however they wish (be that
on-premise hardware, third-party clouds, etc).

## How telemetry is collected

All telemetry is gathered from two places: the agents that run within fuzzing
nodes, and the service API running in the Azure Functions instance.

1. The Rust library [onefuzz::telemetry](../src/agent/onefuzz/src/telemetry.rs)
   provides a detailed set of telemetry types, as well as the function
   `can_share`, which gates whether a given telemetry field should be sent to
   the Microsoft central telemetry instance.
1. The Python library
   [onefuzzlib.telemetry](../src/api-service/__app__/onefuzzlib/telemetry.py)
   provides a filtering mechanism to identify a per-object set of filtering
   records. Each ORM backed table provides a mechanism to identify the fields
   that should be sent to the Microsoft central telemetry instance. Example: The
   [onefuzzlib.jobs.Job.telemetry_include](../src/api-service/__app__/onefuzzlib/jobs.py)
   implementation describes the set of fields that are to be recorded.

These mechanisms ensure that only fields intended to be recorded are sent to
the central telemetry service.

## How to disable sending telemetry to Microsoft

Remove `ONEFUZZ_TELEMETRY` in the
[Application settings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-how-to-use-azure-function-app-settings#settings)
of the Azure Functions instance in the OneFuzz instance created during
deployment.

Users are reminded of how to disable the telemetry during each OneFuzz
deployment to Azure.

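With the Azure CLI, removing the setting might look like this (a sketch;
`<APP_NAME>` and `<RESOURCE_GROUP>` are placeholders for your instance's
values):

```bash
az functionapp config appsettings delete \
    --name <APP_NAME> \
    --resource-group <RESOURCE_GROUP> \
    --setting-names ONEFUZZ_TELEMETRY
```
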
## Data sent to Microsoft

The following describes the information sent to Microsoft if telemetry is enabled.

### Definitions of common data types

The following are common data types used in multiple locations:

* Task ID - A randomly generated GUID used to uniquely identify a fuzzing task.
* Job ID - A randomly generated GUID used to uniquely identify a job.
* Machine ID - A GUID used to identify the machine running the task. When run in
  Azure, this is the
  [VM Unique ID](https://azure.microsoft.com/en-us/blog/accessing-and-using-azure-vm-unique-id/).
  When fuzzing is run outside of Azure, this is a randomly generated GUID
  created once per node.
* Scaleset ID - A randomly generated GUID used to uniquely identify a VM
  scaleset.
* Task Type - The type of task being executed. Examples include
  "generic_crash_report" or "libfuzzer_coverage". For a full list, see the enum
  [TaskType](../src/pytypes/onefuzztypes/enums.py).
* OS - An enum value describing the OS used (currently, only Windows or Linux).

### Data recorded by Agents

* Task ID
* Job ID
* Machine ID
* Task Type
* Features - A u64 representing the number of 'features' in the
  [SanCov](https://clang.llvm.org/docs/SanitizerCoverage.html) coverage map for
  a libFuzzer executable.
* Covered - A u64 representing the number of 'features' in the
  [SanCov](https://clang.llvm.org/docs/SanitizerCoverage.html) coverage map for
  a libFuzzer executable that were exercised during fuzzing.
* Rate - A float64 that is calculated as `(Covered / Features)`.
* Count - Number of executions done by the fuzzing task.
* ExecsSecond - The rate of executions per second.
* WorkerID - For fuzzers that run multiple copies concurrently on a single VM,
  this differentiates telemetry between each instance on the VM.
* RunID - A randomly generated GUID used to uniquely identify the execution of a
  fuzzing target. For fuzzers that restart, such as libFuzzer, this is used to
  uniquely identify telemetry for each time the fuzzer is started.
* VirtualMemory - The amount of virtual memory in use by the fuzzing task.
* PhysicalMemory - The amount of physical memory in use by the fuzzing task.
* CpuUsage - The amount of CPU in use by the fuzzing task.
* Crash Found - A flag that indicates that a crash was found.
* Crash Report Created - A flag that indicates a crash was found to be
  reproducible and a report was generated.
* Unique Crash Report Created - A flag that indicates that a crash was found to
  be reproducible and unique in the set of existing reports.
* Tool Name - A string that identifies the tool in use for generic tasks. For
  custom tools, this will record the custom tool name. Examples: In the
  [radamsa template](../src/cli/onefuzz/templates/afl.py), this is
  `{tools_dir}/radamsa` for the `generic_generator` task and `cdb.exe` for the
  `generic_analysis` task.

The following are [AFL](https://github.com/google/afl) specific:

* Mode - A string representing the mode of the AFL task. This is unique to
  parsing AFL stats, and specifies the "target_mode" that AFL is running in.
  Examples include, but are not limited to: "default", "qemu", and "persistent".
* CoveragePaths - A u64 representing paths_total in AFL stats.
* CoveragePathsFavored - A u64 representing paths_favored in AFL stats.
* CoveragePathsFound - A u64 representing paths_found in AFL stats.
* CoveragePathsImported - A u64 representing paths_imported in AFL stats.
* Coverage - A float64 representing bitmap_cvg in AFL stats.
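The mapping from AFL's `fuzzer_stats` file to these metrics can be summarized
as follows; the struct and field layout are illustrative, while the key names
follow AFL's stats file:

```rust
// Illustrative mapping of AFL `fuzzer_stats` keys to the reported metrics.
struct AflStats {
    paths_total: u64,    // reported as CoveragePaths
    paths_favored: u64,  // reported as CoveragePathsFavored
    paths_found: u64,    // reported as CoveragePathsFound
    paths_imported: u64, // reported as CoveragePathsImported
    bitmap_cvg: f64,     // reported as Coverage
}
```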
### Data recorded by the Service

Each time the state of a job changes, the following information is recorded:

* Job ID
* State - The current state of the job. For a full list, see the enum
  [JobState](../src/pytypes/onefuzztypes/enums.py).

Each time the state of a task changes, the following information is recorded at
the service level:

* Task ID
* Job ID
* Task Type
* State - The current state of the task. For a full list, see the enum
  [TaskState](../src/pytypes/onefuzztypes/enums.py).
* VM Count - The number of VMs used for the task.

Each time the state of a scaleset changes, the following information is
recorded:

* Scaleset ID
* OS
* VM SKU - The
  [Azure VM Size](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes)
* Size - The number of VMs in the scaleset.
* State - The current state of the scaleset. For a full list, see the enum
  [ScalesetState](../src/pytypes/onefuzztypes/enums.py).
* Spot Instances - A boolean representing if Spot Instances are used in a
  scaleset.

Each time the state of a pool changes, the following information is recorded:

* Pool ID - A randomly generated GUID used to uniquely identify a pool.
* OS
* State - The current state of the pool. For a full list, see the enum
  [PoolState](../src/pytypes/onefuzztypes/enums.py).
* Managed - A boolean indicating whether OneFuzz manages the VMs in the pool.

Each time the state of a fuzzing node changes, the following information is
recorded:

* Scaleset ID
* Machine ID
* State - The current state of the node. For a full list, see the enum
  [NodeState](../src/pytypes/onefuzztypes/enums.py).

Each time the state of a task on a node changes, the following information is
recorded:

* Task ID
* Machine ID
* State - The current state of the task on the node. For a full list, see the
  enum [NodeTaskState](../src/pytypes/onefuzztypes/enums.py).
62
docs/terminology.md
Normal file
@ -0,0 +1,62 @@
# Terminology of OneFuzz

## Task

A task is the single unit of work. Example high-level descriptions of tasks
include:

* "Run AFL with a given target"
* "With a given target, build a coverage map for every input file"

For more information: [Understanding Tasks](tasks.md)

## Job

A job, at its core, is merely an easy way to refer to a collection of tasks.

Jobs are uniquely identified by a job_id (UUID) and include the following
information:

* project (example: MSEdge)
* name (example: png_parser)
* build (example: 3529725.3)

## Template

A template is a pre-configured job with a set of tasks that include the most
common configurations for a given fuzz job. Templates are analogous to playbooks
or recipes, entirely built on top of the SDK. The templates can be recreated as
scripts calling the SDK or by executing the CLI.

As an example, the 'libfuzzer basic' template includes the following tasks:

* Fuzzing (actually perform the fuzzing tasks)
* Crash Reporting (evaluate each crash for reproducibility and generate a
  consumable report)
* Coverage Reporting (evaluate every input for code coverage in the application
  under test)

At this time, templates are statically defined. In the future, OneFuzz will
allow the owner of a OneFuzz instance to manage their own templates, allowing
central management of how fuzzing tasks are defined.
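As an illustration, launching such a template from the CLI looks roughly like
the following. The project, name, build, pool, and target values are
placeholders, and the exact command shape and flags are an assumption that may
differ between releases:

```
onefuzz template libfuzzer basic my-project my-target build-1 my-pool --target_exe fuzz.exe
```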
## Repro

Repro is short for 'reproduction VMs'. These VMs are created on demand to enable
debugging a crash, over an SSH tunnel, in the same environment used for fuzzing.
The repro VM creation automation includes downloading the task data related to
the crash, executing any setup scripts, and launching the application under test
within a debugger (`cdb -server` on Windows and `gdbserver` on Linux).

At this time, the automatic debugger connection is functional only for file-based
fuzzing targets (such as libFuzzer or AFL); however, users can connect into VMs
directly via SSH or RDP (Windows only) and have total control of the VM.
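As a sketch of that workflow, creating a repro VM and connecting to the
debugger from the CLI looks approximately like this; the command shape is an
assumption based on the description above, and the container and path are
placeholders:

```
onefuzz repro create_and_connect <container> <crash_file_path>
```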
## Container

An
[Azure Blob Storage Container](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction).
Each fuzzing task has a set of required and potentially optional containers that
are used in a specific context.

[More info on Containers](containers.md)
29
docs/upgrading.md
Normal file
@ -0,0 +1,29 @@
# Upgrading OneFuzz Instances

Upgrading is accomplished by deploying OneFuzz to the same resource group and
instance name.
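In practice this means re-running the deployment tool with the same parameters
used for the original deployment; a sketch, where the script name and argument
order are assumptions and the values are placeholders:

```
python deploy.py <region> <resource_group> <instance_name> <owner>
```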
Unless the release includes breaking changes, as indicated by the
[versioning guidelines](versioning.md), currently running jobs should not be
negatively impacted during the upgrade process.

Data from ongoing jobs is transmitted through Storage Queues and Storage
Containers, which will buffer data during the upgrade process.

Users should take care with the following items:

1. Any customization to the Azure Functions instance will likely get
   overwritten.
1. The following containers will be synchronized (removing outdated content)
   on upgrade:
   1. The `tools` container in the `func` storage account.
   1. The third-party tools containers in the `fuzz` storage account. At the
      time of writing, these include:
      * radamsa-linux
      * radamsa-windows
      * afl-linux
1. Any jobs deployed during the upgrade process may temporarily fail to be
   submitted.
   The CLI will automatically retry submitting jobs that fail due to error
   codes known to occur during the service upgrade procedure. If this behavior
   is undesired, please pause submission of jobs during the upgrade process.
35
docs/values.md
Normal file
@ -0,0 +1,35 @@
# Values for OneFuzz moving forwards

## Project Level Values

These are user-focused values for OneFuzz moving forwards, ordered by priority.
It is better to sacrifice something later to achieve a higher-priority value.

1. Debuggability. Enable the user to inspect, understand, and address their
   entire fuzzing workflow.
1. Composability. Enable the user to create a workflow combining multiple
   parts into a more complicated whole.
1. Extensibility. Enable the user to extend the fuzzing infrastructure to meet
   their needs without requiring our assistance.
1. Fuzzing Engine Performance. Enable the fastest bug-finding capabilities to be
   deployed.
1. Security. Users' software, data, and results should be protected from
   adversaries.
1. Approachability. Users should be able to onboard new software to be fuzzed
   into their CI/CD pipeline easily.

## Project Level Non-Values

All things being equal, these values, while nice to have, are of significantly
less importance than those previously discussed.

1. High-Availability. While an important component of the SDL for any project,
   fuzzing is not a business-critical task.
1. Thoroughness. Not every use case needs to be covered from the onset of
   OneFuzz.

## Implementation Ideals

1. Rely directly on Azure services and infrastructure as much as possible.
1. Reduce our software install burden on fuzzing nodes.
1. Support a large number of OS distributions & versions.
28
docs/versioning.md
Normal file
@ -0,0 +1,28 @@
# Versioning

OneFuzz follows [Semantic Versioning 2.0](https://semver.org/).

At a high level, the summary from [semver.org](https://semver.org/) says:

> Given a version number MAJOR.MINOR.PATCH, increment the:
>
> 1. MAJOR version when you make incompatible API changes,
> 1. MINOR version when you add functionality in a backwards compatible manner,
>    and
> 1. PATCH version when you make backwards compatible bug fixes. Additional
>    labels for pre-release and build metadata are available as extensions to
>    the MAJOR.MINOR.PATCH format.

## Versioning Focus

Our focus for compatibility is the CLI and Service. We will work towards
automated compatibility testing of the CLI and Service for one minor version
back.

## Additional Care for Versioning

A user should _always_ be able to access any artifact related to a crash report
or third-party integration for crash reports, such as Azure DevOps work items or
Microsoft Teams messages. As such, special care should be taken to ensure
compatibility for Crash Reports and Task Configurations, and accessing said
information, above and beyond the compatibility for the rest of OneFuzz.
2
src/agent/.cargo/config
Normal file
@ -0,0 +1,2 @@
[target.x86_64-pc-windows-msvc]
rustflags = ["-C", "target-feature=+crt-static"]
1
src/agent/.gitignore
vendored
Normal file
@ -0,0 +1 @@
target
1
src/agent/.rustfmt.toml
Normal file
@ -0,0 +1 @@
edition = "2018"
2552
src/agent/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
14
src/agent/Cargo.toml
Normal file
@ -0,0 +1,14 @@
[workspace]
members = [
    "atexit",
    "debugger",
    "input-tester",
    "onefuzz",
    "onefuzz-agent",
    "onefuzz-supervisor",
    "storage-queue",
    "win-util",
]

[profile.release]
lto = "thin"
11
src/agent/atexit/Cargo.toml
Normal file
@ -0,0 +1,11 @@
[package]
name = "atexit"
version = "0.1.0"
license = "MIT"
authors = ["fuzzing@microsoft.com"]
edition = "2018"

[dependencies]
ctrlc = "3.1"
lazy_static = "1.4"
log = "0.4"
64
src/agent/atexit/src/lib.rs
Normal file
@ -0,0 +1,64 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use lazy_static::lazy_static;
use log::warn;
use std::sync::{Arc, RwLock};

struct AtExit {
    functions: RwLock<Vec<Box<dyn FnMut() + Send + Sync>>>,
}

lazy_static! {
    static ref ATEXIT: Arc<AtExit> = AtExit::new();
}

/// Register a function to run at exit (when `invoke` is called).
pub fn register<F: FnMut() + 'static + Send + Sync>(function: F) {
    ATEXIT.register_function(function)
}

/// Runs the registered functions and terminates the process with the specified exit `code`.
///
/// This function is not called automatically (e.g. via `drop`).
pub fn exit_process(code: i32) -> ! {
    ATEXIT.exit_process(code)
}

impl AtExit {
    fn new() -> Arc<Self> {
        let result = Arc::new(AtExit {
            functions: RwLock::new(vec![]),
        });
        {
            // This should cover the normal cases of pressing Ctrl+c or Ctrl+Break, but
            // we might fail to invoke the cleanup functions (e.g. to disable appverifier)
            // if the process is exiting from a logoff, machine reboot, or console closing event.
            //
            // The problem is the handler that `ctrlc` registers is not this handler, but instead
            // a handler that signals another thread to call our handler and then returns to the OS.
            // The OS might terminate our application before our handler actually runs.
            //
            // This is not a problem for Ctrl+c though because the OS won't terminate the program
            // (which is why we must exit ourselves.)
            let result = result.clone();
            ctrlc::set_handler(move || {
                warn!("Ctrl+c pressed - some results may not be saved.");
                result.exit_process(1);
            })
            .expect("More than one ctrl+c handler is not allowed");
        }
        result
    }

    fn register_function<F: FnMut() + 'static + Send + Sync>(&self, function: F) {
        self.functions.write().unwrap().push(Box::new(function));
    }

    fn exit_process(&self, code: i32) -> ! {
        for function in self.functions.write().unwrap().iter_mut() {
            function();
        }
        std::process::exit(code);
    }
}
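// Illustrative usage sketch (an addition for this write-up, not part of the
// original file): register a cleanup function, then terminate through
// `exit_process` so the registered functions run.
fn main() {
    atexit::register(|| {
        log::warn!("cleaning up before exit");
    });

    // ... perform work ...

    atexit::exit_process(0);
}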
21
src/agent/coverage/Cargo.toml
Normal file
@ -0,0 +1,21 @@
[package]
name = "coverage"
version = "0.1.0"
authors = ["fuzzing@microsoft.com"]
license = "MIT"
edition = "2018"

[dependencies]
anyhow = "1.0"
bincode = "1.3"
fixedbitset = "0.3"
goblin = "0.2"
iced-x86 = { version = "1.1", features = ["decoder", "op_code_info", "instr_info", "masm"] }
log = "0.4"
memmap = "0.7"
serde = { version = "1.0", features = ["derive"] }
uuid = { version = "0.8", features = ["guid"] }

[target.'cfg(windows)'.dependencies]
pdb = "0.6"
winapi = "0.3"
52
src/agent/coverage/src/intel.rs
Normal file
@ -0,0 +1,52 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use fixedbitset::FixedBitSet;
use iced_x86::{Decoder, DecoderOptions, FlowControl, Instruction, OpKind};

fn process_near_branch(instruction: &Instruction, blocks: &mut FixedBitSet) {
    match instruction.op0_kind() {
        OpKind::NearBranch16 => {}
        OpKind::NearBranch32 => {}
        OpKind::NearBranch64 => {
            // Note we do not check if the branch takes us to another function, e.g.
            // with a tail call.
            blocks.insert(instruction.near_branch_target() as usize);
        }
        OpKind::FarBranch16 => {}
        OpKind::FarBranch32 => {}
        _ => {}
    }
}

pub fn find_blocks(bitness: u32, bytes: &[u8], func_rva: u32, blocks: &mut FixedBitSet) {
    // We *could* maybe pass `DecoderOptions::AMD_BRANCHES | DecoderOptions::JMPE` because
    // we only care about control flow here, but it's not clear we'll ever see those instructions
    // and we don't need precise coverage so it doesn't matter too much.
    let mut decoder = Decoder::new(bitness, bytes, DecoderOptions::NONE);
    decoder.set_ip(func_rva as u64);

    let mut instruction = Instruction::default();
    while decoder.can_decode() {
        decoder.decode_out(&mut instruction);

        match instruction.flow_control() {
            FlowControl::Next => {}
            FlowControl::ConditionalBranch => {
                process_near_branch(&instruction, blocks);
                blocks.insert(instruction.next_ip() as usize);
            }
            FlowControl::UnconditionalBranch => {
                process_near_branch(&instruction, blocks);
            }
            FlowControl::IndirectBranch => {}
            FlowControl::Return => {}
            FlowControl::Call => {}
            FlowControl::IndirectCall => {}
            FlowControl::Interrupt => {}
            FlowControl::XbeginXabortXend => {}
            FlowControl::Exception => {}
        }
    }
}
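// Hypothetical driver for `find_blocks` above (an illustration, not part of
// the original file; the bytes and RVA are made up, and this assumes the
// module is reachable from the calling code):
fn main() {
    use fixedbitset::FixedBitSet;

    // jne +2; nop; nop -- the conditional branch yields two block entries.
    let code: &[u8] = &[0x75, 0x02, 0x90, 0x90];
    let mut blocks = FixedBitSet::with_capacity(0x1000);

    intel::find_blocks(64, code, 0x400, &mut blocks);

    for rva in blocks.ones() {
        println!("block entry at rva 0x{:x}", rva);
    }
}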
143
src/agent/coverage/src/lib.rs
Normal file
@ -0,0 +1,143 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![cfg(windows)]
#![allow(clippy::as_conversions)]
#![allow(clippy::new_without_default)]

mod intel;
mod pe;

use std::{ffi::OsString, fs::File, path::PathBuf};

use anyhow::Result;
use fixedbitset::FixedBitSet;
use serde::{Deserialize, Serialize};
use std::path::Path;

pub const COVERAGE_MAP: &str = "coverage-map";

#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct Block {
    rva: u32,
    hit: bool,
}

impl Block {
    pub fn new(rva: u32, hit: bool) -> Self {
        Block { rva, hit }
    }

    pub fn rva(&self) -> u32 {
        self.rva
    }

    pub fn hit(&self) -> bool {
        self.hit
    }

    pub fn set_hit(&mut self) {
        self.hit = true;
    }
}

#[derive(Deserialize, Serialize, Clone)]
pub struct ModuleCoverageBlocks {
    module: OsString,
    blocks: Vec<Block>,
}

impl ModuleCoverageBlocks {
    pub fn new<S: Into<OsString>>(module: S, rvas_bitset: FixedBitSet) -> Self {
        let blocks: Vec<_> = rvas_bitset
            .ones()
            .map(|rva| Block::new(rva as u32, false))
            .collect();

        ModuleCoverageBlocks {
            module: module.into(),
            blocks,
        }
    }

    pub fn module_name(&self) -> &Path {
        Path::new(&self.module)
    }

    pub fn blocks(&self) -> &[Block] {
        &self.blocks
    }

    pub fn set_block_hit(&mut self, block_index: usize) {
        self.blocks[block_index].set_hit();
    }

    pub fn count_blocks_hit(&self) -> usize {
        self.blocks.iter().filter(|b| b.hit).count()
    }
}

#[derive(Deserialize, Serialize, Clone)]
pub struct AppCoverageBlocks {
    modules: Vec<ModuleCoverageBlocks>,
}

impl AppCoverageBlocks {
    pub fn new() -> Self {
        let modules = vec![];
        Self { modules }
    }

    pub fn modules(&self) -> &[ModuleCoverageBlocks] {
        &self.modules
    }

    pub fn add_module(&mut self, module: ModuleCoverageBlocks) {
        self.modules.push(module);
    }

    pub fn report_block_hit(&mut self, module_index: usize, block_index: usize) {
        self.modules[module_index].set_block_hit(block_index);
    }

    pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
        let cov_file = File::create(path)?;
        bincode::serialize_into(&cov_file, self)?;
        Ok(())
    }

    pub fn count_blocks_hit(&self) -> usize {
        self.modules().iter().map(|m| m.count_blocks_hit()).sum()
    }
}

/// Statically analyze the specified images to discover the basic block
/// entry points and write out the results in a file in `output_dir`.
pub fn run_init(output_dir: PathBuf, modules: Vec<PathBuf>, function: bool) -> Result<()> {
    let mut result = AppCoverageBlocks::new();
    for module in modules {
        if module.is_file() {
            let rvas_bitset = pe::process_image(&module, function)?;

            let module_name = module.file_stem().unwrap(); // Unwrap guaranteed by `is_file` test above.
            let module_rvas = ModuleCoverageBlocks::new(module_name, rvas_bitset);
            result.add_module(module_rvas);
        } else {
            anyhow::bail!("Cannot find file `{}`", module.as_path().display());
        }
    }

    let output_file = output_dir.join(COVERAGE_MAP);
    result.save(&output_file)?;

    Ok(())
}

/// Load a coverage map created by `run_init`.
pub fn load_coverage_map(output_dir: &Path) -> Result<Option<AppCoverageBlocks>> {
    if let Ok(cov_file) = File::open(output_dir.join(COVERAGE_MAP)) {
        Ok(Some(bincode::deserialize_from(cov_file)?))
    } else {
        Ok(None)
    }
}
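// End-to-end sketch of the two public entry points above (an illustration,
// not part of the original file; paths are placeholders and this assumes a
// Windows build, per the `#![cfg(windows)]` attribute):
fn main() -> anyhow::Result<()> {
    use std::path::PathBuf;

    let output_dir = PathBuf::from("coverage-out");
    let modules = vec![PathBuf::from("target.exe")];

    // Discover basic-block entry points and write the map to disk.
    coverage::run_init(output_dir.clone(), modules, false)?;

    // Later: reload the map and summarize recorded hits.
    if let Some(map) = coverage::load_coverage_map(&output_dir)? {
        println!("blocks hit: {}", map.count_blocks_hit());
    }

    Ok(())
}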
290
src/agent/coverage/src/pe.rs
Normal file
@ -0,0 +1,290 @@
|
||||
// Copyright (c) Microsoft Corporation.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
#![allow(clippy::manual_swap)]
|
||||
|
||||
use std::{ffi::CStr, fs::File, path::Path};
|
||||
|
||||
use anyhow::Result;
|
||||
use fixedbitset::FixedBitSet;
|
||||
use goblin::pe::{debug::DebugData, PE};
|
||||
use log::trace;
|
||||
use memmap::Mmap;
|
||||
use pdb::{
|
||||
AddressMap, FallibleIterator, PdbInternalSectionOffset, ProcedureSymbol, TypeIndex, PDB,
|
||||
};
|
||||
use uuid::Uuid;
|
||||
use winapi::um::winnt::{IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_I386};
|
||||
|
||||
use crate::intel;
|
||||
|
||||
fn uuid_from_bytes_le(mut bytes: [u8; 16]) -> Uuid {
|
||||
for (i, j) in [(0, 3), (1, 2), (4, 5), (6, 7)].iter() {
|
||||
let tmp = bytes[*i];
|
||||
bytes[*i] = bytes[*j];
|
||||
bytes[*j] = tmp;
|
||||
}
|
||||
Uuid::from_bytes(bytes)
|
||||
}
|
||||
|
||||
struct JumpTableData {
|
||||
pub offset: PdbInternalSectionOffset,
|
||||
pub labels: Vec<PdbInternalSectionOffset>,
|
||||
}
|
||||
|
||||
impl JumpTableData {
|
||||
pub fn new(offset: PdbInternalSectionOffset) -> Self {
|
||||
Self {
|
||||
offset,
|
||||
labels: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ProcSymInfo {
|
||||
pub name: String,
|
||||
pub offset: PdbInternalSectionOffset,
|
||||
pub code_len: u32,
|
||||
pub jump_tables: Vec<JumpTableData>,
|
||||
pub extra_labels: Vec<PdbInternalSectionOffset>,
|
||||
}
|
||||
|
||||
impl ProcSymInfo {
|
||||
pub fn new(
|
||||
name: String,
|
||||
offset: PdbInternalSectionOffset,
|
||||
code_len: u32,
|
||||
jump_tables: Vec<JumpTableData>,
|
||||
extra_labels: Vec<PdbInternalSectionOffset>,
|
||||
) -> Self {
|
||||
Self {
|
||||
name,
|
||||
offset,
|
||||
code_len,
|
||||
jump_tables,
|
||||
extra_labels,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn offset_within_func(offset: PdbInternalSectionOffset, proc: &ProcedureSymbol) -> bool {
|
||||
offset.section == proc.offset.section
|
||||
&& offset.offset >= proc.offset.offset
|
||||
&& offset.offset < (proc.offset.offset + proc.len)
|
||||
}
|
||||
|
||||
fn collect_func_sym_info(
|
||||
symbols: &mut pdb::SymbolIter<'_>,
|
||||
proc: ProcedureSymbol,
|
||||
) -> Result<ProcSymInfo> {
|
||||
let mut jump_tables = vec![];
|
||||
let mut extra_labels = vec![];
|
||||
while let Some(symbol) = symbols.next()? {
|
||||
// Symbols are scoped with `end` marking the last symbol in the scope of the function.
|
||||
if symbol.index() == proc.end {
|
||||
break;
|
||||
}
|
||||
|
||||
match symbol.parse() {
|
||||
Ok(pdb::SymbolData::Data(data)) => {
|
||||
// Local data *might* be a jump table if it's in the same section as
|
||||
// the function. For extra paranoia, we also check that there is no type
|
||||
// as that is what VC++ generates. LLVM does not generate debug symbols for
|
||||
// jump tables.
|
||||
if offset_within_func(data.offset, &proc) && data.type_index == TypeIndex(0) {
|
||||
jump_tables.push(JumpTableData::new(data.offset));
|
||||
}
|
||||
}
|
||||
Ok(pdb::SymbolData::Label(label)) => {
|
||||
if offset_within_func(label.offset, &proc) {
|
||||
if let Some(jump_table) = jump_tables.last_mut() {
|
||||
jump_table.labels.push(label.offset);
|
||||
} else {
|
||||
// Maybe not possible to get here, and maybe a bad idea for VC++
|
||||
// because the code length would include this label,
|
||||
// but could be useful if LLVM generates labels but no L_DATA32 record.
|
||||
extra_labels.push(label.offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(_)
|
||||
| Err(pdb::Error::UnimplementedFeature(_))
|
||||
| Err(pdb::Error::UnimplementedSymbolKind(_)) => {}
|
||||
Err(err) => {
|
||||
anyhow::bail!("Error reading symbols: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let result = ProcSymInfo::new(
|
||||
proc.name.to_string().to_string(),
|
||||
proc.offset,
|
||||
proc.len,
|
||||
jump_tables,
|
||||
extra_labels,
|
||||
);
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn collect_proc_symbols(symbols: &mut pdb::SymbolIter<'_>) -> Result<Vec<ProcSymInfo>> {
|
||||
let mut result = vec![];
|
||||
|
||||
while let Some(symbol) = symbols.next()? {
|
||||
match symbol.parse() {
|
||||
Ok(pdb::SymbolData::Procedure(proc)) => {
|
||||
// Collect everything we need for safe disassembly of the function.
|
||||
result.push(collect_func_sym_info(symbols, proc)?);
|
||||
}
|
||||
Ok(_)
|
||||
| Err(pdb::Error::UnimplementedFeature(_))
|
||||
| Err(pdb::Error::UnimplementedSymbolKind(_)) => {}
|
||||
Err(err) => {
|
||||
anyhow::bail!("Error reading symbols: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn find_blocks(
|
||||
proc_data: &[ProcSymInfo],
|
||||
blocks: &mut FixedBitSet,
|
||||
address_map: &AddressMap,
|
||||
pe: &PE,
|
||||
mmap: &Mmap,
|
||||
functions_only: bool,
|
||||
) -> Result<()> {
|
||||
let file_alignment = pe
|
||||
.header
|
||||
.optional_header
|
||||
.unwrap()
|
||||
.windows_fields
|
||||
.file_alignment;
|
||||
let machine = pe.header.coff_header.machine;
|
||||
let bitness = match machine {
|
||||
IMAGE_FILE_MACHINE_I386 => 32,
|
||||
IMAGE_FILE_MACHINE_AMD64 => 64,
|
||||
_ => anyhow::bail!("Unsupported architecture {}", machine),
|
||||
};
|
||||
|
||||
for proc in proc_data {
|
||||
if let Some(rva) = proc.offset.to_rva(address_map) {
|
||||
blocks.insert(rva.0 as usize);
|
||||
|
||||
if functions_only {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(file_offset) =
|
||||
goblin::pe::utils::find_offset(rva.0 as usize, &pe.sections, file_alignment)
|
||||
{
|
||||
// VC++ includes jump tables with the code length which we must exclude
|
||||
// from disassembly. We use the minimum address of a jump table since
|
||||
// the tables are placed consecutively after the actual code.
|
||||
//
|
||||
// LLVM 9 **does not** include debug info for jump tables, but conveniently
|
||||
// does not include the jump tables in the code length.
|
||||
let mut code_len = proc.code_len;
|
||||
|
||||
for table in &proc.jump_tables {
|
||||
if table.offset.section == proc.offset.section
|
||||
&& table.offset.offset > proc.offset.offset
|
||||
&& (proc.offset.offset + code_len) > table.offset.offset
|
||||
{
|
||||
code_len = table.offset.offset - proc.offset.offset;
|
||||
}
|
||||
|
||||
for label in &table.labels {
|
||||
if let Some(rva) = label.to_rva(address_map) {
|
||||
blocks.insert(rva.0 as usize)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for label in &proc.extra_labels {
|
||||
if let Some(rva) = label.to_rva(address_map) {
|
||||
blocks.insert(rva.0 as usize)
|
||||
}
|
||||
}
|
||||
|
||||
trace!(
|
||||
"analyzing func: {} rva: 0x{:x} file_offset: 0x{:x}",
|
||||
&proc.name,
|
||||
rva.0,
|
||||
file_offset
|
||||
);
|
||||
|
||||
intel::find_blocks(
|
||||
bitness,
|
||||
&mmap[file_offset..file_offset + (code_len as usize)],
|
||||
rva.0,
|
||||
blocks,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn process_image<P: AsRef<Path>>(image_path: P, functions_only: bool) -> Result<FixedBitSet> {
|
||||
let file = File::open(image_path.as_ref())?;
|
||||
let mmap = unsafe { Mmap::map(&file)? };
|
||||
|
||||
let pe = PE::parse(mmap.as_ref())?;
|
||||
|
||||
if let Some(DebugData {
|
||||
image_debug_directory: _,
|
||||
codeview_pdb70_debug_info: Some(cv),
|
||||
}) = pe.debug_data
|
||||
{
|
||||
let pdb_path = CStr::from_bytes_with_nul(cv.filename)?.to_str()?;
|
||||
let pdb_file = File::open(pdb_path)?;
|
||||
let mut pdb = PDB::open(pdb_file)?;
|
||||
let pdbi = pdb.pdb_information()?;
|
||||
|
||||
if pdbi.guid != uuid_from_bytes_le(cv.signature) || pdbi.age < cv.age {
|
||||
anyhow::bail!(
|
||||
"pdb `{}` doesn't match image `{}`",
|
||||
pdb_path,
|
||||
image_path.as_ref().display()
|
||||
);
|
||||
}
|
||||
|
||||
let address_map = pdb.address_map()?;
|
||||
|
||||
let mut blocks = FixedBitSet::with_capacity(mmap.len());
|
||||
|
||||
let proc_sym_info = collect_proc_symbols(&mut pdb.global_symbols()?.iter())?;
|
||||
find_blocks(
|
||||
&proc_sym_info[..],
|
||||
&mut blocks,
|
||||
&address_map,
|
||||
&pe,
|
||||
&mmap,
|
||||
functions_only,
|
||||
)?;
|
||||
|
||||
// Modules in the pdb correspond to object files.
|
||||
let dbi = pdb.debug_information()?;
|
||||
let mut modules = dbi.modules()?;
|
||||
while let Some(module) = modules.next()? {
|
||||
if let Some(info) = pdb.module_info(&module)? {
|
||||
let proc_sym_info = collect_proc_symbols(&mut info.symbols()?)?;
|
||||
find_blocks(
|
||||
&proc_sym_info[..],
|
||||
&mut blocks,
|
||||
&address_map,
|
||||
&pe,
|
||||
&mmap,
|
||||
functions_only,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(blocks);
|
||||
}
|
||||
|
||||
anyhow::bail!("PE missing codeview pdb debug info")
|
||||
}
|
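// Hypothetical driver for `process_image` above (an illustration, not part of
// the original file; within the crate the module is private, so this shows
// the call shape rather than an external API, and the path is a placeholder):
fn main() -> anyhow::Result<()> {
    // `false` requests all basic blocks, not just function entry points.
    let blocks = pe::process_image("target.exe", false)?;
    println!("discovered {} block entry points", blocks.count_ones(..));
    Ok(())
}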
1
src/agent/data/licenses.json
Normal file
@ -0,0 +1 @@
[]
30
src/agent/debugger/Cargo.toml
Normal file
@ -0,0 +1,30 @@
[package]
name = "debugger"
version = "0.1.0"
authors = ["fuzzing@microsoft.com"]
edition = "2018"
license = "MIT"

[dependencies]
anyhow = "1.0"
fnv = "1.0"
goblin = "0.2"
log = "0.4"
memmap = "0.7"
serde = { version = "1.0", features = ["derive"] }
win-util = { path = "../win-util" }

[dependencies.winapi]
version = "0.3"
features = [
    "dbghelp",
    "debugapi",
    "handleapi",
    "memoryapi",
    "processthreadsapi",
    "securitybaseapi",
    "shellapi",
    "werapi",
    "winbase",
    "winerror"
]
680
src/agent/debugger/src/dbghelp.rs
Normal file
@ -0,0 +1,680 @@
|
||||
// Copyright (c) Microsoft Corporation.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
// This is only needed because of the types defined here that are missing from winapi.
|
||||
// Once they get added to winapi, this should be removed.
|
||||
#![allow(bad_style)]
|
||||
#![allow(clippy::unreadable_literal)]
|
||||
#![allow(clippy::collapsible_if)]
|
||||
#![allow(clippy::needless_return)]
|
||||
//! This module defines a wrapper around dbghelp APIs so they can be used in a
//! thread-safe manner, as well as providing a more Rust-like API.
|
||||
use std::{
|
||||
cmp,
|
||||
ffi::{OsStr, OsString},
|
||||
mem::{size_of, MaybeUninit},
|
||||
path::{Path, PathBuf},
|
||||
sync::Once,
|
||||
};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use log::warn;
|
||||
use win_util::{check_winapi, last_os_error, process};
|
||||
use winapi::{
|
||||
shared::{
|
||||
basetsd::{DWORD64, PDWORD64},
|
||||
guiddef::GUID,
|
||||
minwindef::{BOOL, DWORD, FALSE, LPVOID, MAX_PATH, PDWORD, TRUE, ULONG, WORD},
|
||||
winerror::{ERROR_ALREADY_EXISTS, ERROR_SUCCESS},
|
||||
},
|
||||
um::{
|
||||
dbghelp::{
|
||||
AddrModeFlat, StackWalkEx, SymCleanup, SymFromNameW, SymFunctionTableAccess64,
|
||||
SymGetModuleBase64, SymInitializeW, SymLoadModuleExW, IMAGEHLP_LINEW64,
|
||||
PIMAGEHLP_LINEW64, PSYMBOL_INFOW, STACKFRAME_EX, SYMBOL_INFOW, SYM_STKWALK_DEFAULT,
|
||||
},
|
||||
errhandlingapi::GetLastError,
|
||||
handleapi::CloseHandle,
|
||||
processthreadsapi::{GetThreadContext, SetThreadContext},
|
||||
synchapi::{CreateMutexA, ReleaseMutex, WaitForSingleObjectEx},
|
||||
winbase::{
|
||||
Wow64GetThreadContext, Wow64SetThreadContext, INFINITE, WAIT_ABANDONED, WAIT_FAILED,
|
||||
},
|
||||
winnt::{
|
||||
CONTEXT, CONTEXT_ALL, HANDLE, IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_I386, WCHAR,
|
||||
WOW64_CONTEXT, WOW64_CONTEXT_ALL,
|
||||
},
|
||||
},
|
||||
ENUM, STRUCT,
|
||||
};
|
||||
|
||||
// We use 4096 based on C4503 - the documented VC++ warning that a name is truncated.
|
||||
const MAX_SYM_NAME: usize = 4096;
|
||||
|
||||
// Ideally this would be a function, but it would require returning a large stack
|
||||
// allocated object **and** an interior pointer to the object, so we use a macro instead.
|
||||
macro_rules! init_sym_info {
|
||||
($symbol_info: ident) => {{
|
||||
// We must allocate enough space for the SYMBOL_INFOW **and** the maximum
|
||||
// number of wide (2 byte) characters at the end of the SYMBOL_INFOW struct.
|
||||
const MIN_SYMINFO_SIZE: usize = 2 * MAX_SYM_NAME + size_of::<SYMBOL_INFOW>();
|
||||
|
||||
// The macro caller provides the name of the local variable that we initialize.
|
||||
// This odd pattern is used so we can return an interior pointer within this aligned
|
||||
// stack allocation.
|
||||
$symbol_info = Aligned8([0u8; MIN_SYMINFO_SIZE]);
|
||||
let aligned_sym_info = &mut $symbol_info.0;
|
||||
|
||||
// Clippy isn't smart enough to know the first field of our aligned struct is also aligned.
|
||||
#[allow(clippy::cast_ptr_alignment)]
|
||||
let symbol_info_ptr = unsafe { &mut *(aligned_sym_info.as_mut_ptr() as *mut SYMBOL_INFOW) };
|
||||
symbol_info_ptr.MaxNameLen = MAX_SYM_NAME as ULONG;
|
||||
|
||||
// the struct size not counting the variable length name.
|
||||
symbol_info_ptr.SizeOfStruct = size_of::<SYMBOL_INFOW>() as DWORD;
|
||||
symbol_info_ptr
|
||||
}};
|
||||
}
|
||||
|
||||
// Missing from winapi-rs - see https://github.com/retep998/winapi-rs/pull/864
|
||||
const SYMOPT_DEBUG: DWORD = 0x80000000;
|
||||
const SYMOPT_DEFERRED_LOADS: DWORD = 0x00000004;
|
||||
const SYMOPT_FAIL_CRITICAL_ERRORS: DWORD = 0x00000200;
|
||||
const SYMOPT_NO_PROMPTS: DWORD = 0x00080000;
|
||||
|
||||
/// We use dbghlp Sym apis to walk a stack. dbghlp apis are documented as not being thread safe,
|
||||
/// so we provide a lock around our use of these apis.
|
||||
///
|
||||
/// Note that Rust itself also uses dbghlp to get a stack trace, e.g. when you panic and set
|
||||
/// RUST_BACKTRACE.
|
||||
///
|
||||
/// This function is based on the `backtrace` crate which is also used in Rust std. Here
|
||||
/// we use the same named local mutex to hopefully avoid any unsynchronized uses of dbghlp
|
||||
/// in std.
|
||||
pub fn lock() -> Result<DebugHelpGuard> {
|
||||
use core::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
static LOCK: AtomicUsize = AtomicUsize::new(0);
|
||||
let mut lock = LOCK.load(Ordering::SeqCst);
|
||||
if lock == 0 {
|
||||
lock = unsafe {
|
||||
CreateMutexA(
|
||||
std::ptr::null_mut(),
|
||||
0,
|
||||
"Local\\RustBacktraceMutex\0".as_ptr() as _,
|
||||
) as usize
|
||||
};
|
||||
|
||||
if lock == 0 {
|
||||
return Err(last_os_error());
|
||||
}
|
||||
|
||||
// Handle the race between threads creating our mutex by closing ours if another
|
||||
// thread created the mutex first.
|
||||
if let Err(other) = LOCK.compare_exchange(0, lock, Ordering::SeqCst, Ordering::SeqCst) {
|
||||
debug_assert_ne!(other, 0);
|
||||
debug_assert_eq!(unsafe { GetLastError() }, ERROR_ALREADY_EXISTS);
|
||||
unsafe { CloseHandle(lock as HANDLE) };
|
||||
lock = other;
|
||||
}
|
||||
}
|
||||
debug_assert_ne!(lock, 0);
|
||||
let lock = lock as HANDLE;
|
||||
match unsafe { WaitForSingleObjectEx(lock, INFINITE, FALSE) } {
|
||||
WAIT_FAILED => return Err(last_os_error()),
|
||||
WAIT_ABANDONED => {
|
||||
warn!("dbghlp mutex was abandoned");
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let dbghlp = DebugHelpGuard::new(lock);
|
||||
|
||||
static DBGHLP_INIT: Once = Once::new();
|
||||
DBGHLP_INIT.call_once(|| {
|
||||
// Set SYMOPT_DEFERRED_LOADS for performance.
|
||||
// Set SYMOPT_FAIL_CRITICAL_ERRORS and SYMOPT_NO_PROMPTS to avoid popups.
|
||||
dbghlp.sym_set_options(
|
||||
dbghlp.sym_get_options()
|
||||
| SYMOPT_DEBUG
|
||||
| SYMOPT_DEFERRED_LOADS
|
||||
| SYMOPT_FAIL_CRITICAL_ERRORS
|
||||
| SYMOPT_NO_PROMPTS,
|
||||
);
|
||||
});
|
||||
|
||||
Ok(dbghlp)
|
||||
}
|
||||
|
||||
// Not defined in winapi yet
|
||||
ENUM! {enum SYM_TYPE {
|
||||
SymNone = 0,
|
||||
SymCoff,
|
||||
SymCv,
|
||||
SymPdb,
|
||||
SymExport,
|
||||
SymDeferred,
|
||||
SymSym,
|
||||
SymDia,
|
||||
SymVirtual,
|
||||
NumSymTypes,
|
||||
}}
|
||||
STRUCT! {struct IMAGEHLP_MODULEW64 {
|
||||
SizeOfStruct: DWORD,
|
||||
BaseOfImage: DWORD64,
|
||||
ImageSize: DWORD,
|
||||
TimeDateStamp: DWORD,
|
||||
CheckSum: DWORD,
|
||||
NumSyms: DWORD,
|
||||
SymType: SYM_TYPE,
|
||||
ModuleName: [WCHAR; 32],
|
||||
ImageName: [WCHAR; 256],
|
||||
LoadedImageName: [WCHAR; 256],
|
||||
LoadedPdbName: [WCHAR; 256],
|
||||
CVSig: DWORD,
|
||||
CVData: [WCHAR; MAX_PATH * 3],
|
||||
PdbSig: DWORD,
|
||||
PdbSig70: GUID,
|
||||
PdbAge: DWORD,
|
||||
PdbUnmatched: BOOL,
|
||||
DbgUnmatched: BOOL,
|
||||
LineNumbers: BOOL,
|
||||
GlobalSymbols: BOOL,
|
||||
TypeInfo: BOOL,
|
||||
SourceIndexed: BOOL,
|
||||
Publics: BOOL,
|
||||
MachineType: DWORD,
|
||||
Reserved: DWORD,
|
||||
}}
|
||||
pub type PIMAGEHLP_MODULEW64 = *mut IMAGEHLP_MODULEW64;
|
||||
|
||||
// Not defined in winapi yet
|
||||
extern "system" {
|
||||
pub fn SymGetOptions() -> DWORD;
|
||||
pub fn SymSetOptions(_: DWORD) -> DWORD;
|
||||
pub fn SymFromInlineContextW(
|
||||
hProcess: HANDLE,
|
||||
Address: DWORD64,
|
||||
InlineContext: ULONG,
|
||||
Displacement: PDWORD64,
|
||||
Symbol: PSYMBOL_INFOW,
|
||||
) -> BOOL;
|
||||
pub fn SymGetLineFromInlineContextW(
|
||||
hProcess: HANDLE,
|
||||
dwAddr: DWORD64,
|
||||
InlineContext: ULONG,
|
||||
qwModuleBaseAddress: DWORD64,
|
||||
pdwDisplacement: PDWORD,
|
||||
Line: PIMAGEHLP_LINEW64,
|
||||
) -> BOOL;
|
||||
pub fn SymGetModuleInfoW64(
|
||||
hProcess: HANDLE,
|
||||
qwAddr: DWORD64,
|
||||
ModuleInfo: PIMAGEHLP_MODULEW64,
|
||||
) -> BOOL;
|
||||
}
|
||||
|
||||
#[repr(C, align(8))]
|
||||
struct Aligned8<T>(T);
|
||||
|
||||
#[repr(C, align(16))]
|
||||
pub struct Aligned16<T>(T);
|
||||
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum FrameContext {
|
||||
X64(Aligned16<CONTEXT>),
|
||||
X86(WOW64_CONTEXT),
|
||||
}
|
||||
|
||||
impl FrameContext {
|
||||
pub fn program_counter(&self) -> u64 {
|
||||
match self {
|
||||
FrameContext::X64(ctx) => ctx.0.Rip,
|
||||
FrameContext::X86(ctx) => ctx.Eip as u64,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_program_counter(&mut self, ip: u64) {
|
||||
match self {
|
||||
FrameContext::X64(ctx) => {
|
||||
ctx.0.Rip = ip;
|
||||
}
|
||||
FrameContext::X86(ctx) => {
|
||||
ctx.Eip = ip as u32;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn stack_pointer(&self) -> u64 {
|
||||
match self {
|
||||
FrameContext::X64(ctx) => ctx.0.Rsp,
|
||||
FrameContext::X86(ctx) => ctx.Esp as u64,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn frame_pointer(&self) -> u64 {
|
||||
match self {
|
||||
FrameContext::X64(ctx) => ctx.0.Rbp,
|
||||
FrameContext::X86(ctx) => ctx.Ebp as u64,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_single_step(&mut self, enable: bool) {
|
||||
const TRAP_FLAG: u32 = 1 << 8;
|
||||
|
||||
let flags = match self {
|
||||
FrameContext::X64(ctx) => &mut ctx.0.EFlags,
|
||||
FrameContext::X86(ctx) => &mut ctx.EFlags,
|
||||
};
|
||||
|
||||
if enable {
|
||||
*flags |= TRAP_FLAG;
|
||||
} else {
|
||||
*flags &= !TRAP_FLAG;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_mut_ptr(&mut self) -> LPVOID {
|
||||
match self {
|
||||
FrameContext::X64(ctx) => &mut ctx.0 as *mut CONTEXT as LPVOID,
|
||||
FrameContext::X86(ctx) => ctx as *mut WOW64_CONTEXT as LPVOID,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn machine_type(&self) -> WORD {
|
||||
match self {
|
||||
FrameContext::X64(_) => IMAGE_FILE_MACHINE_AMD64,
|
||||
FrameContext::X86(_) => IMAGE_FILE_MACHINE_I386,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_thread_context(&self, thread_handle: HANDLE) -> Result<()> {
|
||||
match self {
|
||||
FrameContext::X86(ctx) => {
|
||||
check_winapi(|| unsafe { Wow64SetThreadContext(thread_handle, ctx) })
|
||||
.context("SetThreadContext")?
|
||||
}
|
||||
FrameContext::X64(ctx) => {
|
||||
check_winapi(|| unsafe { SetThreadContext(thread_handle, &ctx.0) })
|
||||
.context("SetThreadContext")?
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/*
|
||||
pub fn get_register_u64<R: Into<xed_sys::xed_reg_enum_t>>(&self, reg: R) -> u64 {
|
||||
use xed_reg_enum_t::*;
|
||||
|
||||
let reg = reg.into();
|
||||
let full_register_value = match self {
|
||||
FrameContext::X64(cr) => match reg {
|
||||
XED_REG_RIP => cr.0.Rip,
|
||||
XED_REG_RAX | XED_REG_EAX | XED_REG_AX | XED_REG_AL | XED_REG_AH => {
|
||||
cr.0.Rax
|
||||
},
|
||||
XED_REG_RBX | XED_REG_EBX | XED_REG_BX | XED_REG_BL | XED_REG_BH => {
|
||||
cr.0.Rbx
|
||||
},
|
||||
XED_REG_RCX | XED_REG_ECX | XED_REG_CX | XED_REG_CL | XED_REG_CH => {
|
||||
cr.0.Rcx
|
||||
},
|
||||
XED_REG_RDX | XED_REG_EDX | XED_REG_DX | XED_REG_DL | XED_REG_DH => {
|
||||
cr.0.Rdx
|
||||
},
|
||||
XED_REG_R8 | XED_REG_R8D | XED_REG_R8W | XED_REG_R8B => cr.0.R8,
|
||||
XED_REG_R9 | XED_REG_R9D | XED_REG_R9W | XED_REG_R9B => cr.0.R9,
|
||||
XED_REG_R10 | XED_REG_R10D | XED_REG_R10W | XED_REG_R10B => cr.0.R10,
|
||||
XED_REG_R11 | XED_REG_R11D | XED_REG_R11W | XED_REG_R11B => cr.0.R11,
|
||||
XED_REG_R12 | XED_REG_R12D | XED_REG_R12W | XED_REG_R12B => cr.0.R12,
|
||||
XED_REG_R13 | XED_REG_R13D | XED_REG_R13W | XED_REG_R13B => cr.0.R13,
|
||||
XED_REG_R14 | XED_REG_R14D | XED_REG_R14W | XED_REG_R14B => cr.0.R14,
|
||||
XED_REG_R15 | XED_REG_R15D | XED_REG_R15W | XED_REG_R15B => cr.0.R15,
|
||||
XED_REG_RDI | XED_REG_EDI | XED_REG_DI | XED_REG_DIL => cr.0.Rdi,
|
||||
XED_REG_RSI | XED_REG_ESI | XED_REG_SI | XED_REG_SIL => cr.0.Rsi,
|
||||
XED_REG_RBP | XED_REG_EBP | XED_REG_BP | XED_REG_BPL => cr.0.Rbx,
|
||||
XED_REG_RSP | XED_REG_ESP | XED_REG_SP | XED_REG_SPL => cr.0.Rsp,
|
||||
XED_REG_EFLAGS => cr.0.EFlags as u64,
|
||||
XED_REG_GS => cr.0.SegGs as u64,
|
||||
_ => unimplemented!("Register read {:?}", reg),
|
||||
},
|
||||
|
||||
FrameContext::X86(cr) => match reg {
|
||||
XED_REG_EIP => cr.Eip as u64,
|
||||
XED_REG_EAX | XED_REG_AX | XED_REG_AL | XED_REG_AH => cr.Eax as u64,
|
||||
XED_REG_EBX | XED_REG_BX | XED_REG_BL | XED_REG_BH => cr.Ebx as u64,
|
||||
XED_REG_ECX | XED_REG_CX | XED_REG_CL | XED_REG_CH => cr.Ecx as u64,
|
||||
XED_REG_EDX | XED_REG_DX | XED_REG_DL | XED_REG_DH => cr.Edx as u64,
|
||||
XED_REG_EDI | XED_REG_DI | XED_REG_DIL => cr.Edi as u64,
|
||||
XED_REG_ESI | XED_REG_SI | XED_REG_SIL => cr.Esi as u64,
|
||||
XED_REG_EBP | XED_REG_BP | XED_REG_BPL => cr.Ebx as u64,
|
||||
XED_REG_ESP | XED_REG_SP | XED_REG_SPL => cr.Esp as u64,
|
||||
XED_REG_EFLAGS => cr.EFlags as u64,
|
||||
_ => unimplemented!("Register read {:?}", reg),
|
||||
},
|
||||
};
|
||||
|
||||
match reg {
|
||||
XED_REG_EAX | XED_REG_EBX | XED_REG_ECX | XED_REG_EDX | XED_REG_R8D |
|
||||
XED_REG_R9D | XED_REG_R10D | XED_REG_R11D | XED_REG_R12D | XED_REG_R13D |
|
||||
XED_REG_R14D | XED_REG_R15D | XED_REG_EDI | XED_REG_ESI | XED_REG_EBP |
|
||||
XED_REG_ESP => full_register_value & 0x0000_0000_ffff_ffff,
|
||||
|
||||
XED_REG_AX | XED_REG_BX | XED_REG_CX | XED_REG_DX | XED_REG_R8W |
|
||||
XED_REG_R9W | XED_REG_R10W | XED_REG_R11W | XED_REG_R12W | XED_REG_R13W |
|
||||
XED_REG_R14W | XED_REG_R15W | XED_REG_DI | XED_REG_SI | XED_REG_BP |
|
||||
XED_REG_SP => full_register_value & 0x0000_0000_0000_ffff,
|
||||
|
||||
XED_REG_AL | XED_REG_BL | XED_REG_CL | XED_REG_DL | XED_REG_R8B |
|
||||
XED_REG_R9B | XED_REG_R10B | XED_REG_R11B | XED_REG_R12B | XED_REG_R13B |
|
||||
XED_REG_R14B | XED_REG_R15B | XED_REG_DIL | XED_REG_SIL | XED_REG_BPL |
|
||||
XED_REG_SPL => full_register_value & 0x0000_0000_0000_00ff,
|
||||
|
||||
XED_REG_AH | XED_REG_BH | XED_REG_CH | XED_REG_DH => {
|
||||
(full_register_value & 0x0000_ff00) >> 8
|
||||
},
|
||||
|
||||
_ => full_register_value as u64,
|
||||
}
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
pub fn get_thread_frame(process_handle: HANDLE, thread_handle: HANDLE) -> Result<FrameContext> {
|
||||
if process::is_wow64_process(process_handle) {
|
||||
let mut ctx: WOW64_CONTEXT = unsafe { MaybeUninit::zeroed().assume_init() };
|
||||
ctx.ContextFlags = WOW64_CONTEXT_ALL;
|
||||
|
||||
check_winapi(|| unsafe { Wow64GetThreadContext(thread_handle, &mut ctx) })
|
||||
.context("Wow64GetThreadContext")?;
|
||||
Ok(FrameContext::X86(ctx))
|
||||
} else {
|
||||
// required by `CONTEXT`, is a FIXME in winapi right now
|
||||
let mut ctx: Aligned16<CONTEXT> = unsafe { MaybeUninit::zeroed().assume_init() };
|
||||
|
||||
ctx.0.ContextFlags = CONTEXT_ALL;
|
||||
check_winapi(|| unsafe { GetThreadContext(thread_handle, &mut ctx.0) })
|
||||
.context("GetThreadContext")?;
|
||||
Ok(FrameContext::X64(ctx))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ModuleInfo {
|
||||
name: OsString,
|
||||
base_address: u64,
|
||||
}
|
||||
|
||||
impl ModuleInfo {
|
||||
pub fn name(&self) -> &OsStr {
|
||||
&self.name
|
||||
}
|
||||
|
||||
pub fn base_address(&self) -> u64 {
|
||||
self.base_address
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SymInfo {
|
||||
symbol: String,
|
||||
address: u64,
|
||||
displacement: u64,
|
||||
}
|
||||
|
||||
impl SymInfo {
|
||||
/// Return the name of the symbol.
|
||||
pub fn symbol(&self) -> &str {
|
||||
&self.symbol
|
||||
}
|
||||
|
||||
/// Return the address of the symbol.
|
||||
pub fn address(&self) -> u64 {
|
||||
self.address
|
||||
}
|
||||
|
||||
/// Return the displacement from the address of the symbol.
|
||||
pub fn displacement(&self) -> u64 {
|
||||
self.displacement
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SymLineInfo {
|
||||
filename: PathBuf,
|
||||
line_number: u32,
|
||||
}
|
||||
|
||||
// Magic line numbers that have special meaning in the debugger.
|
||||
// If we see these, we don't use the line number, instead we report the offset.
|
||||
const STEP_LINE_OVER: u32 = 0x00f00f00;
|
||||
const STEP_LINE_THRU: u32 = 0x00feefee;
|
||||
|
||||
impl SymLineInfo {
|
||||
pub fn filename(&self) -> &Path {
|
||||
&self.filename
|
||||
}
|
||||
|
||||
pub fn line_number(&self) -> u32 {
|
||||
self.line_number
|
||||
}
|
||||
|
||||
pub fn is_fake_line_number(&self) -> bool {
|
||||
self.line_number == STEP_LINE_OVER || self.line_number == STEP_LINE_THRU
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DebugHelpGuard {
|
||||
lock: HANDLE,
|
||||
}
|
||||
|
||||
impl DebugHelpGuard {
|
||||
pub fn new(lock: HANDLE) -> Self {
|
||||
DebugHelpGuard { lock }
|
||||
}
|
||||
|
||||
pub fn sym_get_options(&self) -> DWORD {
|
||||
unsafe { SymGetOptions() }
|
||||
}
|
||||
|
||||
pub fn sym_set_options(&self, options: DWORD) -> DWORD {
|
||||
unsafe { SymSetOptions(options) }
|
||||
}
|
||||
|
||||
pub fn sym_initialize(&self, process_handle: HANDLE) -> Result<()> {
|
||||
check_winapi(|| unsafe { SymInitializeW(process_handle, std::ptr::null(), FALSE) })
|
||||
}
|
||||
|
||||
pub fn sym_cleanup(&self, process_handle: HANDLE) -> Result<()> {
|
||||
check_winapi(|| unsafe { SymCleanup(process_handle) })
|
||||
}
|
||||
|
||||
pub fn sym_load_module(
|
||||
&self,
|
||||
process_handle: HANDLE,
|
||||
file_handle: HANDLE,
|
||||
image_name: &Path,
|
||||
base_of_dll: DWORD64,
|
||||
image_size: u32,
|
||||
) -> Result<DWORD64> {
|
||||
let load_address = unsafe {
|
||||
SymLoadModuleExW(
|
||||
process_handle,
|
||||
file_handle,
|
||||
win_util::string::to_wstring(image_name).as_ptr(),
|
||||
std::ptr::null_mut(),
|
||||
base_of_dll,
|
||||
image_size,
|
||||
std::ptr::null_mut(),
|
||||
0,
|
||||
)
|
||||
};
|
||||
|
||||
match load_address {
|
||||
0 => {
|
||||
// If the dll was already loaded, don't return an error. This can happen
|
||||
// when we have multiple debuggers - each tracks loading symbols separately.
|
||||
let last_error = std::io::Error::last_os_error();
|
||||
match last_error.raw_os_error() {
|
||||
Some(code) if code == ERROR_SUCCESS as i32 => Ok(0),
|
||||
_ => Err(last_error.into()),
|
||||
}
|
||||
}
|
||||
_ => Ok(load_address),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn stackwalk_ex<F: FnMut(&FrameContext, DWORD)>(
|
||||
&self,
|
||||
process_handle: HANDLE,
|
||||
thread_handle: HANDLE,
|
||||
mut f: F,
|
||||
) -> Result<()> {
|
||||
let mut frame_context = get_thread_frame(process_handle, thread_handle)?;
|
||||
|
||||
let mut frame: STACKFRAME_EX = unsafe { MaybeUninit::zeroed().assume_init() };
|
||||
frame.AddrPC.Offset = frame_context.program_counter();
|
||||
frame.AddrPC.Mode = AddrModeFlat;
|
||||
frame.AddrStack.Offset = frame_context.stack_pointer();
|
||||
frame.AddrStack.Mode = AddrModeFlat;
|
||||
frame.AddrFrame.Offset = frame_context.frame_pointer();
|
||||
frame.AddrFrame.Mode = AddrModeFlat;
|
||||
|
||||
while TRUE
|
||||
== unsafe {
|
||||
StackWalkEx(
|
||||
frame_context.machine_type().into(),
|
||||
process_handle,
|
||||
thread_handle,
|
||||
&mut frame,
|
||||
frame_context.as_mut_ptr(),
|
||||
None,
|
||||
Some(SymFunctionTableAccess64),
|
||||
Some(SymGetModuleBase64),
|
||||
None,
|
||||
SYM_STKWALK_DEFAULT,
|
||||
)
|
||||
}
|
||||
{
|
||||
f(&frame_context, frame.InlineFrameContext);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn sym_from_inline_context(
|
||||
&self,
|
||||
process_handle: HANDLE,
|
||||
program_counter: u64,
|
||||
inline_context: DWORD,
|
||||
) -> Result<SymInfo> {
|
||||
let mut sym_info;
|
||||
let sym_info_ptr = init_sym_info!(sym_info);
|
||||
|
||||
let mut displacement = 0;
|
||||
check_winapi(|| unsafe {
|
||||
SymFromInlineContextW(
|
||||
process_handle,
|
||||
program_counter,
|
||||
inline_context,
|
||||
&mut displacement,
|
||||
sym_info_ptr,
|
||||
)
|
||||
})?;
|
||||
|
||||
let address = sym_info_ptr.Address;
|
||||
let name_len = cmp::min(
|
||||
sym_info_ptr.NameLen as usize,
|
||||
sym_info_ptr.MaxNameLen as usize - 1,
|
||||
);
|
||||
let name_ptr = sym_info_ptr.Name.as_ptr() as *const u16;
|
||||
let name = unsafe { std::slice::from_raw_parts(name_ptr, name_len) };
|
||||
let symbol = String::from_utf16_lossy(name);
|
||||
|
||||
Ok(SymInfo {
|
||||
symbol,
|
||||
address,
|
||||
displacement,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn sym_get_file_and_line(
|
||||
&self,
|
||||
process_handle: HANDLE,
|
||||
program_counter: u64,
|
||||
inline_context: DWORD,
|
||||
) -> Result<SymLineInfo> {
|
||||
let mut line_info: IMAGEHLP_LINEW64 = unsafe { MaybeUninit::zeroed().assume_init() };
|
||||
line_info.SizeOfStruct = size_of::<IMAGEHLP_LINEW64>() as DWORD;
|
||||
let mut displacement = 0 as DWORD;
|
||||
check_winapi(|| unsafe {
|
||||
SymGetLineFromInlineContextW(
|
||||
process_handle,
|
||||
program_counter,
|
||||
inline_context,
|
||||
0,
|
||||
&mut displacement,
|
||||
&mut line_info,
|
||||
)
|
||||
})?;
|
||||
|
||||
let filename = unsafe { win_util::string::os_string_from_wide_ptr(line_info.FileName) };
|
||||
Ok(SymLineInfo {
|
||||
filename: filename.into(),
|
||||
line_number: line_info.LineNumber,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn sym_get_module_info(
|
||||
&self,
|
||||
process_handle: HANDLE,
|
||||
program_counter: u64,
|
||||
) -> Result<ModuleInfo> {
|
||||
let mut module_info: IMAGEHLP_MODULEW64 = unsafe { MaybeUninit::zeroed().assume_init() };
|
||||
module_info.SizeOfStruct = size_of::<IMAGEHLP_MODULEW64>() as DWORD;
|
||||
check_winapi(|| unsafe {
|
||||
SymGetModuleInfoW64(process_handle, program_counter, &mut module_info)
|
||||
})?;
|
||||
|
||||
let module_name =
|
||||
unsafe { win_util::string::os_string_from_wide_ptr(module_info.ModuleName.as_ptr()) };
|
||||
|
||||
Ok(ModuleInfo {
|
||||
name: module_name,
|
||||
base_address: module_info.BaseOfImage,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn sym_from_name(
|
||||
&self,
|
||||
process_handle: HANDLE,
|
||||
modname: impl AsRef<Path>,
|
||||
sym: &str,
|
||||
) -> Result<SymInfo> {
|
||||
assert!(sym.len() + 1 < MAX_SYM_NAME);
|
||||
let mut sym_info;
|
||||
let sym_info_ptr = init_sym_info!(sym_info);
|
||||
|
||||
let mut qualified_sym = OsString::from(modname.as_ref());
|
||||
qualified_sym.push("!");
|
||||
qualified_sym.push(sym);
|
||||
check_winapi(|| unsafe {
|
||||
SymFromNameW(
|
||||
process_handle,
|
||||
win_util::string::to_wstring(qualified_sym).as_ptr(),
|
||||
sym_info_ptr,
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(SymInfo {
|
||||
symbol: sym.to_string(),
|
||||
address: sym_info_ptr.Address,
|
||||
displacement: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for DebugHelpGuard {
|
||||
fn drop(&mut self) {
|
||||
let r = unsafe { ReleaseMutex(self.lock) };
|
||||
debug_assert!(r != 0);
|
||||
}
|
||||
}
|
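// Sketch of walking a thread's stack with the guard above (an illustration,
// not part of the original file; the handles are assumed to come from an
// active debug session, and the module path is assumed to be in scope):
fn print_stack(
    process: winapi::um::winnt::HANDLE,
    thread: winapi::um::winnt::HANDLE,
) -> anyhow::Result<()> {
    // Serialize all dbghelp use behind the named mutex.
    let dbghlp = dbghelp::lock()?;
    dbghlp.sym_initialize(process)?;

    dbghlp.stackwalk_ex(process, thread, |frame, inline_context| {
        if let Ok(sym) =
            dbghlp.sym_from_inline_context(process, frame.program_counter(), inline_context)
        {
            println!("{}+0x{:x}", sym.symbol(), sym.displacement());
        }
    })?;

    dbghlp.sym_cleanup(process)?;
    Ok(())
}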
161
src/agent/debugger/src/debug_event.rs
Normal file
@ -0,0 +1,161 @@
|
||||
// Copyright (c) Microsoft Corporation.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
//! This module implements a Rust wrapper around the Win32 DEBUG_EVENT.
|
||||
use std::{
|
||||
self,
|
||||
fmt::{self, Display},
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use win_util::file::get_path_from_handle;
|
||||
use winapi::{
|
||||
shared::minwindef::DWORD,
|
||||
um::minwinbase::{
|
||||
CREATE_PROCESS_DEBUG_EVENT, CREATE_PROCESS_DEBUG_INFO, CREATE_THREAD_DEBUG_EVENT,
|
||||
CREATE_THREAD_DEBUG_INFO, DEBUG_EVENT, EXCEPTION_DEBUG_EVENT, EXCEPTION_DEBUG_INFO,
|
||||
EXIT_PROCESS_DEBUG_EVENT, EXIT_PROCESS_DEBUG_INFO, EXIT_THREAD_DEBUG_EVENT,
|
||||
EXIT_THREAD_DEBUG_INFO, LOAD_DLL_DEBUG_EVENT, LOAD_DLL_DEBUG_INFO,
|
||||
OUTPUT_DEBUG_STRING_EVENT, OUTPUT_DEBUG_STRING_INFO, RIP_EVENT, RIP_INFO,
|
||||
UNLOAD_DLL_DEBUG_EVENT, UNLOAD_DLL_DEBUG_INFO,
|
||||
},
|
||||
};
|
||||
|
||||
pub enum DebugEventInfo<'a> {
|
||||
Exception(&'a EXCEPTION_DEBUG_INFO),
|
||||
CreateThread(&'a CREATE_THREAD_DEBUG_INFO),
|
||||
CreateProcess(&'a CREATE_PROCESS_DEBUG_INFO),
|
||||
ExitThread(&'a EXIT_THREAD_DEBUG_INFO),
|
||||
ExitProcess(&'a EXIT_PROCESS_DEBUG_INFO),
|
||||
LoadDll(&'a LOAD_DLL_DEBUG_INFO),
|
||||
UnloadDll(&'a UNLOAD_DLL_DEBUG_INFO),
|
||||
OutputDebugString(&'a OUTPUT_DEBUG_STRING_INFO),
|
||||
Rip(&'a RIP_INFO),
|
||||
Unknown,
|
||||
}
|
||||
|
||||
impl<'a> Display for DebugEventInfo<'a> {
|
||||
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
use DebugEventInfo::*;
|
||||
match self {
|
||||
Exception(info) => {
|
||||
write!(
|
||||
formatter,
|
||||
"event=Exception exception_code=0x{:08x} exception_address=0x{:08x} first_chance={}",
|
||||
info.ExceptionRecord.ExceptionCode,
|
||||
info.ExceptionRecord.ExceptionAddress as u64,
|
||||
info.dwFirstChance != 0
|
||||
)?;
|
||||
}
|
||||
CreateThread(_info) => {
|
||||
write!(formatter, "event=CreateThread")?;
|
||||
}
|
||||
CreateProcess(info) => {
|
||||
let image_name = get_path_from_handle(info.hFile).unwrap_or_else(|_| "???".into());
|
||||
write!(
|
||||
formatter,
|
||||
"event=CreateProcess name={} base=0x{:016x}",
|
||||
Path::new(&image_name).display(),
|
||||
info.lpBaseOfImage as u64,
|
||||
)?;
|
||||
}
|
||||
ExitThread(info) => {
|
||||
write!(formatter, "event=ExitThread exit_code={}", info.dwExitCode)?;
|
||||
}
|
||||
ExitProcess(info) => {
|
||||
write!(formatter, "event=ExitProcess exit_code={}", info.dwExitCode)?;
|
||||
}
|
||||
LoadDll(info) => {
|
||||
let image_name = get_path_from_handle(info.hFile).unwrap_or_else(|_| "???".into());
|
||||
write!(
|
||||
formatter,
|
||||
"event=LoadDll name={} base=0x{:016x}",
|
||||
Path::new(&image_name).display(),
|
||||
info.lpBaseOfDll as u64,
|
||||
)?;
|
||||
}
|
||||
UnloadDll(info) => {
|
||||
write!(
|
||||
formatter,
|
||||
"event=UnloadDll base=0x{:016x}",
|
||||
info.lpBaseOfDll as u64,
|
||||
)?;
|
||||
}
|
||||
OutputDebugString(info) => {
|
||||
write!(
|
||||
formatter,
|
||||
"event=OutputDebugString unicode={} address=0x{:016x} length={}",
|
||||
info.fUnicode, info.lpDebugStringData as u64, info.nDebugStringLength,
|
||||
)?;
|
||||
}
|
||||
Rip(info) => {
|
||||
write!(
|
||||
formatter,
|
||||
"event=Rip error=0x{:x} type={}",
|
||||
info.dwError, info.dwType
|
||||
)?;
|
||||
}
|
||||
Unknown => {
|
||||
write!(formatter, "event=Unknown")?;
|
||||
}
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DebugEvent<'a> {
|
||||
process_id: DWORD,
|
||||
thread_id: DWORD,
|
||||
info: DebugEventInfo<'a>,
|
||||
}
|
||||
|
||||
impl<'a> DebugEvent<'a> {
|
||||
pub fn new(de: &'a DEBUG_EVENT) -> Self {
|
||||
let info = unsafe {
|
||||
match de.dwDebugEventCode {
|
||||
EXCEPTION_DEBUG_EVENT => DebugEventInfo::Exception(de.u.Exception()),
|
||||
CREATE_PROCESS_DEBUG_EVENT => {
|
||||
DebugEventInfo::CreateProcess(de.u.CreateProcessInfo())
|
||||
}
|
||||
CREATE_THREAD_DEBUG_EVENT => DebugEventInfo::CreateThread(de.u.CreateThread()),
|
||||
EXIT_PROCESS_DEBUG_EVENT => DebugEventInfo::ExitProcess(de.u.ExitProcess()),
|
||||
EXIT_THREAD_DEBUG_EVENT => DebugEventInfo::ExitThread(de.u.ExitThread()),
|
||||
LOAD_DLL_DEBUG_EVENT => DebugEventInfo::LoadDll(de.u.LoadDll()),
|
||||
UNLOAD_DLL_DEBUG_EVENT => DebugEventInfo::UnloadDll(de.u.UnloadDll()),
|
||||
OUTPUT_DEBUG_STRING_EVENT => DebugEventInfo::OutputDebugString(de.u.DebugString()),
|
||||
RIP_EVENT => DebugEventInfo::Rip(de.u.RipInfo()),
|
||||
_ => DebugEventInfo::Unknown,
|
||||
}
|
||||
};
|
||||
|
||||
Self {
|
||||
process_id: de.dwProcessId,
|
||||
thread_id: de.dwThreadId,
|
||||
info,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_id(&self) -> DWORD {
|
||||
self.process_id
|
||||
}
|
||||
|
||||
pub fn thread_id(&self) -> DWORD {
|
||||
self.thread_id
|
||||
}
|
||||
|
||||
pub fn info(&self) -> &DebugEventInfo {
|
||||
&self.info
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Display for DebugEvent<'a> {
|
||||
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
formatter,
|
||||
" pid={} tid={} {}",
|
||||
self.process_id, self.thread_id, self.info
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
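
// --- Illustrative sketch (editor's addition, not part of the original file) ---
// Shows how `DebugEvent` is meant to sit inside a classic WaitForDebugEvent
// loop. Assumes the target process was created with the DEBUG_PROCESS flag;
// `example_debug_loop` is a hypothetical name and error handling is elided.
#[allow(dead_code)]
fn example_debug_loop() {
    use winapi::um::{
        debugapi::{ContinueDebugEvent, WaitForDebugEvent},
        winbase::INFINITE,
        winnt::DBG_EXCEPTION_NOT_HANDLED,
    };

    unsafe {
        let mut raw_event: DEBUG_EVENT = std::mem::zeroed();
        while WaitForDebugEvent(&mut raw_event, INFINITE) != 0 {
            // Wrap the raw union in the typed enum and log a one-line summary.
            let event = DebugEvent::new(&raw_event);
            log::trace!("{}", event);

            let exited = matches!(event.info(), DebugEventInfo::ExitProcess(_));
            ContinueDebugEvent(
                event.process_id(),
                event.thread_id(),
                DBG_EXCEPTION_NOT_HANDLED,
            );
            if exited {
                break;
            }
        }
    }
}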
1046
src/agent/debugger/src/debugger.rs
Normal file
File diff suppressed because it is too large
9
src/agent/debugger/src/lib.rs
Normal file
@@ -0,0 +1,9 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![cfg(windows)]

pub mod dbghelp;
pub mod debug_event;
pub mod debugger;
pub mod stack;
354
src/agent/debugger/src/stack.rs
Normal file
@@ -0,0 +1,354 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::{
    fmt::{self, Display, Formatter},
    hash::{Hash, Hasher},
    path::Path,
};

use anyhow::Result;
use fnv::FnvHasher;
use log::trace;
use serde::{Serialize, Serializer};
use win_util::memory;
use winapi::{shared::minwindef::DWORD, um::winnt::HANDLE};

use crate::dbghelp::{self, DebugHelpGuard, ModuleInfo};

const UNKNOWN_MODULE: &str = "<UnknownModule>";

#[derive(Clone, Debug, Hash, PartialEq)]
pub enum DebugFunctionLocation {
    /// File/line if available
    ///
    /// Should be stable - ASLR and JIT should not change source position,
    /// but some precision is lost.
    ///
    /// We mitigate this loss of precision by collecting multiple samples
    /// for the same hash bucket.
    Line { file: String, line: u32 },

    /// Offset if line information is not available.
    Offset { disp: u64 },
}

impl Display for DebugFunctionLocation {
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        match self {
            DebugFunctionLocation::Line { file, line } => write!(formatter, "{}:{}", file, line)?,
            DebugFunctionLocation::Offset { disp } => write!(formatter, "0x{:x}", disp)?,
        };
        Ok(())
    }
}

#[derive(Clone, Debug, Hash, PartialEq)]
pub enum DebugStackFrame {
    Frame {
        function: String,
        location: DebugFunctionLocation,
    },
    CorruptFrame,
}

impl DebugStackFrame {
    pub fn new(function: String, location: DebugFunctionLocation) -> DebugStackFrame {
        DebugStackFrame::Frame { function, location }
    }

    pub fn corrupt_frame() -> DebugStackFrame {
        DebugStackFrame::CorruptFrame
    }

    pub fn is_corrupt_frame(&self) -> bool {
        match self {
            DebugStackFrame::Frame { .. } => false,
            DebugStackFrame::CorruptFrame => true,
        }
    }
}

impl Display for DebugStackFrame {
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        match self {
            DebugStackFrame::Frame { function, location } => match location {
                DebugFunctionLocation::Line { file, line } => {
                    write!(formatter, "{} {}:{}", function, file, line)
                }
                DebugFunctionLocation::Offset { disp } => {
                    write!(formatter, "{}+0x{:x}", function, disp)
                }
            },
            DebugStackFrame::CorruptFrame => formatter.write_str("<corrupt frame(s)>"),
        }
    }
}

impl Serialize for DebugStackFrame {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(&format!("{}", self))
    }
}

#[derive(Debug, PartialEq, Serialize)]
pub struct DebugStack {
    frames: Vec<DebugStackFrame>,
}

impl DebugStack {
    pub fn new(frames: Vec<DebugStackFrame>) -> DebugStack {
        DebugStack { frames }
    }

    pub fn frames(&self) -> &[DebugStackFrame] {
        &self.frames
    }

    pub fn stable_hash(&self) -> u64 {
        // Corrupted stacks and jit can result in stacks that vary from run to run, so we exclude
        // those frames and anything below them for a more stable hash.
        let first_unstable_frame = self.frames.iter().position(|f| match f {
            DebugStackFrame::Frame { function, .. } => function == UNKNOWN_MODULE,
            DebugStackFrame::CorruptFrame => true,
        });

        let count = if let Some(position) = first_unstable_frame {
            position.max(1)
        } else {
            self.frames.len()
        };

        let mut hasher = FnvHasher::default();
        self.frames[0..count].hash(&mut hasher);
        hasher.finish()
    }
}

impl Display for DebugStack {
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        let mut first = true;
        for frame in self.frames() {
            if !first {
                writeln!(formatter)?;
            }
            first = false;
            write!(formatter, "{}", frame)?;
        }
        Ok(())
    }
}

fn get_function_location_in_module(
    dbghlp: &DebugHelpGuard,
    module_info: &ModuleInfo,
    process_handle: HANDLE,
    program_counter: u64,
    inline_context: DWORD,
) -> DebugStackFrame {
    if let Ok(sym_info) =
        dbghlp.sym_from_inline_context(process_handle, program_counter, inline_context)
    {
        let function = format!(
            "{}!{}",
            Path::new(module_info.name()).display(),
            sym_info.symbol()
        );

        let sym_line_info =
            dbghlp.sym_get_file_and_line(process_handle, program_counter, inline_context);

        let location = match sym_line_info {
            // Don't use file/line for these magic line numbers.
            Ok(ref sym_line_info) if !sym_line_info.is_fake_line_number() => {
                DebugFunctionLocation::Line {
                    file: sym_line_info.filename().to_string_lossy().into(),
                    line: sym_line_info.line_number(),
                }
            }

            _ => DebugFunctionLocation::Offset {
                disp: sym_info.displacement(),
            },
        };

        DebugStackFrame::new(function, location)
    } else {
        // No function - assume we have an exe with no pdb (so no exports). This should be
        // common, so we won't report an error. We do want a nice(ish) location though.
        let location = DebugFunctionLocation::Offset {
            disp: program_counter - module_info.base_address(),
        };
        DebugStackFrame::new(module_info.name().to_string_lossy().into(), location)
    }
}

fn get_frame_with_unknown_module(process_handle: HANDLE, program_counter: u64) -> DebugStackFrame {
    // We don't have any module information. If the memory is executable, we assume the
    // stack is still valid - perhaps we have jit code - and we use the base of the
    // allocation to compute a synthetic RVA which is hopefully somewhat stable.
    //
    // Otherwise, assume the stack is corrupt.
    match memory::get_memory_info(process_handle, program_counter) {
        Ok(mi) => {
            if mi.is_executable() {
                let offset = program_counter
                    .checked_sub(mi.base_address())
                    .expect("logic error computing fake rva");

                let location = DebugFunctionLocation::Offset { disp: offset };
                DebugStackFrame::new(UNKNOWN_MODULE.into(), location)
            } else {
                DebugStackFrame::corrupt_frame()
            }
        }
        Err(e) => {
            // We expect corrupt stacks, so it's common to see failures with this api,
            // but we do want a log we can turn on if needed.
            trace!("Error getting memory info: {}", e);
            DebugStackFrame::corrupt_frame()
        }
    }
}

pub fn get_stack(
    process_handle: HANDLE,
    thread_handle: HANDLE,
    resolve_symbols: bool,
) -> Result<DebugStack> {
    let dbghlp = dbghelp::lock()?;

    let mut stack = vec![];

    dbghlp.stackwalk_ex(process_handle, thread_handle, |frame, inline_context| {
        // The program counter is the return address, potentially outside of the function
        // performing the call. We subtract 1 to ensure the address is within the call.
        let program_counter = frame.program_counter().saturating_sub(1);

        let debug_stack_frame = if resolve_symbols {
            if let Ok(module_info) = dbghlp.sym_get_module_info(process_handle, program_counter) {
                get_function_location_in_module(
                    &dbghlp,
                    &module_info,
                    process_handle,
                    program_counter,
                    inline_context,
                )
            } else {
                // We ignore the error from sym_get_module_info because corrupt stacks in the
                // target are a common cause of not finding the module - a condition we expect.
                get_frame_with_unknown_module(process_handle, program_counter)
            }
        } else {
            get_frame_with_unknown_module(process_handle, program_counter)
        };

        // Avoid pushing consecutive corrupt frames.
        if !debug_stack_frame.is_corrupt_frame()
            || stack
                .last()
                .map_or(true, |f: &DebugStackFrame| !f.is_corrupt_frame())
        {
            stack.push(debug_stack_frame);
        };
    })?;

    Ok(DebugStack::new(stack))
}

#[cfg(test)]
mod test {
    use super::*;

    macro_rules! frame {
        ($name: expr, disp: $disp: expr) => {
            DebugStackFrame::new(
                $name.to_string(),
                DebugFunctionLocation::Offset { disp: $disp },
            )
        };

        ($name: expr, line: ($file: expr, $line: expr)) => {
            DebugStackFrame::new(
                $name.to_string(),
                DebugFunctionLocation::Line {
                    file: $file.to_string(),
                    line: $line,
                },
            )
        };
    }

    #[test]
    fn stable_stack_hash() {
        let frames = vec![
            frame!("ntdll", disp: 88442200),
            frame!("usage", line: ("foo.c", 88)),
            frame!("main", line: ("foo.c", 42)),
        ];
        let stack = DebugStack::new(frames);

        // Hard coded hash constant is what we want to ensure the hash function is stable.
        assert_eq!(stack.stable_hash(), 8083364444338290471);
    }

    #[test]
    fn stable_hash_ignore_jit() {
        let mut frames = vec![
            frame!("ntdll", disp: 88442200),
            frame!("usage", line: ("foo.c", 88)),
            frame!("main", line: ("foo.c", 42)),
        ];

        let base_frames = frames.clone();

        let base_stack = DebugStack::new(base_frames);

        frames.push(DebugStackFrame::corrupt_frame());
        let corrupted_stack = DebugStack::new(frames.clone());

        assert_eq!(corrupted_stack.stable_hash(), base_stack.stable_hash());
    }

    #[test]
    fn stable_hash_assuming_stack_corrupted() {
        let mut frames = vec![
            frame!("ntdll", disp: 88442200),
            frame!("usage", line: ("foo.c", 88)),
            frame!("main", line: ("foo.c", 42)),
        ];

        let base_frames = frames.clone();

        let base_stack = DebugStack::new(base_frames);

        frames.push(frame!(UNKNOWN_MODULE, disp: 1111));
        let corrupted_stack = DebugStack::new(frames.clone());

        assert_eq!(corrupted_stack.stable_hash(), base_stack.stable_hash());
    }

    #[test]
    fn corrupted_top_of_stack() {
        let one_corrupted_frame = vec![frame!(UNKNOWN_MODULE, disp: 88442200)];

        let mut two_corrupted_frames = one_corrupted_frame.clone();
        two_corrupted_frames.push(frame!(UNKNOWN_MODULE, disp: 88442200));

        // If the entire stack is corrupted, we should only hash the top of stack.
        assert_eq!(
            DebugStack::new(two_corrupted_frames).stable_hash(),
            DebugStack::new(one_corrupted_frame).stable_hash()
        );
    }

    #[test]
    fn empty_stack() {
        let stack = DebugStack::new(vec![]);

        // Mainly checks that hashing an empty stack does not panic.
        assert_eq!(stack.stable_hash(), stack.stable_hash());
    }
}
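
// --- Illustrative sketch (editor's addition, not part of the original file) ---
// Typical call-site shape: walk the faulting thread and bucket the crash by its
// stable hash. The handles are assumed to be valid debuggee handles obtained
// from the debugger; `example_bucket_crash` is a hypothetical name.
#[allow(dead_code)]
fn example_bucket_crash(process_handle: HANDLE, thread_handle: HANDLE) -> Result<u64> {
    // Resolving symbols yields file/line frames where PDBs are available,
    // which keeps the hash stable across ASLR.
    let stack = get_stack(process_handle, thread_handle, /* resolve_symbols */ true)?;
    trace!("crash stack:\n{}", stack);
    Ok(stack.stable_hash())
}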
30
src/agent/input-tester/Cargo.toml
Normal file
@@ -0,0 +1,30 @@
[package]
name = "input-tester"
version = "0.1.0"
authors = ["fuzzing@microsoft.com"]
edition = "2018"
license = "MIT"

[dependencies]
anyhow = "1.0"
atexit = { path = "../atexit" }
coverage = { path = "../coverage" }
debugger = { path = "../debugger" }
fnv = "1.0"
log = "0.4"
num_cpus = "1.13"
rayon = "1.3"
sha-1 = "0.9"
uuid = { version = "0.8", features = ["serde", "v4"] }
win-util = { path = "../win-util" }

[dependencies.winapi]
version = "0.3"
features = [
    "debugapi",
    "handleapi",
    "memoryapi",
    "processthreadsapi",
    "werapi",
    "winbase",
]
1047
src/agent/input-tester/src/appverifier.rs
Normal file
File diff suppressed because it is too large
516
src/agent/input-tester/src/crash_detector.rs
Normal file
@@ -0,0 +1,516 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! This module implements a simple debugger to detect exceptions in an application.
use std::{
    self,
    collections::HashMap,
    ffi::{OsStr, OsString},
    fs,
    io::Write,
    path::Path,
    process::{Command, Output},
    time::{Duration, Instant},
};

use anyhow::Result;
use coverage::AppCoverageBlocks;
use debugger::debugger::{BreakpointId, BreakpointType, DebugEventHandler, Debugger};
use fnv::FnvHashMap;
use log::{debug, error, trace};
use win_util::{
    pipe_handle::{pipe, PipeReaderNonBlocking},
    process,
};
use winapi::{
    shared::minwindef::DWORD,
    um::{
        minwinbase::{CREATE_PROCESS_DEBUG_INFO, EXCEPTION_DEBUG_INFO},
        winnt::{DBG_EXCEPTION_NOT_HANDLED, HANDLE},
    },
};

use crate::{
    logging,
    test_result::{
        asan, new_exception,
        vcpp_debugger::{self, VcppDebuggerExceptionInfo},
        Exception, ExitStatus,
    },
};

pub struct DebuggerResult {
    pub exceptions: Vec<Exception>,
    pub exit_status: ExitStatus,
    pub stdout: String,
    pub stderr: String,
    pub debugger_output: String,
}

impl DebuggerResult {
    fn new(
        exceptions: Vec<Exception>,
        exit_status: ExitStatus,
        stdout: String,
        stderr: String,
        debugger_output: String,
    ) -> Self {
        DebuggerResult {
            exceptions,
            exit_status,
            stdout,
            stderr,
            debugger_output,
        }
    }

    pub fn any_crashes(&self) -> bool {
        !self.exceptions.is_empty()
    }

    pub fn timed_out(&self) -> bool {
        self.exit_status.is_timeout()
    }

    pub fn any_crashes_or_timed_out(&self) -> bool {
        self.any_crashes() || self.timed_out()
    }

    pub fn write_markdown_summary(&self, summary_path: &Path) -> Result<()> {
        let mut file = fs::File::create(&summary_path)?;
        writeln!(file, "# Test Results")?;
        writeln!(file)?;
        writeln!(file, "## Output")?;
        writeln!(file)?;
        writeln!(file, "### Standard Output")?;
        writeln!(file)?;
        writeln!(file, "```")?;
        writeln!(file, "{}", self.stdout)?;
        writeln!(file, "```")?;
        writeln!(file)?;
        writeln!(file, "### Standard Error")?;
        writeln!(file)?;
        writeln!(file, "```")?;
        writeln!(file, "{}", self.stderr)?;
        writeln!(file, "```")?;
        writeln!(file)?;
        writeln!(file, "### Exit status")?;
        writeln!(file)?;
        writeln!(file, "{}", self.exit_status)?;
        writeln!(file)?;
        if !self.debugger_output.is_empty() {
            writeln!(file, "## Debugger output")?;
            writeln!(file)?;
            writeln!(file, "```")?;
            writeln!(file, "{}", self.debugger_output)?;
            writeln!(file, "```")?;
            writeln!(file)?;
        }
        writeln!(file, "## Exceptions")?;
        writeln!(file)?;
        for exception in &self.exceptions {
            writeln!(file)?;
            writeln!(file, "```")?;
            writeln!(file, "{}", exception)?;
            writeln!(file, "```")?;
        }
        writeln!(file)?;
        Ok(())
    }
}

struct CrashDetectorEventHandler<'a> {
    start_time: Instant,
    max_duration: Duration,
    ignore_first_chance_exceptions: bool,
    any_target_terminated: bool,
    timed_out: bool,
    stdout: PipeReaderNonBlocking,
    stderr: PipeReaderNonBlocking,
    stdout_buffer: Vec<u8>,
    stderr_buffer: Vec<u8>,
    debugger_output: String,
    exceptions: Vec<Exception>,
    id_to_block: Option<FnvHashMap<BreakpointId, (usize, usize)>>,
    coverage_map: Option<&'a mut AppCoverageBlocks>,
}

impl<'a> CrashDetectorEventHandler<'a> {
    pub fn new(
        coverage_map: Option<&'a mut AppCoverageBlocks>,
        stdout: PipeReaderNonBlocking,
        stderr: PipeReaderNonBlocking,
        ignore_first_chance_exceptions: bool,
        start_time: Instant,
        max_duration: Duration,
    ) -> Self {
        Self {
            start_time,
            max_duration,
            ignore_first_chance_exceptions,
            any_target_terminated: false,
            timed_out: false,
            stdout,
            stdout_buffer: vec![],
            stderr,
            stderr_buffer: vec![],
            debugger_output: String::new(),
            exceptions: vec![],
            id_to_block: None,
            coverage_map,
        }
    }
}

/// Register the coverage breakpoints and build a mapping from `BreakpointId` to the pair
/// (module index, block index).
///
/// The debugger passes a `BreakpointId` to the `on_breakpoint` callback - the same id returned
/// when the breakpoint is registered.
///
/// This mapping is convenient so that we do not need to track module load addresses and later
/// map between breakpoint address and module/rva pairs.
fn prepare_coverage_breakpoints(
    debugger: &mut Debugger,
    coverage_map: &Option<&mut AppCoverageBlocks>,
) -> Option<FnvHashMap<BreakpointId, (usize, usize)>> {
    if let Some(coverage_map) = coverage_map {
        let mut id_to_block = fnv::FnvHashMap::default();
        for (m, module) in coverage_map.modules().iter().enumerate() {
            let name = module.module_name();
            for (i, block) in module.blocks().iter().enumerate() {
                // For better performance, we can skip registering breakpoints that have
                // been hit as we only care about new coverage.
                if !block.hit() {
                    let id = debugger.register_breakpoint(
                        name,
                        block.rva() as u64,
                        BreakpointType::OneTime,
                    );

                    id_to_block.insert(id, (m, i));
                }
            }
        }
        Some(id_to_block)
    } else {
        None
    }
}

fn is_vcpp_notification(exception: &EXCEPTION_DEBUG_INFO, target_process_handle: HANDLE) -> bool {
    if exception.ExceptionRecord.ExceptionCode == vcpp_debugger::EXCEPTION_VISUALCPP_DEBUGGER {
        match VcppDebuggerExceptionInfo::from_exception_record(
            &exception.ExceptionRecord,
            !process::is_wow64_process(target_process_handle),
        ) {
            VcppDebuggerExceptionInfo::ThreadName(_) => {
                return true;
            }
            VcppDebuggerExceptionInfo::Probe(probe_info) => {
                if let Err(e) = probe_info.notify_target(target_process_handle) {
                    error!("Error notifying target on vcpp probe: {}", e);
                }
                return true;
            }
            VcppDebuggerExceptionInfo::Fiber(_) => {
                return true;
            }
            _ => {}
        }
    }

    false
}

impl<'a> DebugEventHandler for CrashDetectorEventHandler<'a> {
    fn on_exception(
        &mut self,
        debugger: &mut Debugger,
        info: &EXCEPTION_DEBUG_INFO,
        process_handle: HANDLE,
    ) -> DWORD {
        if !is_vcpp_notification(info, process_handle) {
            // An exception might be handled, or other cleanup might occur between
            // the first chance and the second chance, so we continue execution.
            let exception_code = info.ExceptionRecord.ExceptionCode;

            // If we're ignoring first chance exceptions, we skip collecting the stack
            // and adding the exception to our list of results.
            // We also ignore exceptions after we terminate any process as that might
            // cause exceptions in other processes in the process tree.
            if !(info.dwFirstChance == 1 && self.ignore_first_chance_exceptions
                || self.any_target_terminated)
            {
                match debugger.get_current_stack() {
                    Ok(stack) => self
                        .exceptions
                        .push(new_exception(process_handle, info, stack)),
                    Err(err) => error!("Error walking program under test stack: {}", err),
                }

                if info.dwFirstChance == 0 {
                    if exception_code == asan::EH_SANITIZER {
                        let asan_report =
                            asan::get_asan_report(process_handle, &info.ExceptionRecord);
                        if let Some(report) = asan_report {
                            self.debugger_output.push_str(&report);
                        }
                    }

                    // This is the second chance - we terminate the process to avoid
                    // any potential popups, e.g. from Windows Error Reporting.

                    // Kill the process but stay in the debug loop to consume
                    // the last EXIT_PROCESS_DEBUG_EVENT.
                    self.any_target_terminated = true;
                    trace!(
                        "crash in process {} - terminating",
                        process::id(process_handle)
                    );
                    process::terminate(process_handle);
                }
            }
        }

        // Continue normal exception handling processing
        DBG_EXCEPTION_NOT_HANDLED
    }

    fn on_create_process(&mut self, debugger: &mut Debugger, _info: &CREATE_PROCESS_DEBUG_INFO) {
        prepare_coverage_breakpoints(debugger, &self.coverage_map);
    }

    fn on_output_debug_string(&mut self, _debugger: &mut Debugger, message: String) {
        self.debugger_output.push_str(&message);
    }

    fn on_output_debug_os_string(&mut self, _debugger: &mut Debugger, message: OsString) {
        self.debugger_output
            .push_str(message.to_string_lossy().as_ref());
    }

    fn on_poll(&mut self, debugger: &mut Debugger) {
        if let Err(e) = self.stdout.read(&mut self.stdout_buffer) {
            error!("Error reading child process stdout: {}", e);
        }
        if let Err(e) = self.stderr.read(&mut self.stderr_buffer) {
            error!("Error reading child process stderr: {}", e);
        }

        if !self.timed_out && self.start_time.elapsed() > self.max_duration {
            // The process appears to be hung; kill it and its children.
            debug!("test timeout - terminating process tree");

            self.timed_out = true;
            self.any_target_terminated = true;

            debugger.quit_debugging();
        }
    }

    fn on_breakpoint(&mut self, _debugger: &mut Debugger, id: BreakpointId) {
        if let Some(id_to_block) = &self.id_to_block {
            if let Some(&(mod_idx, block_idx)) = id_to_block.get(&id) {
                if let Some(coverage_map) = &mut self.coverage_map {
                    coverage_map.report_block_hit(mod_idx, block_idx);
                }
            }
        }
    }
}

/// This function runs the application under a debugger to detect any crashes in
/// the process or any children processes.
pub fn test_process(
    app_path: impl AsRef<OsStr>,
    args: &[impl AsRef<OsStr>],
    env: &HashMap<String, String>,
    max_duration: Duration,
    ignore_first_chance_exceptions: bool,
    coverage_map: Option<&mut AppCoverageBlocks>,
) -> Result<DebuggerResult> {
    debug!("Running: {}", logging::command_invocation(&app_path, args));

    let (stdout_reader, stdout_writer) = pipe()?;
    // To merge streams, we could instead use:
    // stderr_writer = stdout_writer.try_clone()?;
    let (stderr_reader, stderr_writer) = pipe()?;
    let mut command = Command::new(app_path);
    command
        .args(args)
        .stdout(stdout_writer)
        .stderr(stderr_writer);

    for (k, v) in env {
        command.env(k, v);
    }

    let start_time = Instant::now();
    let mut event_handler = CrashDetectorEventHandler::new(
        coverage_map,
        stdout_reader,
        stderr_reader,
        ignore_first_chance_exceptions,
        start_time,
        max_duration,
    );
    let (mut debugger, mut child) = Debugger::init(command, &mut event_handler)?;
    debugger.run(&mut event_handler)?;

    let pid = child.id();
    let status = child.wait()?;
    let output = Output {
        status,
        stdout: event_handler.stdout_buffer,
        stderr: event_handler.stderr_buffer,
    };
    debug!("TestTask: {:?}", logging::ProcessDetails::new(pid, &output));

    let exit_status = if event_handler.timed_out {
        ExitStatus::from_timeout(max_duration.as_secs())
    } else if let Some(code) = output.status.code() {
        ExitStatus::from_code(code)
    } else {
        unreachable!("Only Unix can signal");
    };

    Ok(DebuggerResult::new(
        filter_uninteresting_exceptions(event_handler.exceptions),
        exit_status,
        String::from_utf8_lossy(&output.stdout).to_string(),
        String::from_utf8_lossy(&output.stderr).to_string(),
        event_handler.debugger_output,
    ))
}

fn filter_uninteresting_exceptions(mut exceptions: Vec<Exception>) -> Vec<Exception> {
    // Filter out first chance exceptions that are **immediately** followed by the same
    // second chance exception (same stack hash). This is the typical scenario.
    //
    // It is possible to have intervening handled exceptions between a first chance and
    // second chance (crashing) exception, but we keep those because it might be interesting.
    let mut i = 1;
    while i < exceptions.len() {
        let prev = &exceptions[i - 1];
        let curr = &exceptions[i];
        if prev.first_chance
            && prev.exception_code == curr.exception_code
            && prev.stack_hash == curr.stack_hash
        {
            exceptions.remove(i - 1);
        } else {
            i += 1;
        }
    }
    exceptions
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_result::{ExceptionCode, ExceptionDescription};

    const READ_AV: u32 = 0xc0000005;
    const EXCEPTION_CPP: u32 = 0xE06D7363;

    macro_rules! runps {
        ($timeout: expr, $script: expr) => {{
            test_process(
                r"C:\windows\system32\WindowsPowerShell\v1.0\powershell.exe",
                &vec!["/nop".to_string(), "/c".to_string(), $script.to_string()],
                // No extra environment variables; `test_process` requires the
                // `env` argument, which was missing here.
                &HashMap::default(),
                $timeout,
                /*ignore first chance exceptions*/ true,
                None,
            )
            .unwrap()
        }};

        ($script: expr) => {{
            let timeout = Duration::from_secs(5);
            runps!(timeout, $script)
        }};
    }

    #[test]
    fn timeout_works() {
        let timeout = Duration::from_secs(2);
        let result = runps!(timeout, "sleep 600");
        assert_eq!(
            result.exit_status,
            ExitStatus::from_timeout(timeout.as_secs())
        );
    }

    #[test]
    fn nonblocking_stdout() {
        let result = runps!(
            Duration::from_secs(10),
            "'+++' + 'a'*8kb + '@@@' + 'b'*8kb + '---'"
        );
        assert_eq!(result.exit_status, ExitStatus::from_code(0));

        assert!(result.stdout.starts_with("+++"));
        assert!(result.stdout.contains("@@@"));
        assert!(result.stdout.ends_with("---\r\n"));
        let expected_len = /*+++*/ 3 + (/*a - 8kb*/1 * 8 * 1024) + /*@@@*/3 + (/*b - 8kb*/1 * 8 * 1024) + /*---*/3 + /*\r\n*/2;
        assert_eq!(result.stdout.len(), expected_len);
    }

    macro_rules! exception {
        ($code: expr, $hash: expr, first) => {
            exception!($code, $hash, true)
        };

        ($code: expr, $hash: expr) => {
            exception!($code, $hash, false)
        };

        ($code: expr, $hash: expr, $first_chance: expr) => {{
            let descr = match $code {
                READ_AV => ExceptionCode::ReadAccessViolation,
                EXCEPTION_CPP => ExceptionCode::CppException,
                _ => ExceptionCode::UnknownExceptionCode,
            };
            Exception {
                exception_code: $code,
                description: ExceptionDescription::GenericException(descr),
                stack_hash: $hash,
                first_chance: $first_chance,
                stack_frames: vec![],
            }
        }};
    }

    #[test]
    fn test_exception_filtering() {
        let empty = vec![];
        let one_first_chance = vec![exception!(READ_AV, 1234, first)];
        let one_second_chance = vec![exception!(READ_AV, 1234)];
        let typical = vec![exception!(READ_AV, 1234, first), exception!(READ_AV, 1234)];
        let atypical1 = vec![exception!(READ_AV, 1234, first), exception!(READ_AV, 4567)];
        let atypical2 = vec![
            exception!(READ_AV, 1234, first),
            exception!(EXCEPTION_CPP, 1234),
        ];
        let atypical3 = vec![
            exception!(READ_AV, 1234, first),
            exception!(EXCEPTION_CPP, 1234, first),
            exception!(READ_AV, 1234),
        ];
        let atypical4 = vec![
            exception!(READ_AV, 1234, first),
            exception!(READ_AV, 4567, first),
            exception!(READ_AV, 1234),
        ];

        assert_eq!(filter_uninteresting_exceptions(empty).len(), 0);
        assert_eq!(filter_uninteresting_exceptions(one_first_chance).len(), 1);
        assert_eq!(filter_uninteresting_exceptions(one_second_chance).len(), 1);
        assert_eq!(filter_uninteresting_exceptions(typical).len(), 1);
        assert_eq!(filter_uninteresting_exceptions(atypical1).len(), 2);
        assert_eq!(filter_uninteresting_exceptions(atypical2).len(), 2);
        assert_eq!(filter_uninteresting_exceptions(atypical3).len(), 3);
        assert_eq!(filter_uninteresting_exceptions(atypical4).len(), 3);
    }
}
66
src/agent/input-tester/src/lib.rs
Normal file
@@ -0,0 +1,66 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::too_many_arguments)]
#![allow(clippy::implicit_hasher)]
#![cfg(windows)]

#[allow(unused)]
pub mod appverifier;
pub mod crash_detector;
pub mod logging;
pub mod summary;
pub mod test_result;
pub mod tester;

pub use tester::Tester;

use std::collections::HashMap;
use std::path::PathBuf;

use anyhow::Result;
use log::info;

use crate::appverifier::AppVerifierState;

/// Run the test file task in standalone mode where the input file is specified
/// from the command line.
pub fn run(
    output_dir: PathBuf,
    driver: PathBuf,
    driver_env: HashMap<String, String>,
    driver_args: Vec<String>,
    max_run_s: u64,
    ignore_first_chance_exceptions: bool,
    app_verifier_tests: Option<Vec<String>>,
    input: PathBuf,
    max_cores: Option<usize>,
) -> Result<()> {
    let tester = Tester::new(
        &output_dir,
        driver,
        driver_env,
        driver_args,
        max_run_s,
        ignore_first_chance_exceptions,
        app_verifier_tests,
    )?;

    tester.set_appverifier(AppVerifierState::Enabled)?;

    let (summary, _results) = if std::fs::metadata(&input)?.is_file() {
        tester.test_single_file(&input)?
    } else {
        tester.test_dir(&input, max_cores)?
    };

    info!(
        "Test results summary: Crashes={} Passes={} HandledExceptions={} Timeouts={}",
        summary.crashes(),
        summary.passes(),
        summary.handled_exceptions(),
        summary.timeouts(),
    );

    Ok(())
}
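
// --- Illustrative sketch (editor's addition, not part of the original file) ---
// Hypothetical standalone invocation of `run`; every path and limit below is a
// made-up placeholder, not a value taken from the original source.
#[allow(dead_code)]
fn example_standalone() -> Result<()> {
    run(
        PathBuf::from(r"e:\results"),         // output_dir
        PathBuf::from(r"e:\bin\fuzz_me.exe"), // driver under test
        HashMap::new(),                       // no extra environment variables
        Vec::new(),                           // no extra driver arguments
        30,                                   // max_run_s: per-input timeout in seconds
        true,                                 // ignore first chance exceptions
        None,                                 // no app verifier test categories
        PathBuf::from(r"e:\inputs"),          // a single file or a directory of inputs
        None,                                 // max_cores: use the default
    )
}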
40
src/agent/input-tester/src/logging.rs
Normal file
@@ -0,0 +1,40 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::{ffi::OsStr, path::Path, process::Output};

pub fn command_invocation<S, T, I>(command: S, args: I) -> String
where
    S: AsRef<OsStr>,
    T: AsRef<OsStr>,
    I: IntoIterator<Item = T>,
{
    let mut result = command.as_ref().to_string_lossy().to_string();

    for arg in args {
        result.push(' ');
        let needs_quotes = arg.as_ref().to_string_lossy().find(' ').is_some();
        if needs_quotes {
            result.push('"');
        }
        let arg: &Path = arg.as_ref().as_ref();
        result.push_str(&format!("{}", arg.display()));
        if needs_quotes {
            result.push('"');
        }
    }

    result
}

#[derive(Debug)]
pub struct ProcessDetails<'a> {
    pid: u32,
    output: &'a Output,
}

impl<'a> ProcessDetails<'a> {
    pub fn new(pid: u32, output: &'a Output) -> Self {
        ProcessDetails { pid, output }
    }
}
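
// --- Illustrative sketch (editor's addition, not part of the original file) ---
// Expected shape of `command_invocation` output: arguments containing a space
// are wrapped in double quotes; everything else is passed through unchanged.
#[cfg(test)]
mod example_tests {
    use super::*;

    #[test]
    fn quotes_args_containing_spaces() {
        let line = command_invocation("app.exe", &["plain", "has space"]);
        assert_eq!(line, r#"app.exe plain "has space""#);
    }
}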
121
src/agent/input-tester/src/summary.rs
Normal file
@@ -0,0 +1,121 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::new_without_default)]

use std::collections::hash_map::HashMap;

use crate::crash_detector::DebuggerResult;

/// The test summary includes counts of results for all inputs tested.
#[derive(Clone)]
pub struct Summary {
    /// Count of inputs tested with no exceptions or timeouts.
    passes: u32,

    /// Count of first chance exceptions for which we never see a matching
    /// second chance exception (of which there is typically one - a crash).
    handled_exceptions: u32,

    /// First chance exceptions observed.
    first_chance_exceptions: u32,

    /// Second chance exceptions observed.
    crashes: u32,

    /// Timeouts observed.
    timeouts: u32,
}

impl Summary {
    pub fn new() -> Self {
        Summary {
            passes: 0,
            handled_exceptions: 0,
            first_chance_exceptions: 0,
            crashes: 0,
            timeouts: 0,
        }
    }

    #[must_use]
    pub fn difference(&self, other: &Self) -> Self {
        Summary {
            passes: self.passes() - other.passes(),
            handled_exceptions: self.handled_exceptions() - other.handled_exceptions(),
            first_chance_exceptions: self.first_chance_exceptions()
                - other.first_chance_exceptions(),
            crashes: self.crashes() - other.crashes(),
            timeouts: self.timeouts() - other.timeouts(),
        }
    }

    pub fn passes(&self) -> u32 {
        self.passes
    }

    pub fn handled_exceptions(&self) -> u32 {
        self.handled_exceptions
    }

    pub fn first_chance_exceptions(&self) -> u32 {
        self.first_chance_exceptions
    }

    pub fn crashes(&self) -> u32 {
        self.crashes
    }

    pub fn timeouts(&self) -> u32 {
        self.timeouts
    }

    pub fn update(&mut self, result: &DebuggerResult) {
        if !result.any_crashes_or_timed_out() {
            self.passes += 1;
            return;
        }

        // Track first and second chance exceptions by stack hash so we
        // can see how many were caught.
        // We can't assume we see a first chance exception for every second chance;
        // one example is __fastfail, which only raises a second chance exception.
        let mut bug_hashmap: HashMap<u64, (u32, u32)> = HashMap::new();
        for exception in &result.exceptions {
            let (first, second) = if exception.first_chance {
                self.first_chance_exceptions += 1;
                (1, 0)
            } else {
                self.crashes += 1;
                (0, 1)
            };

            match bug_hashmap.get_mut(&exception.stack_hash) {
                Some(v) => {
                    *v = (v.0 + first, v.1 + second);
                }
                _ => {
                    bug_hashmap.insert(exception.stack_hash, (first, second));
                }
            }
        }

        for exception_counts in bug_hashmap.values() {
            if exception_counts.0 > 0 {
                self.handled_exceptions += exception_counts.0 - exception_counts.1;
            }
        }

        if result.timed_out() {
            self.timeouts += 1;
        }
    }
}

impl From<&DebuggerResult> for Summary {
    fn from(result: &DebuggerResult) -> Self {
        let mut summary = Summary::new();
        summary.update(result);
        summary
    }
}
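
// --- Illustrative sketch (editor's addition, not part of the original file) ---
// The handled-exception arithmetic from `update`, in isolation: per stack hash
// we tally (first_chance, second_chance) sightings and count the surplus of
// first-chance sightings as handled. `saturating_sub` is used here defensively;
// `update` assumes first >= second within a bucket.
#[allow(dead_code)]
fn handled_count(buckets: &HashMap<u64, (u32, u32)>) -> u32 {
    buckets
        .values()
        .filter(|counts| counts.0 > 0)
        .map(|counts| counts.0.saturating_sub(counts.1))
        .sum()
}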
205
src/agent/input-tester/src/test_result/asan.rs
Normal file
@@ -0,0 +1,205 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::unreadable_literal)]

use std::mem::size_of;

use anyhow::{bail, Result};
use win_util::process;
use win_util::UNION; // Ideally this would be exported from winapi.
use winapi::{
    shared::{
        basetsd::UINT64,
        minwindef::{DWORD, LPCVOID},
        ntdef::LPWSTR,
    },
    um::winnt::{EXCEPTION_RECORD, HANDLE},
    STRUCT,
};

/// An error detected by ASAN.
///
/// These kinds are based on strings output by the lldb ASAN plugin; unrecognized errors should use `UnknownAsanError`.
#[derive(Debug)]
pub enum AsanError {
    UnknownAsanError,
    HeapUseAfterFree,
    HeapBufferOverflow,
    StackBufferUnderflow,
    InitializationOrderFiasco,
    StackBufferOverflow,
    StackUseAfterReturn,
    UseAfterPoison,
    ContainerOverflow,
    StackUseAfterScope,
    GlobalBufferOverflow,
    UnknownCrash,
    /// Leading underscore to avoid conflicts because protoc generates C++ enums which share the namespace.
    StackOverflow,
    NullDeref,
    WildJump,
    WildAddrWrite,
    WildAddrRead,
    WildAddr,
    Signal,
    /// Leading underscore to avoid conflicts because protoc generates C++ enums which share the namespace.
    DoubleFree,
    NewDeleteTypeMismatch,
    BadFree,
    AllocDeallocMismatch,
    ParamOverlap,
    NegativeSizeParam,
    InvalidPointerPair,
}

// Types defined in vcasan.h
STRUCT! {
#[allow(non_snake_case)]
struct EXCEPTION_ASAN_ERROR {
    // The description string from asan, such as heap-use-after-free
    uiRuntimeDescriptionLength: UINT64,
    pwRuntimeDescription: LPWSTR,

    // A translation of the description string to something more user friendly done by this lib,
    // not localized
    uiRuntimeShortMessageLength: UINT64,
    pwRuntimeShortMessage: LPWSTR,

    // the full report from asan, not localized
    uiRuntimeFullMessageLength: UINT64,
    pwRuntimeFullMessage: LPWSTR, /* pointer to Unicode message (or NULL) */

    // azure payload, WIP
    uiCustomDataLength: UINT64,
    pwCustomData: LPWSTR,
}}

UNION! {
union EXCEPTION_SANITIZER_ERROR_u {
    [u64; 8],
    asan asan_mut: EXCEPTION_ASAN_ERROR,
}}

STRUCT! {
#[allow(non_snake_case)]
struct EXCEPTION_SANITIZER_ERROR {
    // the size of this structure, set by the caller
    cbSize: DWORD,
    // the specific type of sanitizer error this is. Set by the caller, determines which member of the union is valid
    dwSanitizerKind: DWORD,
    u: EXCEPTION_SANITIZER_ERROR_u,
}}

// #define EH_SANITIZER ('san' | 0xE0000000)
// #define EH_SANITIZER_ASAN (EH_SANITIZER + 1)
pub const EH_SANITIZER: u32 =
    0xe0000000 | ((b's' as u32) << 16) | ((b'a' as u32) << 8) | b'n' as u32; // 0xe073616e;
pub const EH_SANITIZER_ASAN: u32 = EH_SANITIZER + 1;

fn get_exception_sanitizer_error(
    process_handle: HANDLE,
    remote_asan_error: LPCVOID,
) -> Result<EXCEPTION_SANITIZER_ERROR> {
    let record =
        process::read_memory::<EXCEPTION_SANITIZER_ERROR>(process_handle, remote_asan_error)?;
    if record.dwSanitizerKind != EH_SANITIZER_ASAN {
        anyhow::bail!("Unrecognized sanitizer kind");
    }
    if (record.cbSize as usize) < size_of::<EXCEPTION_SANITIZER_ERROR>() {
        anyhow::bail!("Unrecognized sanitizer record size");
    }
    Ok(record)
}

fn get_runtime_description(process_handle: HANDLE, remote_asan_error: LPCVOID) -> Result<String> {
    let record = get_exception_sanitizer_error(process_handle, remote_asan_error)?;
    let asan_error = unsafe { record.u.asan() };
    let size = asan_error.uiRuntimeDescriptionLength as usize;
    let remote_message_address = asan_error.pwRuntimeDescription as LPCVOID;
    let message = process::read_wide_string(process_handle, remote_message_address, size)?;
    Ok(message.to_string_lossy().to_string())
}

fn get_full_message(process_handle: HANDLE, remote_asan_error: LPCVOID) -> Result<String> {
    let record = get_exception_sanitizer_error(process_handle, remote_asan_error)?;
    let asan_error = unsafe { record.u.asan() };
    let size = asan_error.uiRuntimeFullMessageLength as usize;
    let remote_message_address = asan_error.pwRuntimeFullMessage as LPCVOID;
    if size == 0 || remote_message_address.is_null() {
        bail!("Empty full message");
    }

    let message = process::read_wide_string(process_handle, remote_message_address, size)?;
    Ok(message.to_string_lossy().to_string())
}

fn get_asan_error_from_runtime_description(message: &str) -> AsanError {
    match message {
        "heap-use-after-free" => AsanError::HeapUseAfterFree,
        "heap-buffer-overflow" => AsanError::HeapBufferOverflow,
        "stack-buffer-underflow" => AsanError::StackBufferUnderflow,
        "initialization-order-fiasco" => AsanError::InitializationOrderFiasco,
        "stack-buffer-overflow" => AsanError::StackBufferOverflow,
        "stack-use-after-return" => AsanError::StackUseAfterReturn,
        "use-after-poison" => AsanError::UseAfterPoison,
        "container-overflow" => AsanError::ContainerOverflow,
        "stack-use-after-scope" => AsanError::StackUseAfterScope,
        "global-buffer-overflow" => AsanError::GlobalBufferOverflow,
        "unknown-crash" => AsanError::UnknownCrash,
        "stack-overflow" => AsanError::StackOverflow,
        "null-deref" => AsanError::NullDeref,
        "wild-jump" => AsanError::WildJump,
        "wild-addr-write" => AsanError::WildAddrWrite,
        "wild-addr-read" => AsanError::WildAddrRead,
        "wild-addr" => AsanError::WildAddr,
        "signal" => AsanError::Signal,
        "double-free" => AsanError::DoubleFree,
        "new-delete-type-mismatch" => AsanError::NewDeleteTypeMismatch,
        "bad-free" => AsanError::BadFree,
        "alloc-dealloc-mismatch" => AsanError::AllocDeallocMismatch,
        "param-overlap" => AsanError::ParamOverlap,
        "negative-size-param" => AsanError::NegativeSizeParam,
        "invalid-pointer-pair" => AsanError::InvalidPointerPair,
        _ => AsanError::UnknownAsanError,
    }
}

pub fn asan_error_from_exception_record(
    process_handle: HANDLE,
    exception_record: &EXCEPTION_RECORD,
) -> AsanError {
    if exception_record.NumberParameters >= 1 {
        let message = get_runtime_description(
            process_handle,
            exception_record.ExceptionInformation[0] as LPCVOID,
        )
        .ok();

        if let Some(message) = message {
            return get_asan_error_from_runtime_description(&message);
        }
    }

    AsanError::UnknownAsanError
}

/// Return the full asan report from the exception record.
pub fn get_asan_report(
    process_handle: HANDLE,
    exception_record: &EXCEPTION_RECORD,
) -> Option<String> {
    if exception_record.NumberParameters >= 1 {
        let message = get_full_message(
            process_handle,
            exception_record.ExceptionInformation[0] as LPCVOID,
        )
        .ok();

        if let Some(message) = message {
            return Some(message);
        }
    }

    None
}
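
// --- Illustrative sketch (editor's addition, not part of the original file) ---
// Call-site shape, mirroring how crash_detector uses this module: only consult
// the ASAN helpers once the exception code identifies a sanitizer report.
#[allow(dead_code)]
fn example_report(process_handle: HANDLE, record: &EXCEPTION_RECORD) -> Option<String> {
    if record.ExceptionCode == EH_SANITIZER {
        get_asan_report(process_handle, record)
    } else {
        None
    }
}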
144
src/agent/input-tester/src/test_result/fast_fail.rs
Normal file
@@ -0,0 +1,144 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::unreadable_literal)]

use winapi::um::winnt::{
    EXCEPTION_RECORD, FAST_FAIL_APCS_DISABLED, FAST_FAIL_CERTIFICATION_FAILURE,
    FAST_FAIL_CORRUPT_LIST_ENTRY, FAST_FAIL_CRYPTO_LIBRARY, FAST_FAIL_DEPRECATED_SERVICE_INVOKED,
    FAST_FAIL_DLOAD_PROTECTION_FAILURE, FAST_FAIL_FATAL_APP_EXIT, FAST_FAIL_GS_COOKIE_INIT,
    FAST_FAIL_GUARD_EXPORT_SUPPRESSION_FAILURE, FAST_FAIL_GUARD_ICALL_CHECK_FAILURE,
    FAST_FAIL_GUARD_ICALL_CHECK_SUPPRESSED, FAST_FAIL_GUARD_JUMPTABLE, FAST_FAIL_GUARD_SS_FAILURE,
    FAST_FAIL_GUARD_WRITE_CHECK_FAILURE, FAST_FAIL_INCORRECT_STACK, FAST_FAIL_INVALID_ARG,
    FAST_FAIL_INVALID_BALANCED_TREE, FAST_FAIL_INVALID_BUFFER_ACCESS,
    FAST_FAIL_INVALID_CALL_IN_DLL_CALLOUT, FAST_FAIL_INVALID_CONTROL_STACK,
    FAST_FAIL_INVALID_DISPATCH_CONTEXT, FAST_FAIL_INVALID_EXCEPTION_CHAIN,
    FAST_FAIL_INVALID_FIBER_SWITCH, FAST_FAIL_INVALID_FILE_OPERATION, FAST_FAIL_INVALID_IDLE_STATE,
    FAST_FAIL_INVALID_IMAGE_BASE, FAST_FAIL_INVALID_JUMP_BUFFER, FAST_FAIL_INVALID_LOCK_STATE,
    FAST_FAIL_INVALID_LONGJUMP_TARGET, FAST_FAIL_INVALID_NEXT_THREAD,
    FAST_FAIL_INVALID_REFERENCE_COUNT, FAST_FAIL_INVALID_SET_OF_CONTEXT,
    FAST_FAIL_INVALID_SYSCALL_NUMBER, FAST_FAIL_INVALID_THREAD, FAST_FAIL_LEGACY_GS_VIOLATION,
    FAST_FAIL_LOADER_CONTINUITY_FAILURE, FAST_FAIL_LPAC_ACCESS_DENIED, FAST_FAIL_MRDATA_MODIFIED,
    FAST_FAIL_MRDATA_PROTECTION_FAILURE, FAST_FAIL_RANGE_CHECK_FAILURE,
    FAST_FAIL_SET_CONTEXT_DENIED, FAST_FAIL_STACK_COOKIE_CHECK_FAILURE,
    FAST_FAIL_UNEXPECTED_HEAP_EXCEPTION, FAST_FAIL_UNSAFE_EXTENSION_CALL,
    FAST_FAIL_UNSAFE_REGISTRY_ACCESS, FAST_FAIL_VTGUARD_CHECK_FAILURE,
};

/// The C compiler intrinsic __fastfail was called with one of these values - we use UnknownFastFailCode for values
/// we have not seen before.
///
/// See https://docs.microsoft.com/en-us/cpp/intrinsics/fastfail?view=vs-2017 for __fastfail details.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum FastFail {
    UnknownFastFailCode,
    LegacyGsViolation,
    VtguardCheckFailure,
    StackCookieCheckFailure,
    CorruptListEntry,
    IncorrectStack,
    InvalidArg,
    GsCookieInit,
    FatalAppExit,
    RangeCheckFailure,
    UnsafeRegistryAccess,
    GuardIcallCheckFailure,
    GuardWriteCheckFailure,
    InvalidFiberSwitch,
    InvalidSetOfContext,
    InvalidReferenceCount,
    InvalidJumpBuffer,
    MrdataModified,
    CertificationFailure,
    InvalidExceptionChain,
    CryptoLibrary,
    InvalidCallInDllCallout,
    InvalidImageBase,
    DloadProtectionFailure,
    UnsafeExtensionCall,
    DeprecatedServiceInvoked,
    InvalidBufferAccess,
    InvalidBalancedTree,
    InvalidNextThread,
    GuardIcallCheckSuppressed,
    ApcsDisabled,
    InvalidIdleState,
    MrdataProtectionFailure,
    UnexpectedHeapException,
    InvalidLockState,
    GuardJumptable,
    InvalidLongjumpTarget,
    InvalidDispatchContext,
    InvalidThread,
    InvalidSyscallNumber,
    InvalidFileOperation,
    LpacAccessDenied,
    GuardSsFailure,
    LoaderContinuityFailure,
    GuardExportSuppressionFailure,
    InvalidControlStack,
    SetContextDenied,
}

// See https://docs.microsoft.com/en-us/cpp/intrinsics/fastfail?view=vs-2017 for __fastfail details.
pub const EXCEPTION_FAIL_FAST: u32 = 0xC0000409;

fn fast_fail_from_u32(code: u32) -> FastFail {
    match code {
        FAST_FAIL_LEGACY_GS_VIOLATION => FastFail::LegacyGsViolation,
        FAST_FAIL_VTGUARD_CHECK_FAILURE => FastFail::VtguardCheckFailure,
        FAST_FAIL_STACK_COOKIE_CHECK_FAILURE => FastFail::StackCookieCheckFailure,
        FAST_FAIL_CORRUPT_LIST_ENTRY => FastFail::CorruptListEntry,
        FAST_FAIL_INCORRECT_STACK => FastFail::IncorrectStack,
        FAST_FAIL_INVALID_ARG => FastFail::InvalidArg,
        FAST_FAIL_GS_COOKIE_INIT => FastFail::GsCookieInit,
        FAST_FAIL_FATAL_APP_EXIT => FastFail::FatalAppExit,
        FAST_FAIL_RANGE_CHECK_FAILURE => FastFail::RangeCheckFailure,
        FAST_FAIL_UNSAFE_REGISTRY_ACCESS => FastFail::UnsafeRegistryAccess,
        FAST_FAIL_GUARD_ICALL_CHECK_FAILURE => FastFail::GuardIcallCheckFailure,
        FAST_FAIL_GUARD_WRITE_CHECK_FAILURE => FastFail::GuardWriteCheckFailure,
        FAST_FAIL_INVALID_FIBER_SWITCH => FastFail::InvalidFiberSwitch,
        FAST_FAIL_INVALID_SET_OF_CONTEXT => FastFail::InvalidSetOfContext,
        FAST_FAIL_INVALID_REFERENCE_COUNT => FastFail::InvalidReferenceCount,
        FAST_FAIL_INVALID_JUMP_BUFFER => FastFail::InvalidJumpBuffer,
        FAST_FAIL_MRDATA_MODIFIED => FastFail::MrdataModified,
        FAST_FAIL_CERTIFICATION_FAILURE => FastFail::CertificationFailure,
        FAST_FAIL_INVALID_EXCEPTION_CHAIN => FastFail::InvalidExceptionChain,
        FAST_FAIL_CRYPTO_LIBRARY => FastFail::CryptoLibrary,
        FAST_FAIL_INVALID_CALL_IN_DLL_CALLOUT => FastFail::InvalidCallInDllCallout,
        FAST_FAIL_INVALID_IMAGE_BASE => FastFail::InvalidImageBase,
        FAST_FAIL_DLOAD_PROTECTION_FAILURE => FastFail::DloadProtectionFailure,
        FAST_FAIL_UNSAFE_EXTENSION_CALL => FastFail::UnsafeExtensionCall,
        FAST_FAIL_DEPRECATED_SERVICE_INVOKED => FastFail::DeprecatedServiceInvoked,
        FAST_FAIL_INVALID_BUFFER_ACCESS => FastFail::InvalidBufferAccess,
        FAST_FAIL_INVALID_BALANCED_TREE => FastFail::InvalidBalancedTree,
        FAST_FAIL_INVALID_NEXT_THREAD => FastFail::InvalidNextThread,
        FAST_FAIL_GUARD_ICALL_CHECK_SUPPRESSED => FastFail::GuardIcallCheckSuppressed,
        FAST_FAIL_APCS_DISABLED => FastFail::ApcsDisabled,
        FAST_FAIL_INVALID_IDLE_STATE => FastFail::InvalidIdleState,
        FAST_FAIL_MRDATA_PROTECTION_FAILURE => FastFail::MrdataProtectionFailure,
        FAST_FAIL_UNEXPECTED_HEAP_EXCEPTION => FastFail::UnexpectedHeapException,
        FAST_FAIL_INVALID_LOCK_STATE => FastFail::InvalidLockState,
        FAST_FAIL_GUARD_JUMPTABLE => FastFail::GuardJumptable,
        FAST_FAIL_INVALID_LONGJUMP_TARGET => FastFail::InvalidLongjumpTarget,
        FAST_FAIL_INVALID_DISPATCH_CONTEXT => FastFail::InvalidDispatchContext,
        FAST_FAIL_INVALID_THREAD => FastFail::InvalidThread,
        FAST_FAIL_INVALID_SYSCALL_NUMBER => FastFail::InvalidSyscallNumber,
        FAST_FAIL_INVALID_FILE_OPERATION => FastFail::InvalidFileOperation,
        FAST_FAIL_LPAC_ACCESS_DENIED => FastFail::LpacAccessDenied,
        FAST_FAIL_GUARD_SS_FAILURE => FastFail::GuardSsFailure,
        FAST_FAIL_LOADER_CONTINUITY_FAILURE => FastFail::LoaderContinuityFailure,
        FAST_FAIL_GUARD_EXPORT_SUPPRESSION_FAILURE => FastFail::GuardExportSuppressionFailure,
        FAST_FAIL_INVALID_CONTROL_STACK => FastFail::InvalidControlStack,
        FAST_FAIL_SET_CONTEXT_DENIED => FastFail::SetContextDenied,
        _ => FastFail::UnknownFastFailCode,
    }
}

pub fn from_exception_record(exception_record: &EXCEPTION_RECORD) -> FastFail {
    if exception_record.NumberParameters == 1 {
        fast_fail_from_u32(exception_record.ExceptionInformation[0] as u32)
    } else {
        FastFail::UnknownFastFailCode
    }
}
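
// --- Illustrative sketch (editor's addition, not part of the original file) ---
// How a raw record decodes: __fastfail(n) raises exception code 0xC0000409 and
// passes the fast-fail code as the single exception parameter.
#[cfg(test)]
mod example_tests {
    use super::*;

    #[test]
    fn decodes_fatal_app_exit() {
        let mut record: EXCEPTION_RECORD = unsafe { std::mem::zeroed() };
        record.ExceptionCode = EXCEPTION_FAIL_FAST;
        record.NumberParameters = 1;
        record.ExceptionInformation[0] = FAST_FAIL_FATAL_APP_EXIT as usize;
        assert_eq!(from_exception_record(&record), FastFail::FatalAppExit);
    }
}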
448
src/agent/input-tester/src/test_result/mod.rs
Normal file
@ -0,0 +1,448 @@
|
||||
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::unreadable_literal)]

pub mod asan;
pub mod fast_fail;
pub mod vcpp_debugger;
pub mod verifier_stop;

use std::{fmt, path::Path};

use debugger::stack;
use log::error;
use win_util::process;
use winapi::um::{
    minwinbase::{
        EXCEPTION_ACCESS_VIOLATION, EXCEPTION_ARRAY_BOUNDS_EXCEEDED, EXCEPTION_BREAKPOINT,
        EXCEPTION_DATATYPE_MISALIGNMENT, EXCEPTION_DEBUG_INFO, EXCEPTION_FLT_DENORMAL_OPERAND,
        EXCEPTION_FLT_DIVIDE_BY_ZERO, EXCEPTION_FLT_INEXACT_RESULT,
        EXCEPTION_FLT_INVALID_OPERATION, EXCEPTION_FLT_OVERFLOW, EXCEPTION_FLT_STACK_CHECK,
        EXCEPTION_FLT_UNDERFLOW, EXCEPTION_ILLEGAL_INSTRUCTION, EXCEPTION_INT_DIVIDE_BY_ZERO,
        EXCEPTION_INT_OVERFLOW, EXCEPTION_INVALID_DISPOSITION, EXCEPTION_IN_PAGE_ERROR,
        EXCEPTION_NONCONTINUABLE_EXCEPTION, EXCEPTION_PRIV_INSTRUCTION, EXCEPTION_SINGLE_STEP,
        EXCEPTION_STACK_OVERFLOW,
    },
    winnt::{EXCEPTION_RECORD, HANDLE},
};

use crate::{
    crash_detector::DebuggerResult,
    test_result::{
        asan::{asan_error_from_exception_record, AsanError, EH_SANITIZER},
        fast_fail::{FastFail, EXCEPTION_FAIL_FAST},
        vcpp_debugger::{VcppDebuggerExceptionInfo, VcppRtcError},
        verifier_stop::{VerifierStop, STATUS_VERIFIER_STOP},
    },
};

// See https://github.com/dotnet/coreclr/blob/030a3ea9b8dbeae89c90d34441d4d9a1cf4a7de6/src/inc/corexcep.h#L21
const EXCEPTION_CLR: u32 = 0xE0434352;

// From vc crt source file ehdata_values.h
// #define EH_EXCEPTION_NUMBER ('msc' | 0xE0000000) // The NT Exception # that we use
// Also defined here: https://github.com/dotnet/coreclr/blob/030a3ea9b8dbeae89c90d34441d4d9a1cf4a7de6/src/inc/corexcep.h#L19
const EXCEPTION_CPP: u32 = 0xE06D7363;

// When debugging a WoW64 process, we see STATUS_WX86_BREAKPOINT in addition to EXCEPTION_BREAKPOINT
const STATUS_WX86_BREAKPOINT: u32 = ::winapi::shared::ntstatus::STATUS_WX86_BREAKPOINT as u32;

fn get_av_description(exception_record: &EXCEPTION_RECORD) -> ExceptionCode {
    if exception_record.NumberParameters >= 2 {
        let write = exception_record.ExceptionInformation[0] != 0;
        let null = exception_record.ExceptionInformation[1] == 0;
        match (write, null) {
            (true, true) => ExceptionCode::WriteToNull,
            (true, false) => ExceptionCode::WriteAccessViolation,
            (false, true) => ExceptionCode::ReadFromNull,
            (false, false) => ExceptionCode::ReadAccessViolation,
        }
    } else {
        ExceptionCode::UnknownAccessViolation
    }
}

fn generic_exception(exception_record: &EXCEPTION_RECORD) -> Option<ExceptionCode> {
    match exception_record.ExceptionCode {
        EXCEPTION_ACCESS_VIOLATION => Some(get_av_description(exception_record)),
        EXCEPTION_ARRAY_BOUNDS_EXCEEDED => Some(ExceptionCode::ArrayBoundsExceeded),
        // EXCEPTION_BREAKPOINT - when the debugger bitness matches the debuggee
        // STATUS_WX86_BREAKPOINT - when the debugger is 64 bit and the debuggee is Wow64.
        // In other words, the exception code is a debugger implementation detail, the end
        // user only really cares that it was a breakpoint.
        EXCEPTION_BREAKPOINT | STATUS_WX86_BREAKPOINT => Some(ExceptionCode::Breakpoint),
        EXCEPTION_DATATYPE_MISALIGNMENT => Some(ExceptionCode::MisalignedData),
        EXCEPTION_FLT_DENORMAL_OPERAND => Some(ExceptionCode::FltDenormalOperand),
        EXCEPTION_FLT_DIVIDE_BY_ZERO => Some(ExceptionCode::FltDivByZero),
        EXCEPTION_FLT_INEXACT_RESULT => Some(ExceptionCode::FltInexactResult),
        EXCEPTION_FLT_INVALID_OPERATION => Some(ExceptionCode::FltInvalidOperation),
        EXCEPTION_FLT_OVERFLOW => Some(ExceptionCode::FltOverflow),
        EXCEPTION_FLT_STACK_CHECK => Some(ExceptionCode::FltStackCheck),
        EXCEPTION_FLT_UNDERFLOW => Some(ExceptionCode::FltUnderflow),
        EXCEPTION_ILLEGAL_INSTRUCTION => Some(ExceptionCode::IllegalInstruction),
        EXCEPTION_INT_DIVIDE_BY_ZERO => Some(ExceptionCode::IntDivByZero),
        EXCEPTION_INT_OVERFLOW => Some(ExceptionCode::IntOverflow),
        EXCEPTION_INVALID_DISPOSITION => Some(ExceptionCode::InvalidDisposition),
        EXCEPTION_IN_PAGE_ERROR => Some(ExceptionCode::InPageError),
        EXCEPTION_NONCONTINUABLE_EXCEPTION => Some(ExceptionCode::NonContinuableException),
        EXCEPTION_PRIV_INSTRUCTION => Some(ExceptionCode::PrivilegedInstruction),
        EXCEPTION_SINGLE_STEP => Some(ExceptionCode::SingleStep),
        EXCEPTION_STACK_OVERFLOW => Some(ExceptionCode::StackOverflow),
        EXCEPTION_CLR => Some(ExceptionCode::ClrException),
        EXCEPTION_CPP => Some(ExceptionCode::CppException),
        _ => None,
    }
}

/// A friendly description of the exception based on the exception code and other
/// parameters available to the debugger when the exception was raised.
pub enum ExceptionDescription {
    /// A generic exception with no additional details.
    GenericException(ExceptionCode),

    /// An exception detected by enabling application verifier.
    VerifierStop(VerifierStop),

    /// An exception raised by calling __fastfail.
    FastFail(FastFail),

    /// An exception detected by ASAN.
    Asan(AsanError),

    /// An exception detected by the VC++ RTC checks.
    Rtc(VcppRtcError),
}

impl fmt::Display for ExceptionDescription {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        match self {
            ExceptionDescription::GenericException(code) => write!(formatter, "{:?}", code),
            ExceptionDescription::VerifierStop(stop) => write!(formatter, "VerifierStop({})", stop),
            ExceptionDescription::FastFail(code) => write!(formatter, "FastFail({:?})", code),
            ExceptionDescription::Asan(code) => write!(formatter, "{:?}", code),
            ExceptionDescription::Rtc(code) => write!(formatter, "{:?}", code),
        }
    }
}

pub fn new_exception_description(
    process_handle: HANDLE,
    exception_record: &EXCEPTION_RECORD,
) -> ExceptionDescription {
    if let Some(generic_exception) = generic_exception(exception_record) {
        ExceptionDescription::GenericException(generic_exception)
    } else {
        match exception_record.ExceptionCode {
            EXCEPTION_FAIL_FAST => {
                ExceptionDescription::FastFail(fast_fail::from_exception_record(exception_record))
            }
            STATUS_VERIFIER_STOP => ExceptionDescription::VerifierStop(verifier_stop::new(
                process_handle,
                exception_record,
            )),
            EH_SANITIZER => ExceptionDescription::Asan(asan_error_from_exception_record(
                process_handle,
                exception_record,
            )),
            vcpp_debugger::EXCEPTION_VISUALCPP_DEBUGGER => {
                if let VcppDebuggerExceptionInfo::RuntimeError(info) =
                    VcppDebuggerExceptionInfo::from_exception_record(
                        exception_record,
                        !process::is_wow64_process(process_handle),
                    )
                {
                    if let Err(e) = info.notify_target(process_handle) {
                        error!("Error notifying target on vcpp runtime error: {}", e);
                    }
                    ExceptionDescription::Rtc(info.get_rtc_error())
                } else {
                    ExceptionDescription::GenericException(ExceptionCode::UnknownExceptionCode)
                }
            }
            _ => ExceptionDescription::GenericException(ExceptionCode::UnknownExceptionCode),
        }
    }
}

pub fn new_exception(
    process_handle: HANDLE,
    exception: &EXCEPTION_DEBUG_INFO,
    stack: stack::DebugStack,
) -> Exception {
    Exception {
        exception_code: exception.ExceptionRecord.ExceptionCode,
        description: new_exception_description(process_handle, &exception.ExceptionRecord),
        stack_hash: stack.stable_hash(),
        first_chance: exception.dwFirstChance != 0,
        stack_frames: stack.frames().iter().map(|f| f.into()).collect(),
    }
}

pub fn new_test_result(
    debugger_result: DebuggerResult,
    input_file: &Path,
    log_path: &Path,
) -> TestResult {
    TestResult {
        bugs: debugger_result.exceptions,
        input_file: input_file.to_string_lossy().to_string(),
        log_path: format!("{}", log_path.display()),
        debugger_output: debugger_result.debugger_output,
        test_stdout: debugger_result.stdout,
        test_stderr: debugger_result.stderr,
        exit_status: debugger_result.exit_status,
    }
}

/// The file and line number for a frame in the call stack.
pub struct FileInfo {
    pub file: String,
    pub line: u32,
}

/// The location within a function for a call stack entry.
pub enum DebugFunctionLocation {
    /// If symbol information is available, we use the file/line numbers for stability across builds.
    FileInfo(FileInfo),
    /// If no symbol information is available, the offset within the function is used.
    Displacement(u64),
}

impl fmt::Display for DebugFunctionLocation {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        match self {
            DebugFunctionLocation::FileInfo(file_info) => {
                write!(formatter, "{}:{}", file_info.file, file_info.line)
            }
            DebugFunctionLocation::Displacement(disp) => write!(formatter, "0x{:x}", disp),
        }
    }
}

impl<'a> From<&'a stack::DebugFunctionLocation> for DebugFunctionLocation {
    fn from(location: &'a stack::DebugFunctionLocation) -> Self {
        match location {
            stack::DebugFunctionLocation::Line { file, line } => {
                DebugFunctionLocation::FileInfo(FileInfo {
                    file: file.to_string(),
                    line: *line,
                })
            }

            stack::DebugFunctionLocation::Offset { disp } => {
                DebugFunctionLocation::Displacement(*disp)
            }
        }
    }
}

/// A stack frame for reporting where an exception or other bug occurs.
pub enum DebugStackFrame {
    Frame {
        /// The name of the function (if available via symbols or exports) or possibly something else like a
        /// (possibly synthetic) module name.
        function: String,

        /// Location details such as file/line (if symbols are available) or offset.
        location: DebugFunctionLocation,
    },

    CorruptFrame,
}

impl fmt::Display for DebugStackFrame {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        match self {
            DebugStackFrame::Frame { function, location } => {
                formatter.write_str(function)?;
                match location {
                    DebugFunctionLocation::FileInfo(file_info) => {
                        write!(formatter, " {}:{}", file_info.file, file_info.line)
                    }
                    DebugFunctionLocation::Displacement(disp) => write!(formatter, "+0x{:x}", disp),
                }
            }
            DebugStackFrame::CorruptFrame => formatter.write_str("<corrupt frame(s)>"),
        }
    }
}

impl<'a> From<&'a stack::DebugStackFrame> for DebugStackFrame {
    fn from(frame: &'a stack::DebugStackFrame) -> Self {
        match frame {
            stack::DebugStackFrame::Frame { function, location } => DebugStackFrame::Frame {
                function: function.to_string(),
                location: location.into(),
            },
            stack::DebugStackFrame::CorruptFrame => DebugStackFrame::CorruptFrame,
        }
    }
}

/// The details of an exception observed by the execution engine.
pub struct Exception {
    /// The win32 exception code.
    pub exception_code: u32,

    /// A friendly description of the exception based on the exception code and other
    /// parameters available to the debugger when the exception was raised.
    pub description: ExceptionDescription,

    /// A hash of the call stack when the exception was raised.
    pub stack_hash: u64,

    /// True if the exception is "first chance". Applications can handle first chance exceptions,
    /// so it is possible to see more than one. When `first_chance` is false, the exception caused
    /// the program to crash.
    pub first_chance: bool,

    /// The call stack when the exception was raised.
    pub stack_frames: Vec<DebugStackFrame>,
}

impl fmt::Display for Exception {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        writeln!(formatter, "Exception: 0x{:8x}", self.exception_code)?;
        writeln!(formatter, " Description: {}", self.description)?;
        writeln!(formatter, " FirstChance: {}", self.first_chance)?;
        writeln!(formatter, " StackHash: {}", self.stack_hash)?;
        writeln!(formatter, " Stack:")?;
        for frame in &self.stack_frames {
            writeln!(formatter, " {}", frame)?;
        }
        Ok(())
    }
}

/// How did the program exit - normally (so we have a proper exit code) or was it terminated?
#[derive(Debug, PartialEq)]
pub enum ExitStatus {
    /// The exit code returned from the process.
    Code(i32),

    /// Unix only - terminated by signal.
    Signal(i32),

    /// The application took longer than the maximum allowed and was terminated; the timeout is in seconds.
    Timeout(u64),
}

impl ExitStatus {
    pub fn from_code(code: i32) -> Self {
        ExitStatus::Code(code)
    }

    pub fn from_timeout(timeout_s: u64) -> Self {
        ExitStatus::Timeout(timeout_s)
    }

    pub fn is_normal_exit(&self) -> bool {
        match self {
            ExitStatus::Code(_) => true,
            _ => false,
        }
    }

    pub fn is_timeout(&self) -> bool {
        match self {
            ExitStatus::Timeout(_) => true,
            _ => false,
        }
    }
}

impl fmt::Display for ExitStatus {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        match self {
            ExitStatus::Code(c) => write!(formatter, "Exit code: {}", c),
            ExitStatus::Signal(c) => write!(formatter, "Signal: {}", c),
            ExitStatus::Timeout(sec) => write!(formatter, "Timeout: {}s", sec),
        }
    }
}

/// A fuzzer or execution engine sends this message to a back end to report the bugs found for a single input.
pub struct TestResult {
    /// The input filename that results in the bugs_found.
    pub input_file: String,

    /// The standard output from running the program (possibly merged with stderr, which would then be empty).
    pub test_stdout: String,

    /// The standard error from running the program (empty if it was merged into stdout).
    pub test_stderr: String,

    /// The output from the debugger attached to the program.
    pub debugger_output: String,

    /// The bugs found when testing input_file.
    pub bugs: Vec<Exception>,

    /// A log file or directory that should be shared with the customer along with the input_file and bug details.
    pub log_path: String,

    /// How did the program exit - normally (so we have a proper exit code) or was it terminated?
    pub exit_status: ExitStatus,
}

impl TestResult {
    pub fn any_crashes(&self) -> bool {
        !self.bugs.is_empty()
    }

    pub fn timed_out(&self) -> bool {
        self.exit_status.is_timeout()
    }

    pub fn any_crashes_or_timed_out(&self) -> bool {
        self.any_crashes() || self.timed_out()
    }
}

/// This is a non-exhaustive list of exceptions that might be raised in a program.
#[derive(Debug)]
pub enum ExceptionCode {
    UnknownExceptionCode,
    UnknownApplicationVerifierStop,
    UnknownFastFail,
    /// A read reference to an invalid address (null)
    ReadFromNull,
    /// A write reference to an invalid address (null)
    WriteToNull,
    /// A read reference to an invalid address (non-null)
    ReadAccessViolation,
    /// A write reference to an invalid address (non-null)
    WriteAccessViolation,
    /// A read or write reference to an invalid address where we don't know the address.
    UnknownAccessViolation,
    ArrayBoundsExceeded,
    MisalignedData,
    Breakpoint,
    SingleStep,
    BoundsExceeded,
    FltDenormalOperand,
    FltDivByZero,
    FltInexactResult,
    FltInvalidOperation,
    FltOverflow,
    FltStackCheck,
    FltUnderflow,
    IntDivByZero,
    IntOverflow,
    PrivilegedInstruction,
    InPageError,
    IllegalInstruction,
    NonContinuableException,
    StackOverflow,
    InvalidDisposition,
    GuardPage,
    InvalidHandleException,
    PossibleDeadlock,
    /// An exception raised from .Net code
    ClrException,
    /// An exception raised from the C++ throw statement.
    CppException,
    /// An error detected by ASAN.
    Asan,
}
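// A brief sketch (illustrative, not in the source) of how the ExitStatus
// helpers and Display impl above behave:
fn exit_status_example() {
    let normal = ExitStatus::from_code(0);
    let hung = ExitStatus::from_timeout(30);
    assert!(normal.is_normal_exit() && !normal.is_timeout());
    assert!(hung.is_timeout());
    assert_eq!(format!("{}", hung), "Timeout: 30s");
}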
209
src/agent/input-tester/src/test_result/vcpp_debugger.rs
Normal file
@ -0,0 +1,209 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::unreadable_literal)]

/// This module wraps an exception raised by the VC++ runtime or by user code implementing
/// https://docs.microsoft.com/en-us/visualstudio/debugger/how-to-set-a-thread-name-in-native-code?view=vs-2019
///
/// Some uses of this exception are documented, some are not. The FFI here is derived from
/// tagEXCEPTION_VISUALCPP_DEBUG_INFO (defined in multiple places, e.g. vscommon\dbghelper\dbghlper.h)
/// but we extract the fields explicitly from the exception record parameters to deal with
/// layouts that differ between x86 and x64 (our target process could be either).
use anyhow::Result;
use win_util::process;
use winapi::{
    shared::minwindef::{BOOL, DWORD, PBYTE},
    um::winnt::{EXCEPTION_RECORD, HANDLE, LPCSTR, LPCWSTR, PVOID},
};

/// Errors reported via the VC++ /RTC compiler flag, as defined in vctruntime\inc\rtcapi.h
#[derive(Debug)]
pub enum VcppRtcError {
    UnknownRtcError,
    /// _RTC_CORRUPT_STACK (stack memory corrupted)
    CorruptStack,
    /// _RTC_CORRUPTED_ALLOCA (stack memory around alloca corrupted)
    CorruptAlloca,
    /// _RTC_UNINIT_LOCAL_USE (local variable used before initialized)
    UseUninitializedVariable,
    /// _RTC_CHKSTK (ESP not saved properly across a function call, usually a calling convention error)
    StackPointerCorrupted,
    /// _RTC_CVRT_LOSS_INFO (cast to smaller data type - not always a bug)
    ShorteningConvertDataLoss,
}

// this is a special exception used to:
// * name a thread
// * report rtc errors
// we should not report any bugs if the exception was raised to name a thread.
pub const EXCEPTION_VISUALCPP_DEBUGGER: DWORD = 0x406d1388;

// The exception is a simple notification to the debugger which can be ignored.
const EXCEPTION_DEBUGGER_NAME_THREAD: DWORD = 0x1000;
// The exception is asking the debugger if it is aware of RTC errors. If the debugger
// is aware, it will modify the memory of the target which will then raise a RUNTIMECHECK
// exception. We should do this eventually (PBI #6530).
const EXCEPTION_DEBUGGER_PROBE: DWORD = 0x1001;
// This exception is raised only after the PROBE exception and if the debugger set the
// target memory to raise this exception.
const EXCEPTION_DEBUGGER_RUNTIMECHECK: DWORD = 0x1002;
// Unsure if this is used at all, but there is info to extract
const EXCEPTION_DEBUGGER_FIBER: DWORD = 0x1003;
// Defined in the vc headers, but no info to extract and no uses.
//const EXCEPTION_DEBUGGER_HANDLECHECK: DWORD = 0x1004;

#[allow(non_snake_case)]
pub struct ThreadNameInfo {
    /// pointer to name (in user addr space)
    pub szName: LPCSTR,
    /// thread id (-1=caller thread)
    pub dwThreadId: DWORD,
    /// reserved for future use (eg user thread, system thread)
    pub dwFlags: DWORD,
}

#[allow(non_snake_case)]
pub struct DebuggerProbeInfo {
    /// 0 = do you understand this private exception, else max value of enum
    pub dwLevelRequired: DWORD,
    /// debugger puts a non-zero value in this address to tell runtime debugger is aware of RTC
    pub pbDebuggerPresent: PBYTE,
}

impl DebuggerProbeInfo {
    pub fn notify_target(&self, process_handle: HANDLE) -> Result<()> {
        // This will tell the VC++ runtime to raise another exception to report the error.
        process::write_memory(process_handle, self.pbDebuggerPresent as PVOID, &1u8)
    }
}

// Based on _RTC_ErrorNumber, used in the dwRuntimeNumber field
const RTC_CHKSTK: DWORD = 0;
const RTC_CVRT_LOSS_INFO: DWORD = 1;
const RTC_CORRUPT_STACK: DWORD = 2;
const RTC_UNINIT_LOCAL_USE: DWORD = 3;
const RTC_CORRUPTED_ALLOCA: DWORD = 4;

#[allow(non_snake_case)]
pub struct RuntimeErrorInfo {
    /// the type of the runtime check
    pub dwRuntimeNumber: DWORD,
    /// true if never a false-positive
    pub bRealBug: BOOL,
    /// caller puts a return address in here
    pub pvReturnAddress: PVOID,
    /// debugger puts a non-zero value in this address if handled it
    pub pbDebuggerPresent: PBYTE,
    /// pointer to unicode message (or null)
    pub pwRuntimeMessage: LPCWSTR,
}

impl RuntimeErrorInfo {
    pub fn notify_target(&self, process_handle: HANDLE) -> Result<()> {
        // This will tell the VC++ runtime to **not** use __debugbreak() to report the error.
        process::write_memory(process_handle, self.pbDebuggerPresent as PVOID, &1u8)
    }

    pub fn get_rtc_error(&self) -> VcppRtcError {
        match self.dwRuntimeNumber {
            RTC_CHKSTK => VcppRtcError::StackPointerCorrupted,
            RTC_CVRT_LOSS_INFO => VcppRtcError::ShorteningConvertDataLoss,
            RTC_CORRUPT_STACK => VcppRtcError::CorruptStack,
            RTC_UNINIT_LOCAL_USE => VcppRtcError::UseUninitializedVariable,
            RTC_CORRUPTED_ALLOCA => VcppRtcError::CorruptAlloca,
            _ => VcppRtcError::UnknownRtcError,
        }
    }
}

#[allow(non_snake_case)]
pub struct FiberInfo {
    /// 0=ConvertThreadToFiber, 1=CreateFiber, 2=DeleteFiber
    pub dwType: DWORD,
    /// pointer to fiber
    pub pvFiber: PVOID,
    /// pointer to FIBER_START_ROUTINE (CreateFiber only)
    pub pvStartRoutine: PVOID,
}

pub enum VcppDebuggerExceptionInfo {
    ThreadName(ThreadNameInfo),
    Probe(DebuggerProbeInfo),
    RuntimeError(RuntimeErrorInfo),
    Fiber(FiberInfo),
    UnknownException,
}

impl VcppDebuggerExceptionInfo {
    pub fn from_exception_record(exception_record: &EXCEPTION_RECORD, target_x64: bool) -> Self {
        assert_eq!(exception_record.ExceptionCode, EXCEPTION_VISUALCPP_DEBUGGER);

        if exception_record.NumberParameters == 0 {
            return VcppDebuggerExceptionInfo::UnknownException;
        }

        match exception_record.ExceptionInformation[0] as DWORD {
            EXCEPTION_DEBUGGER_NAME_THREAD
                if target_x64 && exception_record.NumberParameters >= 3 =>
            {
                VcppDebuggerExceptionInfo::ThreadName(ThreadNameInfo {
                    szName: exception_record.ExceptionInformation[1] as LPCSTR,
                    dwThreadId: exception_record.ExceptionInformation[2] as DWORD,
                    dwFlags: (exception_record.ExceptionInformation[2] >> 32) as DWORD,
                })
            }

            EXCEPTION_DEBUGGER_NAME_THREAD
                if !target_x64 && exception_record.NumberParameters >= 4 =>
            {
                VcppDebuggerExceptionInfo::ThreadName(ThreadNameInfo {
                    szName: exception_record.ExceptionInformation[1] as LPCSTR,
                    dwThreadId: exception_record.ExceptionInformation[2] as DWORD,
                    dwFlags: exception_record.ExceptionInformation[3] as DWORD,
                })
            }

            EXCEPTION_DEBUGGER_PROBE if exception_record.NumberParameters >= 3 => {
                VcppDebuggerExceptionInfo::Probe(DebuggerProbeInfo {
                    dwLevelRequired: exception_record.ExceptionInformation[1] as DWORD,
                    pbDebuggerPresent: exception_record.ExceptionInformation[2] as PBYTE,
                })
            }

            EXCEPTION_DEBUGGER_RUNTIMECHECK
                if target_x64 && exception_record.NumberParameters >= 6 =>
            {
                VcppDebuggerExceptionInfo::RuntimeError(RuntimeErrorInfo {
                    dwRuntimeNumber: exception_record.ExceptionInformation[1] as DWORD,
                    bRealBug: exception_record.ExceptionInformation[2] as BOOL,
                    pvReturnAddress: exception_record.ExceptionInformation[3] as PVOID,
                    pbDebuggerPresent: exception_record.ExceptionInformation[4] as PBYTE,
                    pwRuntimeMessage: exception_record.ExceptionInformation[5] as LPCWSTR,
                })
            }

            EXCEPTION_DEBUGGER_RUNTIMECHECK
                if !target_x64 && exception_record.NumberParameters >= 5 =>
            {
                VcppDebuggerExceptionInfo::RuntimeError(RuntimeErrorInfo {
                    dwRuntimeNumber: exception_record.ExceptionInformation[1] as DWORD,
                    bRealBug: (exception_record.ExceptionInformation[1] >> 32) as BOOL,
                    pvReturnAddress: exception_record.ExceptionInformation[2] as PVOID,
                    pbDebuggerPresent: exception_record.ExceptionInformation[3] as PBYTE,
                    pwRuntimeMessage: exception_record.ExceptionInformation[4] as LPCWSTR,
                })
            }

            EXCEPTION_DEBUGGER_FIBER if exception_record.NumberParameters >= 4 => {
                VcppDebuggerExceptionInfo::Fiber(FiberInfo {
                    dwType: exception_record.ExceptionInformation[1] as DWORD,
                    pvFiber: exception_record.ExceptionInformation[2] as PVOID,
                    pvStartRoutine: exception_record.ExceptionInformation[3] as PVOID,
                })
            }

            _ => VcppDebuggerExceptionInfo::UnknownException,
        }
    }
}
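// A small sketch (caller context assumed; not part of this file): a debug-event
// loop can use the decoder above to recognize thread-name notifications, which
// are informational and should not be reported as bugs.
fn thread_name_event(record: &EXCEPTION_RECORD, target_x64: bool) -> Option<DWORD> {
    match VcppDebuggerExceptionInfo::from_exception_record(record, target_x64) {
        VcppDebuggerExceptionInfo::ThreadName(info) => Some(info.dwThreadId),
        _ => None,
    }
}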
240
src/agent/input-tester/src/test_result/verifier_stop.rs
Normal file
@ -0,0 +1,240 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::fmt;

use win_util::process;
use winapi::{
    shared::{
        basetsd::ULONG64,
        minwindef::{LPCVOID, ULONG},
    },
    um::winnt::{EXCEPTION_RECORD, HANDLE},
    STRUCT,
};

use crate::appverifier::stop_codes;

pub const STATUS_VERIFIER_STOP: u32 = ::winapi::shared::ntstatus::STATUS_VERIFIER_STOP as u32;

// VERIFIER_STOP_HEADER and VERIFIER_STOP_PARAMS are not public apis (but probably could be).
// They are defined in os/src/onecore/base/avrf/verifier/logging.h
const MAX_STACK_DEPTH: usize = 32;

STRUCT! {
    #[allow(non_snake_case)]
    struct VERIFIER_STOP_HEADER {
        StopCode: ULONG64,
        StopFlags: ULONG,
        StackTraceDepth: ULONG,
        BackTrace: [ULONG64; MAX_STACK_DEPTH],
}}

// For our use here, pointers in this struct point to memory in another process, so if you
// want to read those strings, you must use ReadProcessMemory.
STRUCT! {
    #[allow(non_snake_case)]
    struct VERIFIER_STOP_PARAMS {
        Header: VERIFIER_STOP_HEADER,
        Message: ULONG64,
        Parameter1: ULONG64,
        StringPtr1: ULONG64,
        Parameter2: ULONG64,
        StringPtr2: ULONG64,
        Parameter3: ULONG64,
        StringPtr3: ULONG64,
        Parameter4: ULONG64,
        StringPtr4: ULONG64,
}}

fn handles_stop_from_u32(code: u32) -> HandlesStop {
    match code {
        stop_codes::HANDLES_INVALID_HANDLE => HandlesStop::InvalidHandleStop,
        stop_codes::HANDLES_INVALID_TLS_VALUE => HandlesStop::InvalidTlsValue,
        stop_codes::HANDLES_INCORRECT_WAIT_CALL => HandlesStop::IncorrectWaitCall,
        stop_codes::HANDLES_NULL_HANDLE => HandlesStop::NullHandle,
        stop_codes::HANDLES_WAIT_IN_DLLMAIN => HandlesStop::WaitInDllmain,
        stop_codes::HANDLES_INCORRECT_OBJECT_TYPE => HandlesStop::IncorrectObjectType,
        _ => panic!("Invalid Handles stop code"),
    }
}

fn heap_stop_from_u32(code: u32) -> HeapStop {
    match code {
        stop_codes::HEAPS_UNKNOWN_ERROR => HeapStop::UnknownError,
        stop_codes::HEAPS_ACCESS_VIOLATION => HeapStop::AccessViolation,
        stop_codes::HEAPS_UNSYNCHRONIZED_ACCESS => HeapStop::UnsynchronizedAccess,
        stop_codes::HEAPS_EXTREME_SIZE_REQUEST => HeapStop::ExtremeSizeRequest,
        stop_codes::HEAPS_BAD_HEAP_HANDLE => HeapStop::BadHeapHandle,
        stop_codes::HEAPS_SWITCHED_HEAP_HANDLE => HeapStop::SwitchedHeapHandle,
        stop_codes::HEAPS_DOUBLE_FREE => HeapStop::DoubleFree,
        stop_codes::HEAPS_CORRUPTED_HEAP_BLOCK => HeapStop::CorruptedHeapBlock,
        stop_codes::HEAPS_DESTROY_PROCESS_HEAP => HeapStop::DestroyProcessHeap,
        stop_codes::HEAPS_UNEXPECTED_EXCEPTION => HeapStop::UnexpectedException,
        stop_codes::HEAPS_CORRUPTED_HEAP_BLOCK_EXCEPTION_RAISED_FOR_HEADER => {
            HeapStop::CorruptedHeapBlockExceptionRaisedForHeader
        }
        stop_codes::HEAPS_CORRUPTED_HEAP_BLOCK_EXCEPTION_RAISED_FOR_PROBING => {
            HeapStop::CorruptedHeapBlockExceptionRaisedForProbing
        }
        stop_codes::HEAPS_CORRUPTED_HEAP_BLOCK_HEADER => HeapStop::CorruptedHeapBlockHeader,
        stop_codes::HEAPS_CORRUPTED_FREED_HEAP_BLOCK => HeapStop::CorruptedFreedHeapBlock,
        stop_codes::HEAPS_CORRUPTED_HEAP_BLOCK_SUFFIX => HeapStop::CorruptedHeapBlockSuffix,
        stop_codes::HEAPS_CORRUPTED_HEAP_BLOCK_START_STAMP => {
            HeapStop::CorruptedHeapBlockStartStamp
        }
        stop_codes::HEAPS_CORRUPTED_HEAP_BLOCK_END_STAMP => HeapStop::CorruptedHeapBlockEndStamp,
        stop_codes::HEAPS_CORRUPTED_HEAP_BLOCK_PREFIX => HeapStop::CorruptedHeapBlockPrefix,
        stop_codes::HEAPS_FIRST_CHANCE_ACCESS_VIOLATION => HeapStop::FirstChanceAccessViolation,
        stop_codes::HEAPS_CORRUPTED_HEAP_LIST => HeapStop::CorruptedHeapList,
        _ => panic!("unexpected heap stop code"),
    }
}

fn leak_stop_from_u32(code: u32) -> LeakStop {
    match code {
        stop_codes::LEAK_ALLOCATION => LeakStop::Allocation,
        stop_codes::LEAK_HANDLE => LeakStop::Handle,
        stop_codes::LEAK_REGISTRY => LeakStop::Registry,
        stop_codes::LEAK_VIRTUAL_RESERVATION => LeakStop::VirtualReservation,
        stop_codes::LEAK_SYSSTRING => LeakStop::SysString,
        stop_codes::LEAK_POWER_NOTIFICATION => LeakStop::PowerNotification,
        stop_codes::LEAK_COM_ALLOCATION => LeakStop::ComAllocation,
        _ => panic!("unexpected leak stop code"),
    }
}

fn exception_stop_from_u32(code: u32) -> ExceptionStop {
    match code {
        stop_codes::EXCEPTIONS_FIRST_CHANCE_ACCESS_VIOLATION_CODE => {
            ExceptionStop::FirstChanceAccessViolationCode
        }
        _ => panic!("unexpected exception stop code"),
    }
}

/// A bug detected by enabling `handles` in application verifier.
#[derive(Debug)]
pub enum HandlesStop {
    InvalidHandleStop,
    InvalidTlsValue,
    IncorrectWaitCall,
    NullHandle,
    WaitInDllmain,
    IncorrectObjectType,
}

/// A bug detected by enabling `heaps` in application verifier.
#[derive(Debug)]
pub enum HeapStop {
    UnknownError,
    AccessViolation,
    UnsynchronizedAccess,
    ExtremeSizeRequest,
    BadHeapHandle,
    SwitchedHeapHandle,
    DoubleFree,
    CorruptedHeapBlock,
    DestroyProcessHeap,
    UnexpectedException,
    CorruptedHeapBlockExceptionRaisedForHeader,
    CorruptedHeapBlockExceptionRaisedForProbing,
    CorruptedHeapBlockHeader,
    CorruptedFreedHeapBlock,
    CorruptedHeapBlockSuffix,
    CorruptedHeapBlockStartStamp,
    CorruptedHeapBlockEndStamp,
    CorruptedHeapBlockPrefix,
    FirstChanceAccessViolation,
    CorruptedHeapList,
}

/// A bug detected by enabling `leak` in application verifier.
#[derive(Debug)]
pub enum LeakStop {
    Allocation,
    Handle,
    Registry,
    VirtualReservation,
    SysString,
    PowerNotification,
    ComAllocation,
}

/// A bug detected by enabling `exceptions` in application verifier.
///
/// We don't enable this option normally because it only detects first chance exceptions which are already
/// reported and this option ends up reporting the same issue a second time with a different stack.
#[derive(Debug)]
pub enum ExceptionStop {
    FirstChanceAccessViolationCode,
}

/// A verifier stop has a specific exception code but the exception parameters provide additional useful
/// information in understanding the type of bug detected.
///
/// This message encapsulates the most important kinds of bugs detected by application verifier when fuzzing.
pub enum VerifierStop {
    /// A bug detected by enabling `heaps` in application verifier.
    Heap(HeapStop),

    /// A bug detected by enabling `handles` in application verifier.
    Handles(HandlesStop),

    /// A bug detected by enabling `leak` in application verifier.
    Leak(LeakStop),

    /// A bug detected by enabling `exceptions` in application verifier.
    Exception(ExceptionStop),

    /// A bug was detected by a currently unsupported option in application verifier.
    Unknown,
}

impl fmt::Display for VerifierStop {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        match self {
            VerifierStop::Heap(code) => write!(formatter, "{:?}", code),
            VerifierStop::Handles(code) => write!(formatter, "{:?}", code),
            VerifierStop::Leak(code) => write!(formatter, "{:?}", code),
            VerifierStop::Exception(code) => write!(formatter, "{:?}", code),
            VerifierStop::Unknown => write!(formatter, "Unknown"),
        }
    }
}

pub fn new(process_handle: HANDLE, exception_record: &EXCEPTION_RECORD) -> VerifierStop {
    if exception_record.NumberParameters >= 3 {
        match process::read_memory::<VERIFIER_STOP_PARAMS>(
            process_handle,
            exception_record.ExceptionInformation[2] as LPCVOID,
        ) {
            Ok(stop_params) => {
                let code = stop_params.Header.StopCode as u32;
                match code {
                    stop_codes::HANDLES_INVALID_HANDLE
                        ..=stop_codes::HANDLES_INCORRECT_OBJECT_TYPE => {
                        let handles_stop = handles_stop_from_u32(code);
                        VerifierStop::Handles(handles_stop)
                    }
                    stop_codes::HEAPS_UNKNOWN_ERROR..=stop_codes::HEAPS_CORRUPTED_HEAP_LIST => {
                        let heap_stop = heap_stop_from_u32(code);
                        VerifierStop::Heap(heap_stop)
                    }
                    stop_codes::LEAK_ALLOCATION..=stop_codes::LEAK_COM_ALLOCATION => {
                        let leak_stop = leak_stop_from_u32(code);
                        VerifierStop::Leak(leak_stop)
                    }
                    stop_codes::EXCEPTIONS_FIRST_CHANCE_ACCESS_VIOLATION_CODE => {
                        let exception_stop = exception_stop_from_u32(code);
                        VerifierStop::Exception(exception_stop)
                    }
                    _ => VerifierStop::Unknown,
                }
            }
            Err(_) => VerifierStop::Unknown,
        }
    } else {
        VerifierStop::Unknown
    }
}
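// A sketch (not in the source) of the range-based bucketing `new` performs once
// the stop code has been read out of VERIFIER_STOP_PARAMS:
fn classify_stop_code(code: u32) -> VerifierStop {
    match code {
        stop_codes::HANDLES_INVALID_HANDLE..=stop_codes::HANDLES_INCORRECT_OBJECT_TYPE => {
            VerifierStop::Handles(handles_stop_from_u32(code))
        }
        stop_codes::HEAPS_UNKNOWN_ERROR..=stop_codes::HEAPS_CORRUPTED_HEAP_LIST => {
            VerifierStop::Heap(heap_stop_from_u32(code))
        }
        stop_codes::LEAK_ALLOCATION..=stop_codes::LEAK_COM_ALLOCATION => {
            VerifierStop::Leak(leak_stop_from_u32(code))
        }
        _ => VerifierStop::Unknown,
    }
}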
546
src/agent/input-tester/src/tester.rs
Normal file
@ -0,0 +1,546 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::unknown_clippy_lints)]
#![allow(clippy::single_component_path_imports)]
#![allow(clippy::option_map_or_none)]
#![allow(clippy::ptr_arg)]
#![allow(clippy::bind_instead_of_map)]
#![allow(clippy::len_zero)]

//! This module runs the application under a debugger to detect exceptions or timeouts.
use std::{
    collections::HashMap,
    fs,
    io::{Read, Write},
    path::{Path, PathBuf},
    sync::Arc,
    time::Duration,
};

use anyhow::{Context, Result};
use coverage::AppCoverageBlocks;
use log::{error, info, trace, warn};
use num_cpus;
use rayon::{prelude::*, ThreadPoolBuilder};
use sha1::{Digest, Sha1};

use crate::{
    appverifier::{self, AppVerifierController, AppVerifierState},
    crash_detector::{self, DebuggerResult},
    logging,
    summary::Summary,
    test_result::{new_test_result, Exception, ExceptionCode, ExceptionDescription, TestResult},
};

macro_rules! writecrlf {
    ($dst:expr) => (
        write!($dst, "\r\n")
    );
    ($dst:expr,) => (
        writecrlf!($dst)
    );
    ($dst:expr, $($arg:tt)*) => (
        write!($dst, $($arg)*).and_then(|_| writecrlf!($dst))
    );
}

const THREAD_POOL_NAME: &str = "input-tester";

const MAX_CRASH_SAMPLES: usize = 10;

pub struct InputTestResult {
    pub debugger_result: DebuggerResult,
    pub input_path: PathBuf,
    pub blocks_covered: Option<usize>,
}

impl InputTestResult {
    pub fn new(
        debugger_result: DebuggerResult,
        input_path: PathBuf,
        blocks_covered: Option<usize>,
    ) -> Self {
        InputTestResult {
            debugger_result,
            input_path,
            blocks_covered,
        }
    }
}

pub struct Tester {
    driver: PathBuf,
    driver_env: HashMap<String, String>,
    driver_args: Vec<String>,
    max_run_s: u64,
    ignore_first_chance_exceptions: bool,
    appverif_controller: Option<AppVerifierController>,
    bugs_found_dir: PathBuf,
    coverage_map: Option<AppCoverageBlocks>,
}

impl Tester {
    pub fn new(
        output_dir: &Path,
        driver: PathBuf,
        driver_env: HashMap<String, String>,
        driver_args: Vec<String>,
        max_run_s: u64,
        ignore_first_chance_exceptions: bool,
        app_verifier_tests: Option<Vec<String>>,
    ) -> Result<Arc<Self>> {
        let mut bugs_found_dir = output_dir.to_path_buf();
        bugs_found_dir.push("bugs_found");
        ensure_directory_exists(&bugs_found_dir)
            .with_context(|| format!("Creating directory {}", bugs_found_dir.display()))?;

        let appverif_controller = if let Some(app_verifier_tests) = app_verifier_tests {
            if let Some(exe_name) = Path::new(&driver).file_name() {
                let appverif_controller =
                    AppVerifierController::new(exe_name, &app_verifier_tests)?;

                let cloned_appverif = appverif_controller.clone();
                atexit::register(move || {
                    if let Err(e) = cloned_appverif.set(AppVerifierState::Disabled) {
                        error!("Error disabling appverif: {}", e);
                    }
                });

                Some(appverif_controller)
            } else {
                anyhow::bail!("Missing executable name in path {}", driver.display());
            }
        } else {
            None
        };

        let coverage_map = coverage::load_coverage_map(&output_dir)?;
        Ok(Arc::new(Tester {
            appverif_controller,
            driver,
            driver_env,
            driver_args,
            max_run_s,
            ignore_first_chance_exceptions,
            bugs_found_dir,
            coverage_map,
        }))
    }

    /// Run the test file task with the specified input file.
    pub fn test_application(&self, input_path: impl AsRef<Path>) -> Result<InputTestResult> {
        let app_args = args_with_input_file_applied(&self.driver_args, &input_path)?;

        let mut coverage_map = self.coverage_map.clone();

        crash_detector::test_process(
            &self.driver,
            &app_args,
            &self.driver_env,
            Duration::from_secs(self.max_run_s),
            self.ignore_first_chance_exceptions,
            coverage_map.as_mut(),
        )
        .and_then(|result| {
            let blocks_hit = coverage_map.map_or(None, |map| Some(map.count_blocks_hit()));
            let result =
                InputTestResult::new(result, PathBuf::from(input_path.as_ref()), blocks_hit);
            log_input_test_result(&result);
            Ok(result)
        })
    }

    /// Test each file with the expected extension in the specified directory.
    /// Testing will be run in parallel unless the config specifies otherwise.
    pub fn test_dir(
        self: Arc<Self>,
        input_dir: impl AsRef<Path>,
        max_cores: Option<usize>,
    ) -> Result<(Summary, Vec<TestResult>)> {
        let threads = max_cores.unwrap_or_else(num_cpus::get);
        let threadpool = ThreadPoolBuilder::new()
            .thread_name(|idx| format!("{}-{}", THREAD_POOL_NAME, idx))
            .num_threads(threads)
            .build()?;

        let files_to_test: Vec<_> = fs::read_dir(&input_dir)
            .with_context(|| format!("Reading directory {}", input_dir.as_ref().display()))?
            .filter_map(|entry| match entry {
                Ok(item) => {
                    let path = item.path();
                    if path.is_file() {
                        Some(path)
                    } else {
                        None
                    }
                }
                Err(err) => {
                    warn!("Error reading directory entry: {}", err);
                    None
                }
            })
            .collect();

        let self_clone = Arc::clone(&self);
        Ok(threadpool.scope(|_s| {
            let results: Vec<InputTestResult> = files_to_test
                .par_iter()
                .map(move |input_path| self_clone.test_application(&input_path))
                .filter_map(|result| match result {
                    Ok(result) => Some(result),
                    Err(err) => {
                        error!("Debugger error: {}", err);
                        None
                    }
                })
                .collect();

            // Count the number of passes, crashes, etc.
            let mut summary = Summary::new();
            for result in &results {
                summary.update(&result.debugger_result);
            }

            // Copy failing inputs to the bugs_found directory and create a log for the crash,
            // and return a collection of results to possibly be reported.
            let mut test_results = vec![];
            for result in results {
                match self.prepare_test_result(result) {
                    Ok(Some(result)) => test_results.push(result),
                    Ok(None) => {}
                    Err(e) => {
                        error!("Error reporting results: {}", e);
                    }
                }
            }

            (summary, test_results)
        }))
    }

    pub fn test_single_file(
        &self,
        input_path: impl AsRef<Path>,
    ) -> Result<(Summary, Vec<TestResult>)> {
        let test_result = self.test_application(input_path)?;

        let summary = Summary::from(&test_result.debugger_result);

        let mut results = vec![];
        if let Some(result) = self.prepare_test_result(test_result)? {
            results.push(result);
        }

        Ok((summary, results))
    }

    fn is_bucket_full(&self, bucket_dir: &Path) -> Result<bool> {
        let dir_entries = fs::read_dir(bucket_dir)
            .with_context(|| format!("Reading directory {}", bucket_dir.display()))?;

        // We save the input and create a directory for the log+repro script,
        // so divide the count of directory entries by 2 to get the number of samples.
        Ok((dir_entries.count() / 2) >= MAX_CRASH_SAMPLES)
    }

    fn create_test_failure_artifacts(
        &self,
        log_dir: &Path,
        result: &InputTestResult,
        deduped_input_path: &Path,
    ) -> Result<()> {
        // Make a directory for our logs.
        ensure_directory_exists(&log_dir).context("Creating log directory for crash/timeout.")?;

        // Create a markdown file in our log directory.
        let stem = match result.input_path.file_stem() {
            Some(stem) => stem,
            None => anyhow::bail!(
                "Unexpected missing file stem {}",
                result.input_path.display()
            ),
        };
        let summary_path = log_dir.join(stem).with_extension("md");
        result
            .debugger_result
            .write_markdown_summary(&summary_path)
            .context("Writing markdown summary for crash/timeout.")?;

        // Create a batch file to help reproduce the bug with the settings we used.
        self.create_repro_bat(&log_dir, &result.input_path, &deduped_input_path)?;

        Ok(())
    }

    fn create_repro_bat(
        &self,
        log_dir: &Path,
        orig_input_path: &Path,
        deduped_input_path: &Path,
    ) -> Result<()> {
        let repro_bat_path = log_dir.join("repro.bat");
        let mut repro_bat = fs::File::create(&repro_bat_path)?;

        writecrlf!(repro_bat, "@echo off")?;

        if let Some(appverif) = &self.appverif_controller {
            write_appverif_calls(
                &mut repro_bat,
                appverif.appverif_path(),
                appverif.enable_command_lines(),
            )?;
            writecrlf!(repro_bat)?;
        }

        writecrlf!(
            repro_bat,
            "@rem Original input file tested was: {}",
            orig_input_path.display()
        )?;
        let app_args = args_with_input_file_applied(&self.driver_args, &deduped_input_path)?;
        writecrlf!(
            repro_bat,
            "{}",
            logging::command_invocation(&self.driver, &app_args[..])
        )?;

        if let Some(appverif) = &self.appverif_controller {
            write_appverif_calls(
                &mut repro_bat,
                appverif.appverif_path(),
                appverif.disable_command_lines(),
            )?;
            writecrlf!(repro_bat)?;
        }

        return Ok(());

        fn write_appverif_calls(
            repro_bat: &mut fs::File,
            appverif_path: &Path,
            args_with_comments: &[appverifier::ArgsWithComments],
        ) -> Result<()> {
            for args in args_with_comments {
                writecrlf!(repro_bat)?;
                for comment in args.comments {
                    write!(repro_bat, "@rem ")?;
                    writecrlf!(repro_bat, "{}", comment)?;
                }
                writecrlf!(
                    repro_bat,
                    "{}",
                    logging::command_invocation(appverif_path, &args.args)
                )?;
            }
            Ok(())
        }
    }

    pub fn prepare_test_result(&self, result: InputTestResult) -> Result<Option<TestResult>> {
        if !result.debugger_result.any_crashes_or_timed_out() {
            return Ok(Some(new_test_result(
                result.debugger_result,
                &result.input_path,
                Path::new(""),
            )));
        }

        // We bucketize results into folders and limit the number of samples in each bucket
        // so we hopefully avoid filling up the disk.
        //
        // A single sample could live in multiple buckets, so we try to pick the most serious
        // and so the sample is stored just once.
        let bucket = if result.debugger_result.any_crashes() {
            most_serious_exception(&result.debugger_result)
                .stack_hash
                .to_string()
        } else {
            "timeout".to_owned()
        };
        let mut bucket_dir = self.bugs_found_dir.clone();
        bucket_dir.push(bucket);
        ensure_directory_exists(&bucket_dir)?;

        if self.is_bucket_full(&bucket_dir)? {
            warn!(
                "TestInput not copied, max results ({}) found in {}",
                MAX_CRASH_SAMPLES,
                bucket_dir.display(),
            );
            return Ok(None);
        }

        let copy_result =
            copy_input_file_result(&result.input_path, &bucket_dir).with_context(|| {
                format!(
                    "Copying input file `{}` to `{}`",
                    result.input_path.display(),
                    bucket_dir.display()
                )
            })?;

        match copy_result {
            None => {
                // We assume if we've previously seen an input (checked by hashing the input)
                // and it crashed again, there is no value in reporting it again even though
                // we may see different exceptions or stacks.
                Ok(None)
            }
            Some(copied_file) => {
                // Logs will go in a directory matching the copied input name w/o an extension.
                let mut logs_dir = PathBuf::from(&copied_file);
                logs_dir.set_file_name(format!(
                    "{}-logs",
                    logs_dir.file_stem().unwrap().to_str().unwrap()
                ));
                self.create_test_failure_artifacts(&logs_dir, &result, &copied_file)?;
                Ok(Some(new_test_result(
                    result.debugger_result,
                    &result.input_path,
                    &logs_dir,
                )))
            }
        }
    }

    pub fn use_appverifier(&self) -> bool {
        self.appverif_controller.is_some()
    }

    pub fn set_appverifier(&self, state: AppVerifierState) -> Result<()> {
        if let Some(appverif_controller) = &self.appverif_controller {
            appverif_controller
                .set(state)
                .with_context(|| format!("Setting appverifier to {:?}", state))?;
        }

        Ok(())
    }
}

fn exception_from_throw(exception: &Exception) -> bool {
    match exception.description {
        ExceptionDescription::GenericException(ExceptionCode::ClrException)
        | ExceptionDescription::GenericException(ExceptionCode::CppException) => true,
        _ => false,
    }
}

/// Using heuristics, choose the most serious exception. Typically this would be the one that
/// crashes the program, which normally would be the last exception we see.
fn most_serious_exception(result: &DebuggerResult) -> &Exception {
    for e in &result.exceptions {
        // An unhandled exception will cause a crash, so treat those as more serious
        // than any handled exception.
        //
        // I'm not sure it's possible to have more than one unhandled exception, but if it
        // was possible, we want the first, so we search forwards. If a second unhandled exception
        // is possible, I'd guess it comes from something like a vectored exception handler.
        if !e.first_chance {
            return e;
        }
    }

    // Every exception was handled, but we can assume some exceptions are less severe than
    // others. For starters, we'll assume any throw statement is less severe than a non-throw.
    let a_throw = result.exceptions.iter().find(|e| exception_from_throw(e));

    // If we have no throw, assume the first exception is the root cause to be reported.
    a_throw.unwrap_or(&result.exceptions[0])
}

/// Read the file and hash the file contents using sha1.
/// We don't have security concerns, so a collision attack is a non-issue. Also, both git and
/// libfuzzer use sha1, so it's still a reasonable choice.
///
/// Returns the digest of the hash as a string in lowercase hex.
fn hash_file_contents(file: impl AsRef<Path>) -> Result<String> {
    let mut file = fs::File::open(file.as_ref())
        .with_context(|| format!("opening {} to hash", file.as_ref().display()))?;
    let mut data = Vec::new();
    file.read_to_end(&mut data)?;
    let digest = Sha1::digest(&data);
    Ok(format!("{:040x}", &digest))
}
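// Usage sketch (the file name is illustrative, not from the source):
fn example_hash_usage() -> Result<()> {
    let digest = hash_file_contents("crash_input.bin")?;
    assert_eq!(digest.len(), 40); // sha1 renders as 40 lowercase hex characters
    Ok(())
}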

/// Copy file to directory, but hash the file contents to use as the filename
/// so we can uniquely identify the input. This helps avoid:
/// * Redundantly trying to repro an input that was previously observed.
/// * Investigating identical inputs that result in different stack hashes.
///
/// If the target file exists, nothing is copied.
///
/// Returns the destination file name in a PathBuf if we've never seen the input file before.
fn copy_input_file_result(
    file: impl AsRef<Path>,
    directory: impl AsRef<Path>,
) -> Result<Option<PathBuf>> {
    let mut dest = directory.as_ref().to_path_buf();
    let hash = hash_file_contents(&file)?;
    dest.push(hash);
    if let Some(ext) = file.as_ref().extension() {
        dest.set_extension(ext);
    }

    if dest.is_dir() || dest.is_file() {
        warn!("Not reporting result: {} exists", dest.display());
        Ok(None)
    } else {
        fs::copy(file, &dest)?;
        Ok(Some(dest))
    }
}

fn log_input_test_result(result: &InputTestResult) {
    let debugger_result = &result.debugger_result;
    let input_path = &result.input_path;
    if debugger_result.exceptions.is_empty() {
        trace!("No bugs found in {}", input_path.display())
    } else {
        for exception in &debugger_result.exceptions {
            info!(
                "Exception found testing {} ExceptionCode=0x{:08x} Description={} FirstChance={} StackHash={}",
                input_path.display(),
                exception.exception_code,
                exception.description,
                exception.first_chance,
                exception.stack_hash,
            );
        }
    }
}

/// Replace `@@` with `input_file` in `args`.
pub fn args_with_input_file_applied(
    args: &Vec<impl AsRef<str>>,
    input_file: impl AsRef<Path>,
) -> Result<Vec<String>> {
    let mut result = vec![];

    let input_file = input_file.as_ref().to_str().ok_or_else(|| {
        anyhow::anyhow!(
            "unexpected unicode character in path {}",
            input_file.as_ref().display()
        )
    })?;
    for arg in args {
        let arg: String = arg.as_ref().replace("@@", input_file);
        result.push(arg);
    }

    Ok(result)
}

/// Create the specified directory if it does not already exist.
pub fn ensure_directory_exists(path: impl AsRef<Path>) -> Result<()> {
    let path = path.as_ref();
    if path.is_dir() {
        return Ok(());
    }

    // Either the directory does not exist, or maybe it's a file; either way,
    // we'll try to create the directory and use that result for the error, if any.
    fs::create_dir_all(&path).with_context(|| format!("Creating directory {}", path.display()))?;
    Ok(())
}
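// A minimal sketch (values illustrative) of the `@@` substitution performed by
// args_with_input_file_applied:
fn example_apply_input() -> Result<()> {
    let args = vec!["-runs=1", "@@"];
    let applied = args_with_input_file_applied(&args, "inputs/seed0.bin")?;
    assert_eq!(applied, vec!["-runs=1".to_string(), "inputs/seed0.bin".to_string()]);
    Ok(())
}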
34
src/agent/onefuzz-agent/Cargo.toml
Normal file
@ -0,0 +1,34 @@
[package]
name = "onefuzz-agent"
version = "0.2.0"
authors = ["fuzzing@microsoft.com"]
edition = "2018"
publish = false
license = "MIT"

[features]
integration_test = []

[dependencies]
anyhow = "1.0"
appinsights = "0.1"
async-trait = "0.1"
clap = "2.33"
env_logger = "0.7"
futures = "0.3"
hex = "0.4"
lazy_static = "1.4"
log = "0.4"
num_cpus = "1.13"
reqwest = { version = "0.10", features = ["json", "stream"] }
serde = "1.0"
serde_json = "1.0"
tokio = { version = "0.2", features = ["full"] }
tokio-util = { version = "0.3", features = ["full"] }
url = { version = "2.1", features = ["serde"] }
uuid = { version = "0.8", features = ["serde", "v4"] }
onefuzz = { path = "../onefuzz" }
storage-queue = { path = "../storage-queue" }

[dev-dependencies]
tempfile = "3.1"
36
src/agent/onefuzz-agent/build.rs
Normal file
@ -0,0 +1,36 @@
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::process::Command;

fn run_cmd(args: &[&str]) -> Result<String, Box<dyn Error>> {
    let cmd = Command::new(args[0]).args(&args[1..]).output()?;
    if cmd.status.success() {
        Ok(String::from_utf8_lossy(&cmd.stdout).to_string())
    } else {
        Err(From::from("failed"))
    }
}

fn read_file(filename: &str) -> Result<String, Box<dyn Error>> {
    let mut file = File::open(filename)?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;

    Ok(contents)
}

fn main() -> Result<(), Box<dyn Error>> {
    let sha = run_cmd(&["git", "rev-parse", "HEAD"])?;
    let with_changes = if run_cmd(&["git", "diff", "--quiet"]).is_err() {
        "-local_changes"
    } else {
        ""
    };
    println!("cargo:rustc-env=GIT_VERSION={}{}", sha, with_changes);

    let version = read_file("../../../CURRENT_VERSION")?;
    println!("cargo:rustc-env=ONEFUZZ_VERSION={}", version);

    Ok(())
}
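// Consuming side (an assumption, not part of this build script): the variables
// emitted above become compile-time environment variables readable with `env!`.
fn agent_version_banner() -> String {
    format!(
        "onefuzz {} (git {})",
        env!("ONEFUZZ_VERSION"),
        env!("GIT_VERSION")
    )
}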
28
src/agent/onefuzz-agent/data/afl-fuzzer_stats.txt
Normal file
@ -0,0 +1,28 @@
start_time : 1587433304
last_update : 1587433306
fuzzer_pid : 26515
cycles_done : 8
execs_done : 13655
execs_per_sec : 2666.67
paths_total : 2
paths_favored : 2
paths_found : 1
paths_imported : 0
max_depth : 2
cur_path : 0
pending_favs : 0
pending_total : 0
variable_paths : 0
stability : 100.00%
bitmap_cvg : 0.01%
unique_crashes : 0
unique_hangs : 0
last_path : 1587433304
last_crash : 0
last_hang : 0
execs_since_crash : 13655
exec_timeout : 20
afl_banner : fuzz.exe
afl_version : 2.52b
target_mode : default
command_line : afl-fuzz -i inputs -o OUT -- /home/bcaswell/projects/onefuzz/samples/afl-clang/fuzz.exe
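A hypothetical Rust sketch (not part of the agent) of parsing the `key : value` pairs in a fuzzer_stats file like the sample above:

use std::collections::HashMap;

fn parse_afl_stats(text: &str) -> HashMap<String, String> {
    text.lines()
        .filter_map(|line| {
            // Split on the first ':' only; values such as command_line contain more.
            let mut parts = line.splitn(2, ':');
            let key = parts.next()?.trim().to_string();
            let value = parts.next()?.trim().to_string();
            Some((key, value))
        })
        .collect()
}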
24
src/agent/onefuzz-agent/src/debug/cmd.rs
Normal file
@ -0,0 +1,24 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use anyhow::Result;
use clap::{App, SubCommand};

pub fn run(args: &clap::ArgMatches) -> Result<()> {
    match args.subcommand() {
        ("generic-crash-report", Some(sub)) => crate::debug::generic_crash_report::run(sub)?,
        ("libfuzzer-coverage", Some(sub)) => crate::debug::libfuzzer_coverage::run(sub)?,
        ("libfuzzer-crash-report", Some(sub)) => crate::debug::libfuzzer_crash_report::run(sub)?,
        _ => println!("missing subcommand\nUSAGE : {}", args.usage()),
    }

    Ok(())
}

pub fn args() -> App<'static, 'static> {
    SubCommand::with_name("debug")
        .about("unsupported internal debugging commands")
        .subcommand(crate::debug::generic_crash_report::args())
        .subcommand(crate::debug::libfuzzer_coverage::args())
        .subcommand(crate::debug::libfuzzer_crash_report::args())
}
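// A sketch (binary name and wiring are assumptions) of mounting this subcommand
// tree on a root clap App and dispatching it:
fn example_main() -> Result<()> {
    let matches = App::new("onefuzz-agent").subcommand(args()).get_matches();
    if let ("debug", Some(sub)) = matches.subcommand() {
        run(sub)?;
    }
    Ok(())
}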
|
121
src/agent/onefuzz-agent/src/debug/generic_crash_report.rs
Normal file
@ -0,0 +1,121 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::tasks::{
    config::{CommonConfig, SyncedDir},
    report::generic::{Config, GenericReportProcessor},
    utils::parse_key_value,
};
use anyhow::Result;
use clap::{App, Arg, SubCommand};
use onefuzz::blob::BlobContainerUrl;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
};
use tokio::runtime::Runtime;
use url::Url;
use uuid::Uuid;

async fn run_impl(input: String, config: Config) -> Result<()> {
    let input_path = Path::new(&input);
    let test_url = Url::parse("https://contoso.com/sample-container/blob.txt")?;
    let processor = GenericReportProcessor::new(&config);
    let result = processor.test_input(test_url, input_path).await?;
    println!("{:#?}", result);
    Ok(())
}

pub fn run(args: &clap::ArgMatches) -> Result<()> {
    let target_exe = value_t!(args, "target_exe", PathBuf)?;
    let input = value_t!(args, "input", String)?;
    let target_timeout = value_t!(args, "target_timeout", u64).ok();
    let check_retry_count = value_t!(args, "check_retry_count", u64)?;
    let target_options = args.values_of_lossy("target_options").unwrap_or_default();
    let check_asan_log = args.is_present("check_asan_log");
    let check_debugger = !args.is_present("disable_check_debugger");

    let mut target_env = HashMap::new();
    for opt in args.values_of_lossy("target_env").unwrap_or_default() {
        let (k, v) = parse_key_value(opt)?;
        target_env.insert(k, v);
    }

    let config = Config {
        target_exe,
        target_env,
        target_options,
        target_timeout,
        check_asan_log,
        check_debugger,
        check_retry_count,
        crashes: None,
        input_queue: None,
        no_repro: None,
        reports: None,
        unique_reports: SyncedDir {
            path: "unique_reports".into(),
            url: BlobContainerUrl::new(url::Url::parse("https://contoso.com/unique_reports")?)?,
        },
        common: CommonConfig {
            heartbeat_queue: None,
            instrumentation_key: None,
            telemetry_key: None,
            job_id: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(),
            task_id: Uuid::parse_str("11111111-1111-1111-1111-111111111111").unwrap(),
        },
    };

    let mut rt = Runtime::new()?;
    rt.block_on(async { run_impl(input, config).await })?;

    Ok(())
}

pub fn args() -> App<'static, 'static> {
    SubCommand::with_name("generic-crash-report")
        .about("execute a local-only generic crash report")
        .arg(
            Arg::with_name("target_exe")
                .takes_value(true)
                .required(true),
        )
        .arg(Arg::with_name("input").takes_value(true).required(true))
        .arg(
            Arg::with_name("disable_check_debugger")
                .takes_value(false)
                .long("disable_check_debugger"),
        )
        .arg(
            Arg::with_name("check_asan_log")
                .takes_value(false)
                .long("check_asan_log"),
        )
        .arg(
            Arg::with_name("check_retry_count")
                .takes_value(true)
                .long("check_retry_count")
                .default_value("0"),
        )
        .arg(
            Arg::with_name("target_timeout")
                .takes_value(true)
                .long("target_timeout")
                .default_value("5"),
        )
        .arg(
            Arg::with_name("target_env")
                .long("target_env")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("target_options")
                .long("target_options")
                .takes_value(true)
                .multiple(true)
                .allow_hyphen_values(true)
                .default_value("{input}")
                .help("Supports hyphens. Recommendation: Set target_env first"),
        )
}
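`parse_key_value` above turns each `--target_env` argument of the form `KEY=VALUE` into a map entry. A stand-alone sketch of that behavior (an assumption about the helper's semantics, not the `tasks::utils` implementation):

// Illustrative sketch: split a KEY=VALUE pair on the first '='.
fn parse_key_value(pair: String) -> anyhow::Result<(String, String)> {
    match pair.split_once('=') {
        Some((key, value)) => Ok((key.to_string(), value.to_string())),
        None => anyhow::bail!("invalid key=value pair: {}", pair),
    }
}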
105
src/agent/onefuzz-agent/src/debug/libfuzzer_coverage.rs
Normal file
@ -0,0 +1,105 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::tasks::{
    config::{CommonConfig, SyncedDir},
    coverage::libfuzzer_coverage::{Config, CoverageProcessor},
    utils::parse_key_value,
};
use anyhow::Result;
use clap::{App, Arg, SubCommand};
use onefuzz::blob::BlobContainerUrl;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    sync::Arc,
};
use tokio::runtime::Runtime;
use url::Url;
use uuid::Uuid;

async fn run_impl(input: String, config: Config) -> Result<()> {
    let mut processor = CoverageProcessor::new(Arc::new(config))
        .await
        .map_err(|e| format_err!("coverage processor failed: {:?}", e))?;
    let input_path = Path::new(&input);
    processor
        .test_input(input_path)
        .await
        .map_err(|e| format_err!("test input failed {:?}", e))?;
    let info = processor
        .total
        .info()
        .await
        .map_err(|e| format_err!("coverage_info failed {:?}", e))?;
    println!("{:?}", info);
    Ok(())
}

pub fn run(args: &clap::ArgMatches) -> Result<()> {
    let target_exe = value_t!(args, "target_exe", PathBuf)?;
    let input = value_t!(args, "input", String)?;
    let result_dir = value_t!(args, "result_dir", String)?;
    let target_options = args.values_of_lossy("target_options").unwrap_or_default();

    let mut target_env = HashMap::new();
    for opt in args.values_of_lossy("target_env").unwrap_or_default() {
        let (k, v) = parse_key_value(opt)?;
        target_env.insert(k, v);
    }

    let config = Config {
        target_exe,
        target_env,
        target_options,
        input_queue: None,
        readonly_inputs: vec![],
        coverage: SyncedDir {
            path: result_dir.into(),
            url: BlobContainerUrl::new(Url::parse("https://contoso.com/coverage")?)?,
        },
        common: CommonConfig {
            heartbeat_queue: None,
            instrumentation_key: None,
            telemetry_key: None,
            job_id: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(),
            task_id: Uuid::parse_str("11111111-1111-1111-1111-111111111111").unwrap(),
        },
    };

    let mut rt = Runtime::new()?;
    rt.block_on(run_impl(input, config))?;

    Ok(())
}

pub fn args() -> App<'static, 'static> {
    SubCommand::with_name("libfuzzer-coverage")
        .about("execute a local-only libfuzzer coverage task")
        .arg(
            Arg::with_name("target_exe")
                .takes_value(true)
                .required(true),
        )
        .arg(Arg::with_name("input").takes_value(true).required(true))
        .arg(
            Arg::with_name("result_dir")
                .takes_value(true)
                .required(true),
        )
        .arg(
            Arg::with_name("target_env")
                .long("target_env")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("target_options")
                .long("target_options")
                .takes_value(true)
                .multiple(true)
                .allow_hyphen_values(true)
                .default_value("{input}")
                .help("Supports hyphens. Recommendation: Set target_env first"),
        )
}
106
src/agent/onefuzz-agent/src/debug/libfuzzer_crash_report.rs
Normal file
@ -0,0 +1,106 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::tasks::{
    config::{CommonConfig, SyncedDir},
    report::libfuzzer_report::{AsanProcessor, Config},
    utils::parse_key_value,
};
use anyhow::Result;
use clap::{App, Arg, SubCommand};
use onefuzz::blob::BlobContainerUrl;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    sync::Arc,
};
use tokio::runtime::Runtime;
use url::Url;
use uuid::Uuid;

async fn run_impl(input: String, config: Config) -> Result<()> {
    let task = AsanProcessor::new(Arc::new(config));

    let test_url = Url::parse("https://contoso.com/sample-container/blob.txt")?;
    let input_path = Path::new(&input);
    let result = task.test_input(test_url, &input_path).await;
    println!("{:#?}", result);
    Ok(())
}

pub fn run(args: &clap::ArgMatches) -> Result<()> {
    let target_exe = value_t!(args, "target_exe", PathBuf)?;
    let input = value_t!(args, "input", String)?;
    let target_options = args.values_of_lossy("target_options").unwrap_or_default();
    let mut target_env = HashMap::new();
    for opt in args.values_of_lossy("target_env").unwrap_or_default() {
        let (k, v) = parse_key_value(opt)?;
        target_env.insert(k, v);
    }
    let target_timeout = value_t!(args, "target_timeout", u64).ok();
    let check_retry_count = value_t!(args, "check_retry_count", u64)?;

    let config = Config {
        target_exe,
        target_env,
        target_options,
        target_timeout,
        check_retry_count,
        input_queue: None,
        crashes: None,
        reports: None,
        no_repro: None,
        unique_reports: SyncedDir {
            path: "unique_reports".into(),
            url: BlobContainerUrl::new(Url::parse("https://contoso.com/unique_reports")?)?,
        },
        common: CommonConfig {
            heartbeat_queue: None,
            instrumentation_key: None,
            telemetry_key: None,
            job_id: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(),
            task_id: Uuid::parse_str("11111111-1111-1111-1111-111111111111").unwrap(),
        },
    };

    let mut rt = Runtime::new()?;
    rt.block_on(async { run_impl(input, config).await })?;

    Ok(())
}

pub fn args() -> App<'static, 'static> {
    SubCommand::with_name("libfuzzer-crash-report")
        .about("execute a local-only libfuzzer crash report task")
        .arg(
            Arg::with_name("target_exe")
                .takes_value(true)
                .required(true),
        )
        .arg(Arg::with_name("input").takes_value(true).required(true))
        .arg(
            Arg::with_name("target_env")
                .long("target_env")
                .takes_value(true)
                .multiple(true),
        )
        .arg(
            Arg::with_name("target_options")
                .long("target_options")
                .takes_value(true)
                .multiple(true)
                .allow_hyphen_values(true)
                .help("Supports hyphens. Recommendation: Set target_env first"),
        )
        .arg(
            Arg::with_name("target_timeout")
                .takes_value(true)
                .long("target_timeout"),
        )
        .arg(
            Arg::with_name("check_retry_count")
                .takes_value(true)
                .long("check_retry_count")
                .default_value("0"),
        )
}
7
src/agent/onefuzz-agent/src/debug/mod.rs
Normal file
@ -0,0 +1,7 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

pub mod cmd;
pub mod generic_crash_report;
pub mod libfuzzer_coverage;
pub mod libfuzzer_crash_report;
100
src/agent/onefuzz-agent/src/main.rs
Normal file
@ -0,0 +1,100 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#[macro_use]
extern crate anyhow;

#[macro_use]
extern crate onefuzz;

#[macro_use]
extern crate clap;

use std::path::PathBuf;

use anyhow::Result;
use clap::{App, Arg, SubCommand};
use onefuzz::telemetry::{self};

mod debug;
mod tasks;

use tasks::config::Config;

fn main() -> Result<()> {
    env_logger::init();

    let built_version = format!(
        "{} onefuzz:{} git:{}",
        crate_version!(),
        env!("ONEFUZZ_VERSION"),
        env!("GIT_VERSION")
    );

    let app = App::new("onefuzz-agent")
        .version(built_version.as_str())
        .arg(
            Arg::with_name("config")
                .long("config")
                .short("c")
                .takes_value(true),
        )
        .subcommand(debug::cmd::args())
        .subcommand(SubCommand::with_name("licenses").about("display third-party licenses"));

    let matches = app.get_matches();

    match matches.subcommand() {
        ("licenses", Some(_)) => {
            return licenses();
        }
        ("debug", Some(sub)) => return crate::debug::cmd::run(sub),
        _ => {} // no subcommand
    }

    if matches.value_of("config").is_none() {
        println!("Missing '--config'\n{}", matches.usage());
        return Ok(());
    }

    let config_path: PathBuf = matches.value_of("config").unwrap().parse()?;
    let config = Config::from_file(config_path)?;

    init_telemetry(&config);

    verbose!("config parsed");

    let mut rt = tokio::runtime::Runtime::new()?;

    let result = rt.block_on(config.run());

    if let Err(err) = &result {
        error!("error running task: {}", err);
    }

    telemetry::try_flush_and_close();

    result
}

fn licenses() -> Result<()> {
    use std::io::{self, Write};
    io::stdout().write_all(include_bytes!("../../data/licenses.json"))?;
    Ok(())
}

fn init_telemetry(config: &Config) {
    let inst_key = config
        .common()
        .instrumentation_key
        .map(|k| k.to_string())
        .unwrap_or_else(|| "".to_string());

    let tele_key = config
        .common()
        .telemetry_key
        .map(|k| k.to_string())
        .unwrap_or_else(|| "".to_string());

    telemetry::set_appinsights_clients(inst_key, tele_key);
}
147
src/agent/onefuzz-agent/src/tasks/analysis/generic.rs
Normal file
@ -0,0 +1,147 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::tasks::{
    config::{CommonConfig, SyncedDir},
    heartbeat::HeartbeatSender,
    utils,
};
use anyhow::Result;
use futures::stream::StreamExt;
use onefuzz::{az_copy, blob::url::BlobUrl};
use onefuzz::{expand::Expand, fs::set_executable, fs::OwnedDir};
use reqwest::Url;
use serde::Deserialize;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    str,
};
use storage_queue::{QueueClient, EMPTY_QUEUE_DELAY};
use tokio::{fs, process::Command};

#[derive(Debug, Deserialize)]
pub struct Config {
    pub analyzer_exe: String,
    pub analyzer_options: Vec<String>,
    pub analyzer_env: HashMap<String, String>,

    pub target_exe: PathBuf,
    pub target_options: Vec<String>,
    pub input_queue: Option<Url>,
    pub crashes: Option<SyncedDir>,

    pub analysis: SyncedDir,
    pub tools: SyncedDir,

    #[serde(flatten)]
    pub common: CommonConfig,
}

pub async fn spawn(config: Config) -> Result<()> {
    let tmp_dir = PathBuf::from(format!("./{}/tmp", config.common.task_id));
    let tmp = OwnedDir::new(tmp_dir);
    tmp.reset().await?;

    utils::init_dir(&config.analysis.path).await?;
    utils::init_dir(&config.tools.path).await?;
    utils::sync_remote_dir(&config.tools, utils::SyncOperation::Pull).await?;
    set_executable(&config.tools.path).await?;
    run_existing(&config).await?;
    poll_inputs(&config, tmp).await?;
    Ok(())
}

async fn run_existing(config: &Config) -> Result<()> {
    if let Some(crashes) = &config.crashes {
        utils::init_dir(&crashes.path).await?;
        utils::sync_remote_dir(&crashes, utils::SyncOperation::Pull).await?;

        let mut read_dir = fs::read_dir(&crashes.path).await?;
        while let Some(file) = read_dir.next().await {
            verbose!("Processing file {:?}", file);
            let file = file?;
            run_tool(file.path(), &config).await?;
        }
        utils::sync_remote_dir(&config.analysis, utils::SyncOperation::Push).await?;
    }
    Ok(())
}

async fn already_checked(config: &Config, input: &BlobUrl) -> Result<bool> {
    let result = if let Some(crashes) = &config.crashes {
        crashes.url.account() == input.account()
            && crashes.url.container() == input.container()
            && crashes.path.join(input.name()).exists()
    } else {
        false
    };

    Ok(result)
}

async fn poll_inputs(config: &Config, tmp_dir: OwnedDir) -> Result<()> {
    let heartbeat = config.common.init_heartbeat();
    if let Some(queue) = &config.input_queue {
        let mut input_queue = QueueClient::new(queue.clone());

        loop {
            heartbeat.alive();
            if let Some(message) = input_queue.pop().await? {
                let input_url = match BlobUrl::parse(str::from_utf8(message.data())?) {
                    Ok(url) => url,
                    Err(err) => {
                        error!("could not parse input URL from queue message: {}", err);
                        return Ok(());
                    }
                };

                if !already_checked(&config, &input_url).await? {
                    let file_name = input_url.name();
                    let mut destination_path = PathBuf::from(tmp_dir.path());
                    destination_path.push(file_name);
                    az_copy::copy(input_url.url().as_ref(), &destination_path, false).await?;

                    run_tool(destination_path, &config).await?;
                    utils::sync_remote_dir(&config.analysis, utils::SyncOperation::Push).await?;
                }
                input_queue.delete(message).await?;
            } else {
                warn!("no new candidate inputs found, sleeping");
                tokio::time::delay_for(EMPTY_QUEUE_DELAY).await;
            }
        }
    }

    Ok(())
}

pub async fn run_tool(input: impl AsRef<Path>, config: &Config) -> Result<()> {
    let mut tool_args = Expand::new();

    tool_args
        .input(&input)
        .target_exe(&config.target_exe)
        .target_options(&config.target_options)
        .analyzer_exe(&config.analyzer_exe)
        .analyzer_options(&config.analyzer_options)
        .output_dir(&config.analysis.path);

    let analyzer_path = Expand::new()
        .tools_dir(&config.tools.path)
        .evaluate_value(&config.analyzer_exe)?;

    let mut cmd = Command::new(analyzer_path);
    cmd.kill_on_drop(true).env_remove("RUST_LOG");

    for arg in tool_args.evaluate(&config.analyzer_options)? {
        cmd.arg(arg);
    }

    for (k, v) in &config.analyzer_env {
        cmd.env(k, tool_args.evaluate_value(v)?);
    }

    cmd.output().await?;
    Ok(())
}
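`run_tool` above relies on `Expand` to substitute `{input}`-style placeholders in analyzer arguments. A stand-alone sketch of that substitution pattern (illustrative only, not the `onefuzz::expand` implementation):

use std::collections::HashMap;

// Illustrative sketch: replace "{name}" tokens in an argument with values
// from a map, leaving unknown tokens untouched.
fn expand_arg(arg: &str, values: &HashMap<&str, String>) -> String {
    let mut out = arg.to_string();
    for (name, value) in values {
        out = out.replace(&format!("{{{}}}", name), value);
    }
    out
}

fn main() {
    let mut values = HashMap::new();
    values.insert("input", "/tmp/crash-123".to_string());
    values.insert("target_exe", "./fuzz.exe".to_string());
    assert_eq!(
        expand_arg("{target_exe} {input}", &values),
        "./fuzz.exe /tmp/crash-123"
    );
}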
4
src/agent/onefuzz-agent/src/tasks/analysis/mod.rs
Normal file
@ -0,0 +1,4 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

pub mod generic;
150
src/agent/onefuzz-agent/src/tasks/config.rs
Normal file
@ -0,0 +1,150 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::large_enum_variant)]
use crate::tasks::{analysis, coverage, fuzz, heartbeat::*, merge, report};
use anyhow::Result;
use onefuzz::{
    blob::BlobContainerUrl,
    machine_id::get_machine_id,
    telemetry::{self, Event::task_start, EventData},
};
use reqwest::Url;
use serde::{self, Deserialize};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use uuid::Uuid;

#[derive(Debug, Deserialize, PartialEq, Clone)]
pub enum ContainerType {
    #[serde(alias = "inputs")]
    Inputs,
}

#[derive(Debug, Deserialize, Clone)]
pub struct CommonConfig {
    pub job_id: Uuid,

    pub task_id: Uuid,

    pub instrumentation_key: Option<Uuid>,

    pub heartbeat_queue: Option<Url>,

    pub telemetry_key: Option<Uuid>,
}

impl CommonConfig {
    pub fn init_heartbeat(&self) -> Option<HeartbeatClient> {
        self.heartbeat_queue
            .clone()
            .map(|url| HeartbeatClient::init(url, self.task_id))
    }
}

#[derive(Debug, Deserialize)]
#[serde(tag = "task_type")]
pub enum Config {
    #[serde(alias = "libfuzzer_fuzz")]
    LibFuzzerFuzz(fuzz::libfuzzer_fuzz::Config),

    #[serde(alias = "libfuzzer_crash_report")]
    LibFuzzerReport(report::libfuzzer_report::Config),

    #[serde(alias = "libfuzzer_merge")]
    LibFuzzerMerge(merge::libfuzzer_merge::Config),

    #[serde(alias = "libfuzzer_coverage")]
    LibFuzzerCoverage(coverage::libfuzzer_coverage::Config),

    #[serde(alias = "generic_analysis")]
    GenericAnalysis(analysis::generic::Config),

    #[serde(alias = "generic_generator")]
    GenericGenerator(fuzz::generator::GeneratorConfig),

    #[serde(alias = "generic_supervisor")]
    GenericSupervisor(fuzz::supervisor::SupervisorConfig),

    #[serde(alias = "generic_merge")]
    GenericMerge(merge::generic::Config),

    #[serde(alias = "generic_crash_report")]
    GenericReport(report::generic::Config),
}

impl Config {
    pub fn from_file(path: impl AsRef<Path>) -> Result<Self> {
        let json = std::fs::read_to_string(path)?;
        Ok(serde_json::from_str(&json)?)
    }

    pub fn common(&self) -> &CommonConfig {
        match self {
            Config::LibFuzzerFuzz(c) => &c.common,
            Config::LibFuzzerMerge(c) => &c.common,
            Config::LibFuzzerReport(c) => &c.common,
            Config::LibFuzzerCoverage(c) => &c.common,
            Config::GenericAnalysis(c) => &c.common,
            Config::GenericMerge(c) => &c.common,
            Config::GenericReport(c) => &c.common,
            Config::GenericSupervisor(c) => &c.common,
            Config::GenericGenerator(c) => &c.common,
        }
    }

    pub fn report_event(&self) {
        let event_type = match self {
            Config::LibFuzzerFuzz(_) => "libfuzzer_fuzz",
            Config::LibFuzzerMerge(_) => "libfuzzer_merge",
            Config::LibFuzzerReport(_) => "libfuzzer_crash_report",
            Config::LibFuzzerCoverage(_) => "libfuzzer_coverage",
            Config::GenericAnalysis(_) => "generic_analysis",
            Config::GenericMerge(_) => "generic_merge",
            Config::GenericReport(_) => "generic_crash_report",
            Config::GenericSupervisor(_) => "generic_supervisor",
            Config::GenericGenerator(_) => "generic_generator",
        };

        event!(task_start; EventData::Type = event_type);
    }

    pub async fn run(self) -> Result<()> {
        telemetry::set_property(EventData::JobId(self.common().job_id));
        telemetry::set_property(EventData::TaskId(self.common().task_id));
        telemetry::set_property(EventData::MachineId(get_machine_id().await?));

        info!("agent ready, dispatching task");
        self.report_event();

        match self {
            Config::LibFuzzerFuzz(config) => {
                fuzz::libfuzzer_fuzz::LibFuzzerFuzzTask::new(config)?
                    .start()
                    .await
            }
            Config::LibFuzzerReport(config) => {
                report::libfuzzer_report::ReportTask::new(config)
                    .run()
                    .await
            }
            Config::LibFuzzerCoverage(config) => {
                coverage::libfuzzer_coverage::CoverageTask::new(Arc::new(config))
                    .run()
                    .await
            }
            Config::LibFuzzerMerge(config) => merge::libfuzzer_merge::spawn(Arc::new(config)).await,
            Config::GenericAnalysis(config) => analysis::generic::spawn(config).await,
            Config::GenericGenerator(config) => fuzz::generator::spawn(Arc::new(config)).await,
            Config::GenericSupervisor(config) => fuzz::supervisor::spawn(config).await,
            Config::GenericMerge(config) => merge::generic::spawn(Arc::new(config)).await,
            Config::GenericReport(config) => report::generic::ReportTask::new(&config).run().await,
        }
    }
}

#[derive(Debug, Deserialize, Clone, PartialEq)]
pub struct SyncedDir {
    pub path: PathBuf,
    pub url: BlobContainerUrl,
}
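`Config::from_file` deserializes JSON whose `task_type` tag selects the enum variant, with the `CommonConfig` fields flattened alongside. A minimal sketch of a matching document for the `libfuzzer_fuzz` variant (field shapes inferred from the structs in this file and in fuzz/libfuzzer_fuzz.rs; the URLs and IDs are placeholders, and `BlobContainerUrl` is assumed to deserialize from a container URL string):

// Illustrative sketch only; see the assumptions noted above.
fn example_config() -> Result<Config> {
    let json = r#"{
        "task_type": "libfuzzer_fuzz",
        "job_id": "00000000-0000-0000-0000-000000000000",
        "task_id": "11111111-1111-1111-1111-111111111111",
        "target_exe": "fuzz.exe",
        "target_env": {},
        "target_options": [],
        "inputs": { "path": "inputs", "url": "https://contoso.com/inputs" },
        "crashes": { "path": "crashes", "url": "https://contoso.com/crashes" }
    }"#;
    Ok(serde_json::from_str(json)?)
}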
252
src/agent/onefuzz-agent/src/tasks/coverage/libfuzzer_coverage.rs
Normal file
@ -0,0 +1,252 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! # Coverage Task
//!
//! Computes a streaming coverage metric using Sancov-instrumented libFuzzers.
//! Reports the latest coverage rate via telemetry events and updates a remote
//! total coverage map in blob storage.
//!
//! ## Instrumentation
//!
//! Assumes the libFuzzer is instrumented with Sancov inline 8-bit counters.
//! This feature updates a global table without any PC callback. The coverage
//! scripts find and dump this table after executing the test input. For now,
//! our metric projects the counter value to a single bit, treating each table
//! entry as a flag rather than a counter.
//!
//! ## Dependencies
//!
//! This task invokes OS-specific debugger scripts to dump the coverage for
//! each input. To do this, the following must be in the `$PATH`:
//!
//! ### Linux
//! - `python3` (3.6)
//! - `gdb` (8.1)
//!
//! ### Windows
//! - `powershell.exe` (5.1)
//! - `cdb.exe` (10.0)
//!
//! Versions in parentheses have been tested.

use crate::tasks::config::SyncedDir;
use crate::tasks::coverage::{recorder::CoverageRecorder, total::TotalCoverage};
use crate::tasks::heartbeat::*;
use crate::tasks::utils::{init_dir, sync_remote_dir, SyncOperation};
use crate::tasks::{config::CommonConfig, generic::input_poller::*};
use anyhow::Result;
use async_trait::async_trait;
use futures::stream::StreamExt;
use onefuzz::fs::list_files;
use onefuzz::telemetry::Event::coverage_data;
use onefuzz::telemetry::EventData;
use reqwest::Url;
use serde::Deserialize;
use std::collections::HashMap;
use std::{
    ffi::OsString,
    path::{Path, PathBuf},
    sync::Arc,
};
use storage_queue::Message;
use tokio::fs;

const TOTAL_COVERAGE: &str = "total.cov";

#[derive(Debug, Deserialize)]
pub struct Config {
    pub target_exe: PathBuf,
    pub target_env: HashMap<String, String>,
    pub target_options: Vec<String>,
    pub input_queue: Option<Url>,
    pub readonly_inputs: Vec<SyncedDir>,
    pub coverage: SyncedDir,

    #[serde(flatten)]
    pub common: CommonConfig,
}

/// Compute the coverage provided by one or both of:
///
/// 1. A list of seed corpus containers (one-time batch mode)
/// 2. A queue of inputs pending coverage analysis (streaming)
///
/// If `seed_containers` is empty and `input_queue` is absent, this task
/// will do nothing. If `input_queue` is present, then this task will poll
/// forever.
pub struct CoverageTask {
    config: Arc<Config>,
    poller: InputPoller<Message>,
}

impl CoverageTask {
    pub fn new(config: impl Into<Arc<Config>>) -> Self {
        let config = config.into();

        let task_dir = PathBuf::from(config.common.task_id.to_string());
        let poller_dir = task_dir.join("poller");
        let poller = InputPoller::<Message>::new(poller_dir);

        Self { config, poller }
    }

    pub async fn run(&mut self) -> Result<()> {
        info!("starting libFuzzer coverage task");

        init_dir(&self.config.coverage.path).await?;
        verbose!(
            "initialized coverage dir, path = {}",
            self.config.coverage.path.display()
        );

        sync_remote_dir(&self.config.coverage, SyncOperation::Pull).await?;
        verbose!(
            "synced coverage dir, path = {}",
            self.config.coverage.path.display()
        );

        self.process().await
    }

    async fn process(&mut self) -> Result<()> {
        let mut processor = CoverageProcessor::new(self.config.clone()).await?;

        // Update the total with the coverage from each seed corpus.
        for dir in &self.config.readonly_inputs {
            verbose!("recording coverage for {}", dir.path.display());
            init_dir(&dir.path).await?;
            sync_remote_dir(&dir, SyncOperation::Pull).await?;
            self.record_corpus_coverage(&mut processor, dir).await?;
            fs::remove_dir_all(&dir.path).await?;
            sync_remote_dir(&self.config.coverage, SyncOperation::Push).await?;
        }

        info!(
            "recorded coverage for {} containers in `readonly_inputs`",
            self.config.readonly_inputs.len(),
        );

        // If a queue has been provided, poll it for new coverage.
        if let Some(queue) = &self.config.input_queue {
            verbose!("polling queue for new coverage");
            let callback = CallbackImpl::new(queue.clone(), processor);
            self.poller.run(callback).await?;
        }

        Ok(())
    }

    async fn record_corpus_coverage(
        &self,
        processor: &mut CoverageProcessor,
        corpus_dir: &SyncedDir,
    ) -> Result<()> {
        let mut corpus = fs::read_dir(&corpus_dir.path).await?;

        while let Some(input) = corpus.next().await {
            let input = match input {
                Ok(input) => input,
                Err(err) => {
                    error!("{}", err);
                    continue;
                }
            };

            processor.test_input(&input.path()).await?;
        }

        Ok(())
    }
}

pub struct CoverageProcessor {
    config: Arc<Config>,
    pub recorder: CoverageRecorder,
    pub total: TotalCoverage,
    pub module_totals: HashMap<OsString, TotalCoverage>,
    heartbeat_client: Option<HeartbeatClient>,
}

impl CoverageProcessor {
    pub async fn new(config: Arc<Config>) -> Result<Self> {
        let heartbeat_client = config.common.init_heartbeat();
        let total = TotalCoverage::new(config.coverage.path.join(TOTAL_COVERAGE));
        let recorder = CoverageRecorder::new(config.clone());
        let module_totals = HashMap::default();

        Ok(Self {
            config,
            recorder,
            total,
            module_totals,
            heartbeat_client,
        })
    }

    async fn update_module_total(&mut self, file: &Path, data: &[u8]) -> Result<()> {
        let module = file
            .file_name()
            .ok_or_else(|| format_err!("module must have filename"))?
            .to_os_string();

        verbose!("updating module info {:?}", module);

        if !self.module_totals.contains_key(&module) {
            let parent = &self.config.coverage.path.join("by-module");
            fs::create_dir_all(parent).await?;
            let module_total = parent.join(&module);
            let total = TotalCoverage::new(module_total);
            self.module_totals.insert(module.clone(), total);
        }

        self.module_totals[&module].update_bytes(data).await?;

        verbose!("updated {:?}", module);
        Ok(())
    }

    async fn collect_by_module(&mut self, path: &Path) -> Result<PathBuf> {
        let files = list_files(&path).await?;
        let mut sum = Vec::new();

        for file in &files {
            verbose!("checking {:?}", file);
            let mut content = fs::read(file).await?;
            self.update_module_total(file, &content).await?;
            sum.append(&mut content);
        }

        let mut combined = path.as_os_str().to_owned();
        combined.push(".cov");

        fs::write(&combined, sum).await?;

        Ok(combined.into())
    }

    pub async fn test_input(&mut self, input: &Path) -> Result<()> {
        info!("processing input {:?}", input);
        let new_coverage = self.recorder.record(input).await?;
        let combined = self.collect_by_module(&new_coverage).await?;
        self.total.update(&combined).await?;
        Ok(())
    }

    pub async fn report_total(&self) -> Result<()> {
        let info = self.total.info().await?;
        event!(coverage_data; EventData::Covered = info.covered, EventData::Features = info.features, EventData::Rate = info.rate);
        Ok(())
    }
}

#[async_trait]
impl Processor for CoverageProcessor {
    async fn process(&mut self, _url: Url, input: &Path) -> Result<()> {
        self.heartbeat_client.alive();
        self.test_input(input).await?;
        self.report_total().await?;
        sync_remote_dir(&self.config.coverage, SyncOperation::Push).await?;
        Ok(())
    }
}
6
src/agent/onefuzz-agent/src/tasks/coverage/mod.rs
Normal file
@ -0,0 +1,6 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

pub mod libfuzzer_coverage;
pub mod recorder;
pub mod total;
155
src/agent/onefuzz-agent/src/tasks/coverage/recorder.rs
Normal file
@ -0,0 +1,155 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::{
    env,
    path::{Path, PathBuf},
    process::Stdio,
    sync::Arc,
};

use anyhow::Result;
use onefuzz::{
    fs::{has_files, OwnedDir},
    sha256::digest_file,
};
use tokio::{
    fs,
    process::{Child, Command},
};

use crate::tasks::coverage::libfuzzer_coverage::Config;

pub struct CoverageRecorder {
    config: Arc<Config>,
    script_dir: OwnedDir,
}

impl CoverageRecorder {
    pub fn new(config: Arc<Config>) -> Self {
        let script_dir =
            OwnedDir::new(env::var("ONEFUZZ_TOOLS").unwrap_or_else(|_| "script".to_string()));

        Self { config, script_dir }
    }

    /// Invoke a script to write coverage to a file.
    ///
    /// Per module coverage is written to:
    ///     coverage/inputs/<SHA256_OF_INPUT>/<module_name>.cov
    ///
    /// The `.cov` file is a binary dump of the 8-bit PC counter table.
    pub async fn record(&mut self, test_input: impl AsRef<Path>) -> Result<PathBuf> {
        let test_input = test_input.as_ref();

        let coverage_path = {
            let digest = digest_file(test_input).await?;
            self.config.coverage.path.join("inputs").join(digest)
        };

        fs::create_dir_all(&coverage_path).await?;

        let script = self.invoke_debugger_script(test_input, &coverage_path)?;
        let output = script.wait_with_output().await?;

        if !output.status.success() {
            let err = format_err!("coverage recording failed: {}", output.status);
            error!("{}", err);
            error!(
                "recording stderr: {}",
                String::from_utf8_lossy(&output.stderr)
            );
            error!(
                "recording stdout: {}",
                String::from_utf8_lossy(&output.stdout)
            );

            return Err(err);
        } else {
            verbose!(
                "recording stderr: {}",
                String::from_utf8_lossy(&output.stderr)
            );
            verbose!(
                "recording stdout: {}",
                String::from_utf8_lossy(&output.stdout)
            );
        }

        if !has_files(&coverage_path).await? {
            tokio::fs::remove_dir(&coverage_path).await?;
            bail!("no coverage files for input: {}", test_input.display());
        }

        Ok(coverage_path)
    }

    #[cfg(target_os = "linux")]
    fn invoke_debugger_script(&self, test_input: &Path, output: &Path) -> Result<Child> {
        let script_path = self
            .script_dir
            .path()
            .join("linux")
            .join("libfuzzer-coverage")
            .join("coverage_cmd.py");

        let mut cmd = Command::new("gdb");
        cmd.arg(&self.config.target_exe)
            .arg("-nh")
            .arg("-batch")
            .arg("-x")
            .arg(script_path)
            .arg("-ex")
            .arg(format!(
                "coverage {} {} {}",
                &self.config.target_exe.to_string_lossy(),
                test_input.to_string_lossy(),
                output.to_string_lossy(),
            ))
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .kill_on_drop(true);

        for (k, v) in &self.config.target_env {
            cmd.env(k, v);
        }

        let child = cmd.spawn()?;

        Ok(child)
    }

    #[cfg(target_os = "windows")]
    fn invoke_debugger_script(&self, test_input: &Path, output: &Path) -> Result<Child> {
        let script_path = self
            .script_dir
            .path()
            .join("win64")
            .join("libfuzzer-coverage")
            .join("DumpCounters.js");

        let cdb_cmd = format!(
            ".scriptload {}; !dumpcounters {:?}; q",
            script_path.to_string_lossy(),
            output.to_string_lossy()
        );

        let mut cmd = Command::new("cdb.exe");

        cmd.arg("-c")
            .arg(cdb_cmd)
            .arg(&self.config.target_exe)
            .arg(test_input)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .kill_on_drop(true);

        for (k, v) in &self.config.target_env {
            cmd.env(k, v);
        }

        let child = cmd.spawn()?;

        Ok(child)
    }
}
92
src/agent/onefuzz-agent/src/tasks/coverage/total.rs
Normal file
@ -0,0 +1,92 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::path::{Path, PathBuf};

use anyhow::Result;
use tokio::{fs, io};

pub struct TotalCoverage {
    /// Absolute path to the total coverage file.
    ///
    /// May not yet exist on disk.
    path: PathBuf,
}

#[derive(Debug)]
pub struct Info {
    pub covered: u64,
    pub features: u64,
    pub rate: f64,
}

impl TotalCoverage {
    pub fn new(path: PathBuf) -> Self {
        Self { path }
    }

    pub async fn data(&self) -> Result<Option<Vec<u8>>> {
        use io::ErrorKind::NotFound;

        let data = fs::read(&self.path).await;

        if let Err(err) = &data {
            if err.kind() == NotFound {
                return Ok(None);
            }
        }

        Ok(Some(data?))
    }

    pub fn path(&self) -> &Path {
        &self.path
    }

    pub async fn update_bytes(&self, new_data: &[u8]) -> Result<()> {
        match self.data().await {
            Ok(Some(mut total_data)) => {
                for (i, b) in new_data.iter().enumerate() {
                    if *b > 0 {
                        total_data[i] = 1;
                    }
                }

                fs::write(self.path(), total_data).await?;
            }
            Ok(None) => {
                // Base case: we don't yet have any total coverage. Promote the
                // new coverage to being our total coverage.
                info!("initializing total coverage map {}", self.path().display());
                fs::write(self.path(), new_data).await?;
            }
            Err(err) => {
                // Couldn't read total for some other reason, so this is a real error.
                return Err(err);
            }
        }

        Ok(())
    }

    pub async fn update(&self, new: impl AsRef<Path>) -> Result<()> {
        let new_data = fs::read(new).await?;
        self.update_bytes(&new_data).await
    }

    pub async fn info(&self) -> Result<Info> {
        let data = self
            .data()
            .await?
            .ok_or_else(|| format_err!("coverage file not found"))?;

        let covered = data.iter().filter(|&&c| c > 0).count() as u64;
        let features = data.len() as u64;
        let rate = (covered as f64) / (features as f64);
        Ok(Info {
            covered,
            features,
            rate,
        })
    }
}
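A worked example of the merge rule in `update_bytes`: any nonzero counter in the new data flips the corresponding total entry to 1, so entries act as flags rather than counters.

// Stand-alone illustration of the total-coverage merge above.
fn merge_coverage(total: &mut [u8], new_data: &[u8]) {
    for (slot, &counter) in total.iter_mut().zip(new_data) {
        if counter > 0 {
            *slot = 1;
        }
    }
}

fn main() {
    let mut total = vec![0, 1, 0, 1];
    merge_coverage(&mut total, &[5, 0, 0, 9]);
    // Covered entries: 3 of 4 features, a rate of 0.75 as computed by info().
    assert_eq!(total, vec![1, 1, 0, 1]);
}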
239
src/agent/onefuzz-agent/src/tasks/fuzz/generator.rs
Normal file
@ -0,0 +1,239 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::tasks::{
    config::{CommonConfig, SyncedDir},
    heartbeat::*,
    utils,
};
use anyhow::{Error, Result};
use futures::stream::StreamExt;
use onefuzz::{expand::Expand, fs::set_executable, input_tester::Tester, sha256};
use serde::Deserialize;
use std::collections::HashMap;
use std::{
    ffi::OsString,
    path::{Path, PathBuf},
    process::Stdio,
    sync::Arc,
};
use tokio::{fs, process::Command};

fn default_bool_true() -> bool {
    true
}

#[derive(Debug, Deserialize, Clone)]
pub struct GeneratorConfig {
    pub generator_exe: String,
    pub generator_env: HashMap<String, String>,
    pub generator_options: Vec<String>,
    pub readonly_inputs: Vec<SyncedDir>,
    pub crashes: SyncedDir,
    pub tools: SyncedDir,

    pub target_exe: PathBuf,
    pub target_env: HashMap<String, String>,
    pub target_options: Vec<String>,
    pub target_timeout: Option<u64>,
    #[serde(default)]
    pub check_asan_log: bool,
    #[serde(default = "default_bool_true")]
    pub check_debugger: bool,
    #[serde(default)]
    pub check_retry_count: u64,
    pub rename_output: bool,
    #[serde(flatten)]
    pub common: CommonConfig,
}

pub async fn spawn(config: Arc<GeneratorConfig>) -> Result<(), Error> {
    utils::init_dir(&config.crashes.path).await?;
    utils::init_dir(&config.tools.path).await?;
    utils::sync_remote_dir(&config.tools, utils::SyncOperation::Pull).await?;
    set_executable(&config.tools.path).await?;
    let hb_client = config.common.init_heartbeat();

    for sync_dir in &config.readonly_inputs {
        utils::init_dir(&sync_dir.path).await?;
        utils::sync_remote_dir(&sync_dir, utils::SyncOperation::Pull).await?;
    }

    let resync = resync_corpuses(
        config.readonly_inputs.clone(),
        std::time::Duration::from_secs(10),
    );
    let crash_dir_monitor = utils::monitor_result_dir(config.crashes.clone());
    let tester = Tester::new(
        &config.target_exe,
        &config.target_options,
        &config.target_env,
        &config.target_timeout,
        config.check_asan_log,
        config.check_debugger,
        config.check_retry_count,
    );
    let inputs: Vec<_> = config.readonly_inputs.iter().map(|x| &x.path).collect();
    let fuzzing_monitor = start_fuzzing(&config, inputs, tester, hb_client);
    futures::try_join!(fuzzing_monitor, resync, crash_dir_monitor)?;
    Ok(())
}

async fn generate_input(
    generator_exe: &str,
    generator_env: &HashMap<String, String>,
    generator_options: &[String],
    tools_dir: impl AsRef<Path>,
    corpus_dir: impl AsRef<Path>,
    output_dir: impl AsRef<Path>,
) -> Result<()> {
    let mut expand = Expand::new();
    expand
        .generated_inputs(&output_dir)
        .input_corpus(&corpus_dir)
        .generator_exe(&generator_exe)
        .generator_options(&generator_options)
        .tools_dir(&tools_dir);

    utils::reset_tmp_dir(&output_dir).await?;

    let generator_path = Expand::new()
        .tools_dir(tools_dir.as_ref())
        .evaluate_value(generator_exe)?;

    let mut generator = Command::new(&generator_path);
    generator
        .kill_on_drop(true)
        .env_remove("RUST_LOG")
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::piped());

    for arg in expand.evaluate(generator_options)? {
        generator.arg(arg);
    }

    for (k, v) in generator_env {
        generator.env(k, expand.evaluate_value(v)?);
    }

    info!("Generating test cases with {:?}", generator);
    let output = generator.spawn()?.wait_with_output().await?;

    info!("Test case generation result {:?}", output);
    Ok(())
}

async fn start_fuzzing<'a>(
    config: &GeneratorConfig,
    corpus_dirs: Vec<impl AsRef<Path>>,
    tester: Tester<'a>,
    heartbeat_sender: Option<HeartbeatClient>,
) -> Result<()> {
    let generator_tmp = "generator_tmp";

    info!("Starting generator fuzzing loop");

    loop {
        heartbeat_sender.alive();

        for corpus_dir in &corpus_dirs {
            let corpus_dir = corpus_dir.as_ref();

            generate_input(
                &config.generator_exe,
                &config.generator_env,
                &config.generator_options,
                &config.tools.path,
                corpus_dir,
                generator_tmp,
            )
            .await?;

            let mut read_dir = fs::read_dir(generator_tmp).await?;
            while let Some(file) = read_dir.next().await {
                verbose!("Processing file {:?}", file);
                let file = file?;

                let destination_file = if config.rename_output {
                    let hash = sha256::digest_file(file.path()).await?;
                    OsString::from(hash)
                } else {
                    file.file_name()
                };

                let destination_file = config.crashes.path.join(destination_file);
                if tester.is_crash(file.path()).await? {
                    info!("Crash found, path = {}", file.path().display());

                    if let Err(err) = fs::rename(file.path(), &destination_file).await {
                        warn!("Unable to move file {:?} : {:?}", file.path(), err);
                    }
                }
            }

            verbose!(
                "Tested generated inputs for corpus = {}",
                corpus_dir.display()
            );
        }
    }
}

pub async fn resync_corpuses(dirs: Vec<SyncedDir>, delay: std::time::Duration) -> Result<()> {
    loop {
        for sync_dir in &dirs {
            utils::sync_remote_dir(sync_dir, utils::SyncOperation::Pull)
                .await
                .ok();
        }
        tokio::time::delay_for(delay).await;
    }
}

mod tests {
    #[tokio::test]
    #[cfg(target_os = "linux")]
    #[ignore]
    async fn test_radamsa_linux() {
        use super::*;
        use std::env;

        let radamsa_path = env::var("ONEFUZZ_TEST_RADAMSA_LINUX").unwrap();
        let corpus_dir_temp = tempfile::tempdir().unwrap();
        let corpus_dir = corpus_dir_temp.into_path();
        let seed_file_name = corpus_dir.clone().join("seed.txt");
        let radamsa_output_temp = tempfile::tempdir().unwrap();
        let radamsa_output = radamsa_output_temp.into_path();

        let generator_options: Vec<String> = vec![
            "-o",
            "{generated_inputs}/input-%n-%s",
            "-n",
            "100",
            "-r",
            "{input_corpus}",
        ]
        .iter()
        .map(|p| p.to_string())
        .collect();

        let radamsa_as_path = Path::new(&radamsa_path);
        let radamsa_dir = radamsa_as_path.parent().unwrap();
        let radamsa_exe = String::from("{tools_dir}/radamsa");
        let radamsa_env = HashMap::new();

        tokio::fs::write(seed_file_name, "test").await.unwrap();
        let _output = generate_input(
            &radamsa_exe,
            &radamsa_env,
            &generator_options,
            &radamsa_dir,
            corpus_dir,
            radamsa_output.clone(),
        )
        .await;
        let generated_outputs = std::fs::read_dir(radamsa_output.clone()).unwrap();
        assert_eq!(generated_outputs.count(), 100, "No crashes generated");
    }
}
375
src/agent/onefuzz-agent/src/tasks/fuzz/libfuzzer_fuzz.rs
Normal file
@ -0,0 +1,375 @@
|
||||
// Copyright (c) Microsoft Corporation.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
use crate::tasks::{
|
||||
config::{CommonConfig, SyncedDir},
|
||||
heartbeat::HeartbeatSender,
|
||||
utils,
|
||||
};
|
||||
use anyhow::Result;
|
||||
use futures::{future::try_join_all, stream::StreamExt};
|
||||
use onefuzz::{
|
||||
libfuzzer::{LibFuzzer, LibFuzzerLine},
|
||||
monitor::DirectoryMonitor,
|
||||
system,
|
||||
telemetry::{
|
||||
Event::{new_coverage, new_result, process_stats, runtime_stats},
|
||||
EventData,
|
||||
},
|
||||
uploader::BlobUploader,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use std::{collections::HashMap, path::PathBuf, process::ExitStatus};
|
||||
use tokio::{
|
||||
io,
|
||||
sync::mpsc,
|
||||
task,
|
||||
time::{self, Duration},
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
// Time between resync of all corpus container directories.
|
||||
const RESYNC_PERIOD: Duration = Duration::from_secs(30);
|
||||
|
||||
// Delay to allow for observation of CPU usage when reporting proc info.
|
||||
const PROC_INFO_COLLECTION_DELAY: Duration = Duration::from_secs(1);
|
||||
|
||||
// Period of reporting proc info about running processes.
|
||||
const PROC_INFO_PERIOD: Duration = Duration::from_secs(30);
|
||||
|
||||
// Period of reporting fuzzer-generated runtime stats.
|
||||
const RUNTIME_STATS_PERIOD: Duration = Duration::from_secs(60);
|
||||
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct Config {
|
||||
pub inputs: SyncedDir,
|
||||
pub readonly_inputs: Option<Vec<SyncedDir>>,
|
||||
pub crashes: SyncedDir,
|
||||
pub target_exe: PathBuf,
|
||||
pub target_env: HashMap<String, String>,
|
||||
pub target_options: Vec<String>,
|
||||
pub target_workers: Option<u64>,
|
||||
|
||||
#[serde(flatten)]
|
||||
pub common: CommonConfig,
|
||||
}
|
||||
|
||||
pub struct LibFuzzerFuzzTask {
|
||||
config: Config,
|
||||
}
|
||||
|
||||
impl LibFuzzerFuzzTask {
|
||||
pub fn new(config: Config) -> Result<Self> {
|
||||
Ok(Self { config })
|
||||
}
|
||||
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
let workers = self.config.target_workers.unwrap_or_else(|| {
|
||||
let cpus = num_cpus::get() as u64;
|
||||
u64::max(1, cpus - 1)
|
||||
});
|
||||
|
||||
self.init_directories().await?;
|
||||
self.sync_all_corpuses().await?;
|
||||
let hb_client = self.config.common.init_heartbeat();
|
||||
|
||||
// To be scheduled.
|
||||
let resync = self.resync_all_corpuses();
|
||||
let new_corpus = self.monitor_new_corpus();
|
||||
let faults = self.monitor_faults();
|
||||
|
||||
let (stats_sender, stats_receiver) = mpsc::unbounded_channel();
|
||||
let report_stats = report_runtime_stats(workers as usize, stats_receiver, hb_client);
|
||||
|
||||
let fuzzers: Vec<_> = (0..workers)
|
||||
.map(|id| self.start_fuzzer_monitor(id, stats_sender.clone()))
|
||||
.collect();
|
||||
|
||||
let fuzzers = try_join_all(fuzzers);
|
||||
|
||||
futures::try_join!(resync, new_corpus, faults, fuzzers, report_stats)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// The fuzzer monitor coordinates a _series_ of fuzzer runs.
|
||||
//
|
||||
// A run is one session of continuous fuzzing, terminated by a fuzzing error
|
||||
// or discovered fault. The monitor restarts the libFuzzer when it exits.
|
||||
async fn start_fuzzer_monitor(&self, worker_id: u64, stats_sender: StatsSender) -> Result<()> {
|
||||
loop {
|
||||
let run = self.run_fuzzer(worker_id, stats_sender.clone());
|
||||
|
||||
if let Err(err) = run.await {
|
||||
error!("Fuzzer run failed: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fuzz with a libFuzzer until it exits.
|
||||
//
|
||||
// While it runs, parse stderr for progress metrics, and report them.
|
||||
async fn run_fuzzer(&self, worker_id: u64, stats_sender: StatsSender) -> Result<ExitStatus> {
|
||||
use io::AsyncBufReadExt;
|
||||
|
||||
let run_id = Uuid::new_v4();
|
||||
|
||||
info!("starting fuzzer run, run_id = {}", run_id);
|
||||
|
||||
let inputs: Vec<_> = {
|
||||
if let Some(readonly_inputs) = &self.config.readonly_inputs {
|
||||
readonly_inputs.iter().map(|d| &d.path).collect()
|
||||
} else {
|
||||
vec![]
|
||||
}
|
||||
};
|
||||
|
||||
let fuzzer = LibFuzzer::new(
|
||||
&self.config.target_exe,
|
||||
&self.config.target_options,
|
||||
&self.config.target_env,
|
||||
);
|
||||
let mut running =
|
||||
fuzzer.fuzz(&self.config.crashes.path, &self.config.inputs.path, &inputs)?;
|
||||
|
||||
let sys_info = task::spawn(report_fuzzer_sys_info(worker_id, run_id, running.id()));
|
||||
|
||||
// Splitting borrow.
|
||||
let stderr = running
|
||||
            .stderr
            .as_mut()
            .ok_or_else(|| format_err!("stderr not captured"))?;
        let stderr = io::BufReader::new(stderr);

        stderr
            .lines()
            .for_each(|line| {
                let stats_sender = stats_sender.clone();

                async move {
                    let line = line.map_err(|e| e.into());

                    if let Err(err) = try_report_iter_update(stats_sender, worker_id, run_id, line)
                    {
                        error!("could not parse fuzzing iteration update: {}", err);
                    }
                }
            })
            .await;

        let (exit_status, _) = tokio::join!(running, sys_info);

        Ok(exit_status?)
    }

    async fn init_directories(&self) -> Result<()> {
        utils::init_dir(&self.config.inputs.path).await?;
        utils::init_dir(&self.config.crashes.path).await?;
        if let Some(readonly_inputs) = &self.config.readonly_inputs {
            for dir in readonly_inputs {
                utils::init_dir(&dir.path).await?;
            }
        }
        Ok(())
    }

    async fn sync_all_corpuses(&self) -> Result<()> {
        utils::sync_remote_dir(&self.config.inputs, utils::SyncOperation::Pull).await?;

        if let Some(readonly_inputs) = &self.config.readonly_inputs {
            for corpus in readonly_inputs {
                utils::sync_remote_dir(corpus, utils::SyncOperation::Pull).await?;
            }
        }

        Ok(())
    }

    async fn resync_all_corpuses(&self) -> Result<()> {
        loop {
            time::delay_for(RESYNC_PERIOD).await;

            self.sync_all_corpuses().await?;
        }
    }

    async fn monitor_new_corpus(&self) -> Result<()> {
        let url = self.config.inputs.url.url();
        let dir = self.config.inputs.path.clone();

        let mut monitor = DirectoryMonitor::new(dir);
        monitor.start()?;

        monitor
            .for_each(move |item| {
                let url = url.clone();

                async move {
                    event!(new_coverage; EventData::Path = item.display().to_string());

                    let mut uploader = BlobUploader::new(url);

                    if let Err(err) = uploader.upload(item.clone()).await {
                        error!("Couldn't upload coverage: {}", err);
                    }
                }
            })
            .await;

        Ok(())
    }

    async fn monitor_faults(&self) -> Result<()> {
        let url = self.config.crashes.url.url();
        let dir = self.config.crashes.path.clone();

        let mut monitor = DirectoryMonitor::new(dir);
        monitor.start()?;

        monitor
            .for_each(move |item| {
                let url = url.clone();

                async move {
                    event!(new_result; EventData::Path = item.display().to_string());

                    let mut uploader = BlobUploader::new(url);

                    if let Err(err) = uploader.upload(item.clone()).await {
                        error!("Couldn't upload fault: {}", err);
                    }
                }
            })
            .await;

        Ok(())
    }
}

fn try_report_iter_update(
    stats_sender: StatsSender,
    worker_id: u64,
    run_id: Uuid,
    line: Result<String>,
) -> Result<()> {
    let line = line?;
    let line = LibFuzzerLine::parse(line)?;

    if let Some(line) = line {
        stats_sender.send(RuntimeStats {
            worker_id,
            run_id,
            count: line.iters(),
            execs_sec: line.execs_sec(),
        })?;
    }

    Ok(())
}

async fn report_fuzzer_sys_info(worker_id: u64, run_id: Uuid, fuzzer_pid: u32) -> Result<()> {
    loop {
        system::refresh()?;

        // Allow for sampling CPU usage.
        time::delay_for(PROC_INFO_COLLECTION_DELAY).await;

        if let Some(proc_info) = system::proc_info(fuzzer_pid)? {
            event!(process_stats;
                EventData::WorkerId = worker_id,
                EventData::RunId = run_id,
                EventData::Name = proc_info.name,
                EventData::Pid = proc_info.pid,
                EventData::ProcessStatus = proc_info.status,
                EventData::CpuUsage = proc_info.cpu_usage,
                EventData::PhysicalMemory = proc_info.memory_kb,
                EventData::VirtualMemory = proc_info.virtual_memory_kb
            );
        } else {
            // The process no longer exists.
            break;
        }

        time::delay_for(PROC_INFO_PERIOD).await;
    }

    Ok(())
}

#[derive(Clone, Copy, Debug)]
pub struct RuntimeStats {
    worker_id: u64,
    run_id: Uuid,
    count: u64,
    execs_sec: f64,
}

impl RuntimeStats {
    pub fn report(&self) {
        event!(
            runtime_stats;
            EventData::WorkerId = self.worker_id,
            EventData::RunId = self.run_id,
            EventData::Count = self.count,
            EventData::ExecsSecond = self.execs_sec
        );
    }
}

type StatsSender = mpsc::UnboundedSender<RuntimeStats>;

#[derive(Clone, Copy, Debug)]
struct Timer {
    interval: Duration,
}

impl Timer {
    pub fn new(interval: Duration) -> Self {
        Self { interval }
    }

    async fn wait(&self) {
        time::delay_for(self.interval).await;
    }
}

// Report runtime stats, as delivered via the `stats` channel, with a periodic trigger to
// guarantee a minimum reporting frequency.
//
// The minimum frequency is to aid metric visualization. The libFuzzer binary runtime's `pulse`
// event is triggered by a doubling of the last (locally) logged total iteration count. For long-
// running worker runs, this can result in misleading gaps and binning artifacts. In effect, we
// are approximating nearest-neighbor interpolation on the runtime stats time series.
async fn report_runtime_stats(
    workers: usize,
    mut stats_channel: mpsc::UnboundedReceiver<RuntimeStats>,
    heartbeat_sender: impl HeartbeatSender,
) -> Result<()> {
    // Cache the last-reported stats for a given worker.
    //
    // When logging stats, the most recently reported runtime stats will be used for any
    // missing data. For time-triggered logging, it will be used for all workers.
    let mut last_reported: Vec<Option<RuntimeStats>> =
        std::iter::repeat(None).take(workers).collect();

    let timer = Timer::new(RUNTIME_STATS_PERIOD);

    loop {
        tokio::select! {
            Some(stats) = stats_channel.next() => {
                heartbeat_sender.alive();
                stats.report();

                let idx = stats.worker_id as usize;
                last_reported[idx] = Some(stats);
            }
            _ = timer.wait() => {
                for stats in &last_reported {
                    if let Some(stats) = stats {
                        stats.report();
                    }
                }
            }
        };
    }
}
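The `report_runtime_stats` loop above multiplexes fresh stats against a timer so that the last known value is re-logged at least once per period. A minimal, self-contained sketch of that re-emit pattern, assuming the tokio 0.2-era APIs used throughout this change (`tokio::select!`, `delay_for`) and a plain `u64` payload in place of `RuntimeStats`:

// Sketch only (hypothetical function name): re-emit the last value seen on a
// channel at least once per `period`, approximating nearest-neighbor
// interpolation of the time series.
use futures::stream::StreamExt;
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::delay_for;

async fn emit_with_minimum_frequency(mut rx: mpsc::UnboundedReceiver<u64>, period: Duration) {
    let mut last: Option<u64> = None;

    loop {
        tokio::select! {
            // New data arrived: log it and remember it.
            Some(value) = rx.next() => {
                println!("fresh value: {}", value);
                last = Some(value);
            }
            // No data within `period`: repeat the last known value.
            _ = delay_for(period) => {
                if let Some(value) = last {
                    println!("re-emitted value: {}", value);
                }
            }
        }
    }
}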
6
src/agent/onefuzz-agent/src/tasks/fuzz/mod.rs
Normal file
@ -0,0 +1,6 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

pub mod generator;
pub mod libfuzzer_fuzz;
pub mod supervisor;
330
src/agent/onefuzz-agent/src/tasks/fuzz/supervisor.rs
Normal file
@ -0,0 +1,330 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(clippy::too_many_arguments)]
use crate::tasks::{
    config::{CommonConfig, ContainerType, SyncedDir},
    heartbeat::*,
    stats::common::{monitor_stats, StatsFormat},
    utils::{self, CheckNotify},
};
use anyhow::{Error, Result};
use appinsights::telemetry::SeverityLevel;
use onefuzz::{
    expand::Expand,
    fs::{has_files, set_executable, OwnedDir},
};
use serde::Deserialize;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    process::Stdio,
    time::Duration,
};
use tokio::{
    process::{Child, Command},
    sync::Notify,
};

#[derive(Debug, Deserialize)]
pub struct SupervisorConfig {
    pub inputs: SyncedDir,
    pub crashes: SyncedDir,
    pub supervisor_exe: String,
    pub supervisor_env: HashMap<String, String>,
    pub supervisor_options: Vec<String>,
    pub supervisor_input_marker: Option<String>,
    pub target_exe: PathBuf,
    pub target_options: Vec<String>,
    pub tools: SyncedDir,
    pub wait_for_files: Option<ContainerType>,
    pub stats_file: Option<String>,
    pub stats_format: Option<StatsFormat>,
    #[serde(flatten)]
    pub common: CommonConfig,
}

const HEARTBEAT_PERIOD: Duration = Duration::from_secs(60);

pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
    let runtime_dir = OwnedDir::new(config.common.task_id.to_string());
    runtime_dir.create_if_missing().await?;

    utils::init_dir(&config.tools.path).await?;
    utils::sync_remote_dir(&config.tools, utils::SyncOperation::Pull).await?;
    set_executable(&config.tools.path).await?;

    let supervisor_path = Expand::new()
        .tools_dir(&config.tools.path)
        .evaluate_value(&config.supervisor_exe)?;

    let crashes = SyncedDir {
        path: runtime_dir.path().join("crashes"),
        url: config.crashes.url.clone(),
    };

    utils::init_dir(&crashes.path).await?;
    let monitor_crashes = utils::monitor_result_dir(crashes.clone());

    let inputs = SyncedDir {
        path: runtime_dir.path().join("inputs"),
        url: config.inputs.url.clone(),
    };
    utils::init_dir(&inputs.path).await?;
    verbose!("initialized {}", inputs.path.display());

    let sync_inputs = resync_corpus(inputs.clone());

    if let Some(context) = &config.wait_for_files {
        let dir = match context {
            ContainerType::Inputs => &inputs,
        };

        let delay = std::time::Duration::from_secs(10);
        loop {
            utils::sync_remote_dir(dir, utils::SyncOperation::Pull).await?;
            if has_files(&dir.path).await? {
                break;
            }
            tokio::time::delay_for(delay).await;
        }
    }

    let process = start_supervisor(
        &runtime_dir.path(),
        &supervisor_path,
        &config.target_exe,
        &crashes.path,
        &inputs.path,
        &config.target_options,
        &config.supervisor_options,
        &config.supervisor_env,
        &config.supervisor_input_marker,
    )
    .await?;

    let stopped = Notify::new();
    let monitor_process = monitor_process(process, &stopped);
    let hb = config.common.init_heartbeat();

    let heartbeat_process = heartbeat_process(&stopped, hb);

    let monitor_path = if let Some(stats_file) = &config.stats_file {
        Some(
            Expand::new()
                .runtime_dir(runtime_dir.path())
                .evaluate_value(stats_file)?,
        )
    } else {
        verbose!("no stats file to monitor");
        None
    };

    let monitor_stats = monitor_stats(monitor_path, config.stats_format);

    futures::try_join!(
        heartbeat_process,
        monitor_process,
        monitor_stats,
        monitor_crashes,
        sync_inputs,
    )?;

    Ok(())
}

async fn heartbeat_process(
    stopped: &Notify,
    heartbeat_client: Option<HeartbeatClient>,
) -> Result<()> {
    while !stopped.is_notified(HEARTBEAT_PERIOD).await {
        heartbeat_client.alive();
    }
    Ok(())
}

async fn monitor_process(process: tokio::process::Child, stopped: &Notify) -> Result<()> {
    verbose!("waiting for child output...");
    let output: std::process::Output = process.wait_with_output().await?;
    verbose!("child exited with {:?}", output.status);

    if output.status.success() {
        verbose!("child status is success, notifying");
        stopped.notify();
        Ok(())
    } else {
        let err_text = String::from_utf8_lossy(&output.stderr);
        let output_text = String::from_utf8_lossy(&output.stdout);
        let message = format!("{} {}", err_text, output_text);
        error!("{}", message);
        stopped.notify();
        Err(Error::msg(message))
    }
}

async fn start_supervisor(
    runtime_dir: impl AsRef<Path>,
    supervisor_path: impl AsRef<Path>,
    target_exe: impl AsRef<Path>,
    fault_dir: impl AsRef<Path>,
    inputs_dir: impl AsRef<Path>,
    target_options: &[String],
    supervisor_options: &[String],
    supervisor_env: &HashMap<String, String>,
    supervisor_input_marker: &Option<String>,
) -> Result<Child> {
    let mut cmd = Command::new(supervisor_path.as_ref());

    let cmd = cmd
        .kill_on_drop(true)
        .env_remove("RUST_LOG")
        .stdout(Stdio::piped())
        .stderr(Stdio::piped());

    let mut expand = Expand::new();
    expand
        .supervisor_exe(supervisor_path)
        .supervisor_options(supervisor_options)
        .crashes(fault_dir)
        .runtime_dir(runtime_dir)
        .target_exe(target_exe)
        .target_options(target_options)
        .input_corpus(inputs_dir);

    if let Some(input_marker) = supervisor_input_marker {
        expand.input(input_marker);
    }

    let args = expand.evaluate(supervisor_options)?;
    cmd.args(&args);

    for (k, v) in supervisor_env {
        cmd.env(k, expand.evaluate_value(v)?);
    }

    info!("starting supervisor '{:?}'", cmd);
    let child = cmd.spawn()?;
    Ok(child)
}

pub async fn resync_corpus(sync_dir: SyncedDir) -> Result<()> {
    let delay = std::time::Duration::from_secs(10);

    loop {
        let result = utils::sync_remote_dir(&sync_dir, utils::SyncOperation::Pull).await;

        if result.is_err() {
            warn!("error syncing dir: {:?}", sync_dir);
        }

        tokio::time::delay_for(delay).await;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::tasks::stats::afl::read_stats;
    use onefuzz::telemetry::EventData;
    use std::collections::HashMap;
    use std::time::Instant;

    const MAX_FUZZ_TIME_SECONDS: u64 = 120;

    async fn has_stats(path: &PathBuf) -> bool {
        if let Ok(stats) = read_stats(path).await {
            for entry in stats {
                if matches!(entry, EventData::ExecsSecond(x) if x > 0.0) {
                    return true;
                }
            }
            false
        } else {
            false
        }
    }

    #[tokio::test]
    #[cfg(target_os = "linux")]
    #[cfg_attr(not(feature = "integration_test"), ignore)]
    async fn test_fuzzer_linux() {
        use std::env;

        let runtime_dir = tempfile::tempdir().unwrap();
        let afl_fuzz_exe = if let Ok(x) = env::var("ONEFUZZ_TEST_AFL_LINUX_FUZZER") {
            x
        } else {
            warn!("Unable to test AFL integration");
            return;
        };

        let afl_test_binary = if let Ok(x) = env::var("ONEFUZZ_TEST_AFL_LINUX_TEST_BINARY") {
            x
        } else {
            warn!("Unable to test AFL integration");
            return;
        };

        let fault_dir_temp = tempfile::tempdir().unwrap();
        let fault_dir = fault_dir_temp.path();
        let corpus_dir_temp = tempfile::tempdir().unwrap();
        let corpus_dir = corpus_dir_temp.into_path();
        let seed_file_name = corpus_dir.clone().join("seed.txt");
        let target_options = vec!["{input}".to_owned()];
        let supervisor_env = HashMap::new();
        let supervisor_options: Vec<_> = vec![
            "-d",
            "-i",
            "{input_corpus}",
            "-o",
            "{crashes}",
            "--",
            "{target_exe}",
            "{target_options}",
        ]
        .iter()
        .map(|p| p.to_string())
        .collect();

        // AFL input marker
        let supervisor_input_marker = Some("@@".to_owned());

        println!(
            "testing 2: corpus_dir {:?} -- fault_dir {:?} -- seed_file_name {:?}",
            corpus_dir, fault_dir, seed_file_name
        );

        tokio::fs::write(seed_file_name, "xyz").await.unwrap();
        let process = start_supervisor(
            runtime_dir,
            PathBuf::from(afl_fuzz_exe),
            PathBuf::from(afl_test_binary),
            fault_dir.clone(),
            corpus_dir,
            &target_options,
            &supervisor_options,
            &supervisor_env,
            &supervisor_input_marker,
        )
        .await
        .unwrap();

        let notify = Notify::new();
        let _fuzzing_monitor = monitor_process(process, &notify);
        let stat_output = fault_dir.join("fuzzer_stats");
        let start = Instant::now();
        loop {
            if has_stats(&stat_output).await {
                break;
            }

            if start.elapsed().as_secs() > MAX_FUZZ_TIME_SECONDS {
                panic!(
                    "afl did not generate stats in {} seconds",
                    MAX_FUZZ_TIME_SECONDS
                );
            }
            tokio::time::delay_for(std::time::Duration::from_secs(1)).await;
        }
    }
}
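The `wait_for_files` gate in `spawn` above pulls the remote container and re-checks until a first input appears. A standalone sketch of the same poll-until-nonempty pattern, with a local directory check standing in for `utils::sync_remote_dir`/`has_files` (those helpers are assumed, not reimplemented here):

use std::io;
use std::path::Path;
use std::time::Duration;

// Sketch: return once `dir` contains at least one entry, re-checking on a
// fixed delay. The real task also pulls the remote container before each check.
async fn wait_for_first_input(dir: &Path, delay: Duration) -> io::Result<()> {
    loop {
        // Blocking read_dir is acceptable for a sketch; the task uses async helpers.
        if std::fs::read_dir(dir)?.next().is_some() {
            return Ok(());
        }
        tokio::time::delay_for(delay).await;
    }
}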
293
src/agent/onefuzz-agent/src/tasks/generic/input_poller.rs
Normal file
@ -0,0 +1,293 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::{fmt, path::PathBuf};

use anyhow::Result;
use futures::stream::StreamExt;
use onefuzz::blob::BlobUrl;
use onefuzz::fs::OwnedDir;
use reqwest::Url;
use tokio::{
    fs,
    time::{self, Duration},
};

use crate::tasks::{config::SyncedDir, utils};

mod callback;
pub use callback::*;

const POLL_INTERVAL: Duration = Duration::from_secs(10);

#[cfg(test)]
mod tests;

#[derive(Clone, Debug, Eq, PartialEq)]
pub enum State<M> {
    Ready,
    Polled(Option<M>),
    Parsed(M, Url),
    Downloaded(M, Url, PathBuf),
    Processed(M),
}

impl<M> fmt::Display for State<M> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            State::Ready => write!(f, "Ready")?,
            State::Polled(..) => write!(f, "Polled")?,
            State::Parsed(..) => write!(f, "Parsed")?,
            State::Downloaded(..) => write!(f, "Downloaded")?,
            State::Processed(..) => write!(f, "Processed")?,
        }

        Ok(())
    }
}

pub enum Event<'a, M> {
    Poll(&'a mut dyn Queue<M>),
    Parse(&'a mut dyn Parser<M>),
    Download(&'a mut dyn Downloader),
    Process(&'a mut dyn Processor),
    Finish(&'a mut dyn Queue<M>),
}

impl<'a, M> fmt::Display for Event<'a, M> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Event::Poll(..) => write!(f, "Poll")?,
            Event::Parse(..) => write!(f, "Parse")?,
            Event::Download(..) => write!(f, "Download")?,
            Event::Process(..) => write!(f, "Process")?,
            Event::Finish(..) => write!(f, "Finish")?,
        }

        Ok(())
    }
}

impl<'a, M> fmt::Debug for Event<'a, M> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self)
    }
}

/// State machine that tries to poll a queue for new messages, parse a test
/// input URL from each message, download the test input, then process it.
///
/// The implementations of the transition actions are provided by impls of
/// callback traits.
///
/// Generic in the type `M` of the queue message. We assume `M` carries both
/// application data (here, the input URL, in some encoding) and metadata for
/// operations like finalizing a dequeue with a pop receipt.
pub struct InputPoller<M> {
    /// Agent-local directory where the poller will download inputs.
    /// Will be reset for each new input.
    working_dir: OwnedDir,

    /// Internal automaton state.
    ///
    /// This is only nullable so we can internally `take()` the current state
    /// when scrutinizing it in the `trigger()` method.
    state: Option<State<M>>,

    batch_dir: Option<SyncedDir>,
}

impl<M> InputPoller<M> {
    pub fn new(working_dir: impl Into<PathBuf>) -> Self {
        let working_dir = OwnedDir::new(working_dir);
        let state = Some(State::Ready);
        Self {
            state,
            working_dir,
            batch_dir: None,
        }
    }

    /// Process a given SyncedDir in batch
    pub async fn batch_process(
        &mut self,
        processor: &mut dyn Processor,
        to_process: &SyncedDir,
    ) -> Result<()> {
        self.batch_dir = Some(to_process.clone());
        utils::init_dir(&to_process.path).await?;
        utils::sync_remote_dir(&to_process, utils::SyncOperation::Pull).await?;
        let mut read_dir = fs::read_dir(&to_process.path).await?;
        while let Some(file) = read_dir.next().await {
            verbose!("Processing batch-downloaded input {:?}", file);

            let file = file?;
            let path = file.path();

            // Compute the file name relative to the synced directory, and thus the
            // container.
            let blob_name = {
                let dir_path = to_process.path.canonicalize()?;
                let input_path = path.canonicalize()?;
                let dir_relative = input_path.strip_prefix(&dir_path)?;
                dir_relative.display().to_string()
            };
            let url = to_process.url.blob(blob_name).url();

            processor.process(url, &path).await?;
        }
        Ok(())
    }

    /// Check if an input was already processed via batch-processing its container.
    pub async fn seen_in_batch(&self, url: &Url) -> Result<bool> {
        let result = if let Some(batch_dir) = &self.batch_dir {
            if let Ok(blob) = BlobUrl::new(url.clone()) {
                batch_dir.url.account() == blob.account()
                    && batch_dir.url.container() == blob.container()
                    && batch_dir.path.join(blob.name()).exists()
            } else {
                false
            }
        } else {
            false
        };
        Ok(result)
    }

    /// Path to the working directory.
    ///
    /// We will create or reset the working directory before entering the
    /// `Downloaded` state, but a caller cannot otherwise assume it exists.
    #[allow(unused)]
    pub fn working_dir(&self) -> &OwnedDir {
        &self.working_dir
    }

    /// Get the current automaton state, including the state data.
    pub fn state(&self) -> &State<M> {
        self.state.as_ref().unwrap_or_else(|| unreachable!())
    }

    fn set_state(&mut self, state: impl Into<Option<State<M>>>) {
        self.state = state.into();
    }

    pub async fn run(&mut self, mut cb: impl Callback<M>) -> Result<()> {
        loop {
            match self.state() {
                State::Polled(None) => {
                    verbose!("Input queue empty, sleeping");
                    time::delay_for(POLL_INTERVAL).await;
                }
                State::Downloaded(_msg, _url, input) => {
                    info!("Processing downloaded input: {:?}", input);
                }
                _ => {}
            }

            self.next(&mut cb).await?;
        }
    }

    /// Transition to the next state in the poll loop, using `cb` to implement
    /// the transition actions.
    pub async fn next(&mut self, cb: &mut impl Callback<M>) -> Result<()> {
        use Event::*;
        use State::*;

        match self.state() {
            Ready => self.trigger(Poll(cb.queue())).await?,
            Polled(..) => self.trigger(Parse(cb.parser())).await?,
            Parsed(..) => self.trigger(Download(cb.downloader())).await?,
            Downloaded(..) => self.trigger(Process(cb.processor())).await?,
            Processed(..) => self.trigger(Finish(cb.queue())).await?,
        }

        Ok(())
    }

    /// Trigger a state transition event, and execute the action for each valid
    /// transition.
    ///
    /// The `Event` itself contains any callback functions and data needed to
    /// concretely implement the transition action.
    pub async fn trigger(&mut self, event: Event<'_, M>) -> Result<()> {
        // Take ownership of the current state so we can move its data out
        // of the variant.
        //
        // Invariant: `self.state.is_some()` on function entry.
        //
        // This local now represents the current state, and we must not call
        // any other method on `self` that assumes `self.state.is_some()`.
        let state = self.state.take().unwrap();

        let result = self.try_trigger(state, event).await;

        if result.is_err() {
            // We must maintain a valid state, and we can logically recover from
            // any failed action or invalid transition.
            self.state = Some(State::Ready);
        }

        // Check that we always have a defined internal state.
        assert!(self.state.is_some());

        result
    }

    async fn try_trigger(&mut self, state: State<M>, event: Event<'_, M>) -> Result<()> {
        use Event::*;
        use State::*;

        match (state, event) {
            (Ready, Poll(queue)) => {
                let msg = queue.pop().await?;

                self.set_state(Polled(msg));
            }
            (Polled(msg), Parse(parser)) => {
                if let Some(msg) = msg {
                    let url = parser.parse(&msg)?;
                    self.set_state(Parsed(msg, url));
                } else {
                    self.set_state(Ready);
                }
            }
            (Parsed(msg, url), Download(downloader)) => {
                self.working_dir.reset().await?;

                if self.seen_in_batch(&url).await? {
                    verbose!("url was seen during batch processing: {:?}", url);
                    self.set_state(Processed(msg));
                } else {
                    let input = downloader
                        .download(url.clone(), self.working_dir.path())
                        .await?;

                    self.set_state(Downloaded(msg, url, input));
                }
            }
            (Downloaded(msg, url, input), Process(processor)) => {
                processor.process(url, &input).await?;

                self.set_state(Processed(msg));
            }
            (Processed(msg), Finish(queue)) => {
                queue.delete(msg).await?;

                self.set_state(Ready);
            }
            // We could panic here, and treat this case as a logic error.
            // However, we want users of this struct to be able to override the
            // default transition, so let them recover if they misuse it.
            (state, event) => bail!(
                "Invalid transition, state = {state}, event = {event}",
                state = state,
                event = event,
            ),
        }

        Ok(())
    }
}
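`batch_process` names each blob by the input's path relative to the synced directory, so nested corpus entries keep their in-container paths. A sketch of that path arithmetic using only the standard library (the paths below are hypothetical):

use std::path::Path;

// Derive a blob name from an input path relative to its synced directory,
// mirroring the `blob_name` computation in `batch_process` above.
// Canonicalization is skipped for brevity; `batch_process` canonicalizes both
// paths first so symlinks and `..` components cannot break the prefix check.
fn blob_name_for(synced_dir: &Path, input: &Path) -> Result<String, std::path::StripPrefixError> {
    let relative = input.strip_prefix(synced_dir)?;
    Ok(relative.display().to_string())
}

fn main() {
    // Hypothetical paths, for illustration only.
    let dir = Path::new("/task/inputs");
    let input = Path::new("/task/inputs/sub/seed-001.bin");
    assert_eq!(blob_name_for(dir, input).unwrap(), "sub/seed-001.bin");
}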
@ -0,0 +1,119 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::path::{Path, PathBuf};

use anyhow::Result;
use async_trait::async_trait;
use reqwest::Url;
use storage_queue::{Message, QueueClient};

#[async_trait]
pub trait Queue<M> {
    async fn pop(&mut self) -> Result<Option<M>>;

    async fn delete(&mut self, msg: M) -> Result<()>;
}

pub trait Parser<M> {
    fn parse(&mut self, msg: &M) -> Result<Url>;
}

#[async_trait]
pub trait Downloader {
    async fn download(&mut self, url: Url, dir: &Path) -> Result<PathBuf>;
}

#[async_trait]
pub trait Processor {
    async fn process(&mut self, url: Url, input: &Path) -> Result<()>;
}

pub trait Callback<M> {
    fn queue(&mut self) -> &mut dyn Queue<M>;

    fn parser(&mut self) -> &mut dyn Parser<M>;

    fn downloader(&mut self) -> &mut dyn Downloader;

    fn processor(&mut self) -> &mut dyn Processor;
}

pub struct CallbackImpl<P>
where
    P: Processor + Send,
{
    queue: QueueClient,
    pub processor: P,
}

impl<P> Callback<Message> for CallbackImpl<P>
where
    P: Processor + Send,
{
    fn queue(&mut self) -> &mut dyn Queue<Message> {
        self
    }

    fn parser(&mut self) -> &mut dyn Parser<Message> {
        self
    }

    fn downloader(&mut self) -> &mut dyn Downloader {
        self
    }

    fn processor(&mut self) -> &mut dyn Processor {
        &mut self.processor
    }
}

impl<P> CallbackImpl<P>
where
    P: Processor + Send,
{
    pub fn new(queue_url: Url, processor: P) -> Self {
        let queue = QueueClient::new(queue_url);
        Self { queue, processor }
    }
}

#[async_trait]
impl<P> Queue<Message> for CallbackImpl<P>
where
    P: Processor + Send,
{
    async fn pop(&mut self) -> Result<Option<Message>> {
        self.queue.pop().await
    }

    async fn delete(&mut self, msg: Message) -> Result<()> {
        self.queue.delete(msg).await
    }
}

impl<P> Parser<Message> for CallbackImpl<P>
where
    P: Processor + Send,
{
    fn parse(&mut self, msg: &Message) -> Result<Url> {
        let text = std::str::from_utf8(msg.data())?;
        let url = Url::parse(text)?;

        Ok(url)
    }
}

#[async_trait]
impl<P> Downloader for CallbackImpl<P>
where
    P: Processor + Send,
{
    async fn download(&mut self, url: Url, dir: &Path) -> Result<PathBuf> {
        use crate::tasks::utils::download_input;

        let input = download_input(url, dir).await?;

        Ok(input)
    }
}
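Putting the pieces together: a minimal sketch of driving the poller with a custom `Processor` through `CallbackImpl`, assuming the types above are in scope and that `queue_url` points at a storage queue whose messages are input URLs. The processor here is hypothetical; it just logs each downloaded input.

use anyhow::Result;
use async_trait::async_trait;
use reqwest::Url;
use std::path::Path;

// Hypothetical processor for illustration only.
struct LogProcessor;

#[async_trait]
impl Processor for LogProcessor {
    async fn process(&mut self, url: Url, input: &Path) -> Result<()> {
        println!("processing {} from {}", input.display(), url);
        Ok(())
    }
}

// Runs the poll/parse/download/process/finish loop until an error propagates.
async fn run_poller(queue_url: Url) -> Result<()> {
    let mut poller = InputPoller::new("./poller-working-dir");
    let callback = CallbackImpl::new(queue_url, LogProcessor);
    poller.run(callback).await
}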
243
src/agent/onefuzz-agent/src/tasks/generic/input_poller/tests.rs
Normal file
@ -0,0 +1,243 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use anyhow::Result;
use async_trait::async_trait;
use reqwest::Url;
use std::path::Path;
use tempfile::{tempdir, TempDir};

use super::*;

type Msg = u64;

#[derive(Default)]
struct TestQueue {
    pending: Vec<Msg>,
    popped: Vec<Msg>,
    deleted: Vec<Msg>,
}

#[async_trait]
impl Queue<Msg> for TestQueue {
    async fn pop(&mut self) -> Result<Option<Msg>> {
        let msg = self.pending.pop();

        if let Some(msg) = msg {
            self.popped.push(msg);
        }

        Ok(msg)
    }

    async fn delete(&mut self, msg: Msg) -> Result<()> {
        self.deleted.push(msg);

        Ok(())
    }
}

pub struct TestQueueAlwaysFails;

#[async_trait]
impl Queue<Msg> for TestQueueAlwaysFails {
    async fn pop(&mut self) -> Result<Option<Msg>> {
        bail!("simulated `Queue::pop()` failure")
    }

    async fn delete(&mut self, _msg: Msg) -> Result<()> {
        bail!("simulated `Queue::delete()` failure")
    }
}

#[derive(Default)]
struct TestParser {
    urls: Vec<Url>,
}

impl Parser<Msg> for TestParser {
    fn parse(&mut self, msg: &Msg) -> Result<Url> {
        // By returning the `Url` at index `msg`, we witness that `parse()` was
        // called with `msg`, and simulate a valid input.
        let url = self.urls[*msg as usize].clone();

        Ok(url)
    }
}

#[derive(Default)]
struct TestDownloader {
    downloaded: Vec<Url>,
}

#[async_trait]
impl Downloader for TestDownloader {
    async fn download(&mut self, url: Url, dir: &Path) -> Result<PathBuf> {
        let name = url_input_name(&url);
        let dst = dir.join(name);

        self.downloaded.push(url);

        Ok(dst)
    }
}

#[derive(Default)]
struct TestProcessor {
    processed: Vec<(Url, PathBuf)>,
}

#[async_trait]
impl Processor for TestProcessor {
    async fn process(&mut self, url: Url, input: &Path) -> Result<()> {
        self.processed.push((url, input.to_owned()));

        Ok(())
    }
}

fn url_input_name(url: &Url) -> String {
    url.path_segments().unwrap().last().unwrap().to_owned()
}

fn fixture() -> (TempDir, InputPoller<Msg>) {
    let dir = tempdir().unwrap();
    let task = InputPoller::new(dir.path());

    (dir, task)
}

fn url_fixture(msg: Msg) -> Url {
    Url::parse(&format!("https://azure.com/c/{}", msg)).unwrap()
}

fn input_fixture(dir: &Path, msg: Msg) -> PathBuf {
    let name = msg.to_string();
    dir.join(name)
}

#[tokio::test]
async fn test_ready_poll() {
    let (_, mut task) = fixture();

    let msg: Msg = 0;

    let mut queue = TestQueue::default();
    queue.pending = vec![msg];

    task.trigger(Event::Poll(&mut queue)).await.unwrap();

    assert_eq!(task.state(), &State::Polled(Some(msg)));
    assert_eq!(queue.popped, vec![msg]);
}

#[tokio::test]
async fn test_polled_some_parse() {
    let (_, mut task) = fixture();

    let msg: Msg = 0;
    let url = url_fixture(msg);

    task.set_state(State::Polled(Some(msg)));

    let mut parser = TestParser::default();
    parser.urls = vec![url.clone()]; // at index `msg`

    task.trigger(Event::Parse(&mut parser)).await.unwrap();

    assert_eq!(task.state(), &State::Parsed(msg, url));
}

#[tokio::test]
async fn test_polled_none_parse() {
    let (_, mut task) = fixture();

    task.set_state(State::Polled(None));

    let mut parser = TestParser::default();
    parser.urls = vec![];

    task.trigger(Event::Parse(&mut parser)).await.unwrap();

    assert_eq!(task.state(), &State::Ready);
}

#[tokio::test]
async fn test_parsed_download() {
    let (dir, mut task) = fixture();

    let msg: Msg = 0;
    let url = url_fixture(msg);
    let input = input_fixture(dir.path(), msg);

    task.set_state(State::Parsed(msg, url.clone()));

    let mut downloader = TestDownloader::default();

    task.trigger(Event::Download(&mut downloader))
        .await
        .unwrap();

    assert_eq!(task.state(), &State::Downloaded(msg, url.clone(), input));
    assert_eq!(downloader.downloaded, vec![url]);
}

#[tokio::test]
async fn test_downloaded_process() {
    let (dir, mut task) = fixture();

    let msg: Msg = 0;
    let url = url_fixture(msg);
    let input = input_fixture(dir.path(), msg);

    task.set_state(State::Downloaded(msg, url.clone(), input.clone()));

    let mut processor = TestProcessor::default();

    task.trigger(Event::Process(&mut processor)).await.unwrap();

    assert_eq!(task.state(), &State::Processed(msg));
    assert_eq!(processor.processed, vec![(url, input)]);
}

#[tokio::test]
async fn test_processed_finish() {
    let (_, mut task) = fixture();

    let msg: Msg = 0;

    task.set_state(State::Processed(msg));

    let mut queue = TestQueue::default();

    task.trigger(Event::Finish(&mut queue)).await.unwrap();

    assert_eq!(task.state(), &State::Ready);
    assert_eq!(queue.deleted, vec![msg]);
}

#[tokio::test]
async fn test_invalid_trigger() {
    let (_, mut task) = fixture();

    let mut queue = TestQueue::default();

    // Invalid transition: `(Ready, Finish)`.
    let result = task.trigger(Event::Finish(&mut queue)).await;

    assert!(result.is_err());
    assert_eq!(task.state(), &State::Ready);
}

#[tokio::test]
async fn test_valid_trigger_failed_action() {
    let (_, mut task) = fixture();

    let mut queue = TestQueueAlwaysFails;

    // Valid transition, but `queue.pop()` will return `Err`.
    let result = task.trigger(Event::Poll(&mut queue)).await;

    assert!(result.is_err());
    assert_eq!(task.state(), &State::Ready);
}
4
src/agent/onefuzz-agent/src/tasks/generic/mod.rs
Normal file
@ -0,0 +1,4 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

pub mod input_poller;
164
src/agent/onefuzz-agent/src/tasks/heartbeat.rs
Normal file
@ -0,0 +1,164 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::onefuzz::machine_id::{get_machine_id, get_machine_name};
use crate::tasks::utils::CheckNotify;
use anyhow::Result;
use reqwest::Url;
use serde::{self, Deserialize, Serialize};
use std::{
    collections::HashSet,
    sync::{Arc, Mutex},
    time::Duration,
};
use storage_queue::QueueClient;
use tokio::{
    sync::Notify,
    task::{self, JoinHandle},
};
use uuid::Uuid;

const DEFAULT_HEARTBEAT_PERIOD: Duration = Duration::from_secs(60 * 5);

#[derive(Debug, Deserialize, Serialize, Hash, Eq, PartialEq, Clone)]
#[serde(tag = "type")]
pub enum HeartbeatData {
    TaskAlive,
    MachineAlive,
}

#[derive(Debug, Deserialize, Serialize, Clone)]
struct Heartbeat<'a> {
    task_id: Uuid,
    machine_id: Uuid,
    machine_name: &'a str,
    data: Vec<HeartbeatData>,
}

pub struct HeartbeatClient {
    cancelled: Arc<Notify>,
    messages: Arc<Mutex<HashSet<HeartbeatData>>>,
    _heartbeat_process: JoinHandle<Result<()>>,
}

impl Drop for HeartbeatClient {
    fn drop(&mut self) {
        self.cancelled.notify();
    }
}

impl HeartbeatClient {
    pub fn init(queue_url: Url, task_id: Uuid) -> Self {
        HeartbeatClient::init_with_period(queue_url, task_id, DEFAULT_HEARTBEAT_PERIOD)
    }

    pub fn init_with_period(queue_url: Url, task_id: Uuid, heartbeat_period: Duration) -> Self {
        let messages = Arc::new(Mutex::new(HashSet::new()));
        let cancelled = Arc::new(Notify::new());
        let _heartbeat_process = HeartbeatClient::start_background_process(
            task_id,
            queue_url,
            messages.clone(),
            cancelled.clone(),
            heartbeat_period,
        );
        HeartbeatClient {
            messages,
            _heartbeat_process,
            cancelled,
        }
    }

    fn drain_current_messages(messages: Arc<Mutex<HashSet<HeartbeatData>>>) -> Vec<HeartbeatData> {
        let lock = messages.lock();
        let mut messages = lock.unwrap();
        let drain = messages.iter().cloned().collect::<Vec<HeartbeatData>>();
        messages.clear();
        drain
    }

    async fn flush(
        task_id: Uuid,
        machine_id: Uuid,
        machine_name: &str,
        queue_client: &QueueClient,
        messages: Arc<Mutex<HashSet<HeartbeatData>>>,
    ) {
        let mut data = HeartbeatClient::drain_current_messages(messages.clone());
        data.push(HeartbeatData::MachineAlive);
        let _ = queue_client
            .enqueue(Heartbeat {
                task_id,
                data,
                machine_id,
                machine_name,
            })
            .await;
    }

    pub fn start_background_process(
        task_id: Uuid,
        queue_url: Url,
        messages: Arc<Mutex<HashSet<HeartbeatData>>>,
        cancelled: Arc<Notify>,
        heartbeat_period: Duration,
    ) -> JoinHandle<Result<()>> {
        let queue_client = QueueClient::new(queue_url);
        task::spawn(async move {
            let machine_id = get_machine_id().await?;
            let machine_name = get_machine_name().await?;

            HeartbeatClient::flush(
                task_id,
                machine_id,
                &machine_name,
                &queue_client,
                messages.clone(),
            )
            .await;
            while !cancelled.is_notified(heartbeat_period).await {
                HeartbeatClient::flush(
                    task_id,
                    machine_id,
                    &machine_name,
                    &queue_client,
                    messages.clone(),
                )
                .await;
            }
            HeartbeatClient::flush(
                task_id,
                machine_id,
                &machine_name,
                &queue_client,
                messages.clone(),
            )
            .await;
            Ok(())
        })
    }
}

pub trait HeartbeatSender {
    fn send(&self, data: HeartbeatData) -> Result<()>;

    fn alive(&self) {
        self.send(HeartbeatData::TaskAlive).unwrap()
    }
}

impl HeartbeatSender for HeartbeatClient {
    fn send(&self, data: HeartbeatData) -> Result<()> {
        let mut messages_lock = self.messages.lock().unwrap();
        messages_lock.insert(data);
        Ok(())
    }
}

impl HeartbeatSender for Option<HeartbeatClient> {
    fn send(&self, data: HeartbeatData) -> Result<()> {
        match self {
            Some(client) => client.send(data),
            None => Ok(()),
        }
    }
}
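Typical usage is fire-and-forget: create a client, call `alive()` as work progresses, and let `Drop` stop the background flusher. A sketch, assuming `HeartbeatClient` and the `HeartbeatSender` trait are in scope and that `queue_url`/`task_id` come from task config:

use reqwest::Url;
use std::time::Duration;
use uuid::Uuid;

async fn run_with_heartbeat(queue_url: Url, task_id: Uuid) {
    // Drain and enqueue collected heartbeat data every 30 seconds.
    let heartbeat = HeartbeatClient::init_with_period(queue_url, task_id, Duration::from_secs(30));

    for _ in 0..100 {
        // Record task liveness; the background task batches this into the
        // next Heartbeat message (always alongside MachineAlive).
        heartbeat.alive();
        tokio::time::delay_for(Duration::from_secs(1)).await;
    }

    // Dropping the client notifies the background task via `cancelled`,
    // which performs one final flush before exiting.
    drop(heartbeat);
}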
175
src/agent/onefuzz-agent/src/tasks/merge/generic.rs
Normal file
@ -0,0 +1,175 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::tasks::{
    config::{CommonConfig, SyncedDir},
    heartbeat::HeartbeatSender,
    utils,
};
use anyhow::Result;
use onefuzz::{expand::Expand, fs::set_executable};
use reqwest::Url;
use serde::Deserialize;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    process::Stdio,
    sync::Arc,
};
use storage_queue::{QueueClient, EMPTY_QUEUE_DELAY};
use tokio::process::Command;

#[derive(Debug, Deserialize)]
struct QueueMessage {
    content_length: u32,

    url: Url,
}

#[derive(Debug, Deserialize)]
pub struct Config {
    pub supervisor_exe: String,
    pub supervisor_options: Vec<String>,
    pub supervisor_env: HashMap<String, String>,
    pub supervisor_input_marker: String,
    pub target_exe: PathBuf,
    pub target_options: Vec<String>,
    pub target_options_merge: bool,
    pub tools: SyncedDir,
    pub input_queue: Url,
    pub inputs: SyncedDir,
    pub unique_inputs: SyncedDir,

    #[serde(flatten)]
    pub common: CommonConfig,
}

pub async fn spawn(config: Arc<Config>) -> Result<()> {
    utils::init_dir(&config.tools.path).await?;
    utils::sync_remote_dir(&config.tools, utils::SyncOperation::Pull).await?;
    set_executable(&config.tools.path).await?;

    utils::init_dir(&config.unique_inputs.path).await?;
    let hb_client = config.common.init_heartbeat();
    loop {
        hb_client.alive();
        let tmp_dir = PathBuf::from("./tmp");
        verbose!("tmp dir reset");
        utils::reset_tmp_dir(&tmp_dir).await?;
        utils::sync_remote_dir(&config.unique_inputs, utils::SyncOperation::Pull).await?;
        let mut queue = QueueClient::new(config.input_queue.clone());
        if let Some(msg) = queue.pop().await? {
            let input_url = match utils::parse_url_data(msg.data()) {
                Ok(url) => url,
                Err(err) => {
                    error!("could not parse input URL from queue message: {}", err);
                    return Ok(());
                }
            };

            if let Err(error) = process_message(config.clone(), &input_url, &tmp_dir).await {
                error!(
                    "failed to process latest message from notification queue: {}",
                    error
                );
            } else {
                verbose!("will delete popped message with id = {}", msg.id());

                queue.delete(msg).await?;

                verbose!(
                    "Attempting to delete {} from the candidate container",
                    input_url.clone()
                );

                if let Err(e) = try_delete_blob(input_url.clone()).await {
                    error!("Failed to delete blob {}", e)
                }
            }
        } else {
            warn!("no new candidate inputs found, sleeping");
            tokio::time::delay_for(EMPTY_QUEUE_DELAY).await;
        }
    }
}

async fn process_message(config: Arc<Config>, input_url: &Url, tmp_dir: &PathBuf) -> Result<()> {
    let input_path = utils::download_input(input_url.clone(), &config.unique_inputs.path).await?;
    info!("downloaded input to {}", input_path.display());

    info!("Merging corpus");
    match merge(&config, tmp_dir).await {
        Ok(_) => {
            // remove the 'queue' folder
            let mut queue_dir = tmp_dir.clone();
            queue_dir.push("queue");
            let _delete_output = tokio::fs::remove_dir_all(queue_dir).await;
            let synced_dir = SyncedDir {
                path: tmp_dir.clone(),
                url: config.unique_inputs.url.clone(),
            };
            utils::sync_remote_dir(&synced_dir, utils::SyncOperation::Push).await?;
        }
        Err(e) => error!("Merge failed: {}", e),
    }
    Ok(())
}

async fn try_delete_blob(input_url: Url) -> Result<()> {
    let http_client = reqwest::Client::new();
    match http_client
        .delete(input_url)
        .send()
        .await?
        .error_for_status()
    {
        Ok(_) => Ok(()),
        Err(err) => Err(err.into()),
    }
}

async fn merge(config: &Config, output_dir: impl AsRef<Path>) -> Result<()> {
    let mut supervisor_args = Expand::new();

    supervisor_args
        .input(&config.supervisor_input_marker)
        .input_corpus(&config.unique_inputs.path)
        .target_options(&config.target_options)
        .supervisor_exe(&config.supervisor_exe)
        .supervisor_options(&config.supervisor_options)
        .generated_inputs(output_dir)
        .target_exe(&config.target_exe);

    if config.target_options_merge {
        supervisor_args.target_options(&config.target_options);
    }

    let supervisor_path = Expand::new()
        .tools_dir(&config.tools.path)
        .evaluate_value(&config.supervisor_exe)?;

    let mut cmd = Command::new(supervisor_path);

    cmd.kill_on_drop(true)
        .env_remove("RUST_LOG")
        .stdout(Stdio::piped())
        .stderr(Stdio::piped());

    for (k, v) in &config.supervisor_env {
        cmd.env(k, v);
    }

    for arg in supervisor_args.evaluate(&config.supervisor_options)? {
        cmd.arg(arg);
    }

    if !config.target_options_merge {
        for arg in supervisor_args.evaluate(&config.target_options)? {
            cmd.arg(arg);
        }
    }

    info!("Starting merge '{:?}'", cmd);
    cmd.spawn()?.wait_with_output().await?;
    Ok(())
}
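The merge command line is assembled via `Expand`, which substitutes `{...}` placeholders with values registered on the builder. An illustrative sketch with hypothetical paths and AFL-style options, assuming `evaluate` returns the substituted argument vector (as its call sites above suggest) and that the builder methods accept path-like arguments:

use onefuzz::expand::Expand;

// Hypothetical values, for illustration only.
fn build_afl_args() -> anyhow::Result<Vec<String>> {
    let supervisor_options: Vec<String> = vec![
        "-i", "{input_corpus}", "-o", "{crashes}", "--", "{target_exe}", "{input}",
    ]
    .iter()
    .map(|s| s.to_string())
    .collect();

    let mut expand = Expand::new();
    expand
        .input("@@") // AFL's on-disk input marker
        .input_corpus("./inputs")
        .crashes("./crashes")
        .target_exe("./fuzz_target");

    // Each "{...}" placeholder is replaced by the registered value.
    expand.evaluate(&supervisor_options)
}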
137
src/agent/onefuzz-agent/src/tasks/merge/libfuzzer_merge.rs
Normal file
@ -0,0 +1,137 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::tasks::{
    config::{CommonConfig, SyncedDir},
    heartbeat::*,
    utils,
};
use anyhow::Result;
use onefuzz::libfuzzer::{LibFuzzer, LibFuzzerMergeOutput};
use reqwest::Url;
use serde::Deserialize;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    sync::Arc,
};
use storage_queue::{QueueClient, EMPTY_QUEUE_DELAY};

#[derive(Debug, Deserialize)]
struct QueueMessage {
    content_length: u32,

    url: Url,
}

#[derive(Debug, Deserialize)]
pub struct Config {
    pub target_exe: PathBuf,
    pub target_env: HashMap<String, String>,
    pub target_options: Vec<String>,
    pub input_queue: Url,
    pub inputs: SyncedDir,
    pub unique_inputs: SyncedDir,

    #[serde(flatten)]
    pub common: CommonConfig,
}

pub async fn spawn(config: Arc<Config>) -> Result<()> {
    let hb_client = config.common.init_heartbeat();
    utils::init_dir(&config.unique_inputs.path).await?;
    loop {
        hb_client.alive();
        if let Err(error) = process_message(config.clone()).await {
            error!(
                "failed to process latest message from notification queue: {}",
                error
            );
        }
    }
}

async fn process_message(config: Arc<Config>) -> Result<()> {
    let tmp_dir = "./tmp";

    verbose!("tmp dir reset");

    utils::reset_tmp_dir(tmp_dir).await?;
    utils::sync_remote_dir(&config.unique_inputs, utils::SyncOperation::Pull).await?;

    let mut queue = QueueClient::new(config.input_queue.clone());

    if let Some(msg) = queue.pop().await? {
        let input_url = match utils::parse_url_data(msg.data()) {
            Ok(url) => url,
            Err(err) => {
                error!("could not parse input URL from queue message: {}", err);
                return Ok(());
            }
        };

        let input_path = utils::download_input(input_url.clone(), tmp_dir).await?;
        info!("downloaded input to {}", input_path.display());

        info!("Merging corpus");
        match merge(
            &config.target_exe,
            &config.target_options,
            &config.target_env,
            &config.unique_inputs.path,
            &tmp_dir,
        )
        .await
        {
            Ok(result) if result.added_files_count > 0 => {
                info!("Added {} new files to the corpus", result.added_files_count);
                utils::sync_remote_dir(&config.unique_inputs, utils::SyncOperation::Push).await?;
            }
            Ok(_) => info!("No new files added by the merge"),
            Err(e) => error!("Merge failed: {}", e),
        }

        verbose!("will delete popped message with id = {}", msg.id());

        queue.delete(msg).await?;

        verbose!(
            "Attempting to delete {} from the candidate container",
            input_url.clone()
        );

        if let Err(e) = try_delete_blob(input_url.clone()).await {
            error!("Failed to delete blob {}", e)
        }
        Ok(())
    } else {
        warn!("no new candidate inputs found, sleeping");
        tokio::time::delay_for(EMPTY_QUEUE_DELAY).await;
        Ok(())
    }
}

async fn try_delete_blob(input_url: Url) -> Result<()> {
    let http_client = reqwest::Client::new();
    match http_client
        .delete(input_url)
        .send()
        .await?
        .error_for_status()
    {
        Ok(_) => Ok(()),
        Err(err) => Err(err.into()),
    }
}

async fn merge(
    target_exe: &Path,
    target_options: &[String],
    target_env: &HashMap<String, String>,
    corpus_dir: &Path,
    candidate_dir: impl AsRef<Path>,
) -> Result<LibFuzzerMergeOutput> {
    let merger = LibFuzzer::new(target_exe, target_options, target_env);
    let candidates = vec![candidate_dir];
    merger.merge(&corpus_dir, &candidates).await
}
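`LibFuzzer::merge` is not shown in this diff, but at the process level a libFuzzer corpus merge amounts to running the target with `-merge=1`, which copies any candidate input that adds coverage into the corpus directory. A sketch of that invocation (the wrapper above additionally handles env vars, extra options, and output parsing):

use std::path::Path;
use tokio::process::Command;

// Sketch: run `target -merge=1 <corpus> <candidates>` and fail on a non-zero exit.
async fn libfuzzer_merge(target_exe: &Path, corpus: &Path, candidates: &Path) -> anyhow::Result<()> {
    let status = Command::new(target_exe)
        .arg("-merge=1")
        .arg(corpus)
        .arg(candidates)
        .status()
        .await?;
    anyhow::ensure!(status.success(), "libFuzzer merge exited with {}", status);
    Ok(())
}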
5
src/agent/onefuzz-agent/src/tasks/merge/mod.rs
Normal file
@ -0,0 +1,5 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

pub mod generic;
pub mod libfuzzer_merge;
13
src/agent/onefuzz-agent/src/tasks/mod.rs
Normal file
@ -0,0 +1,13 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

pub mod analysis;
pub mod config;
pub mod coverage;
pub mod fuzz;
pub mod generic;
pub mod heartbeat;
pub mod merge;
pub mod report;
pub mod stats;
pub mod utils;
162
src/agent/onefuzz-agent/src/tasks/report/crash_report.rs
Normal file
@ -0,0 +1,162 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use crate::tasks::config::SyncedDir;
use anyhow::Result;
use onefuzz::{
    asan::AsanLog,
    blob::{BlobClient, BlobContainerUrl, BlobUrl},
};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use uuid::Uuid;

#[derive(Debug, Deserialize, Serialize)]
pub struct CrashReport {
    pub input_sha256: String,

    pub input_blob: InputBlob,

    pub executable: PathBuf,

    pub crash_type: String,

    pub crash_site: String,

    pub call_stack: Vec<String>,

    pub call_stack_sha256: String,

    pub asan_log: Option<String>,

    pub task_id: Uuid,

    pub job_id: Uuid,
}

#[derive(Debug, Deserialize, Serialize)]
pub struct NoCrash {
    pub input_sha256: String,
    pub input_blob: InputBlob,
    pub executable: PathBuf,
    pub task_id: Uuid,
    pub job_id: Uuid,
    pub tries: u64,
    pub error: Option<String>,
}

#[derive(Debug, Deserialize, Serialize)]
pub enum CrashTestResult {
    CrashReport(CrashReport),
    NoRepro(NoCrash),
}

// Conditionally upload a report, if it would not be a duplicate.
//
// Use SHA-256 of call stack as dedupe key.
async fn upload_deduped(report: &CrashReport, container: &BlobContainerUrl) -> Result<()> {
    let blob = BlobClient::new();
    let deduped_name = report.unique_blob_name();
    let deduped_url = container.blob(deduped_name).url();
    blob.put(deduped_url)
        .json(report)
        // Conditional PUT, only if-not-exists.
        .header("If-None-Match", "*")
        .send()
        .await?;
    Ok(())
}

async fn upload_report(report: &CrashReport, container: &BlobContainerUrl) -> Result<()> {
    let blob = BlobClient::new();
    let url = container.blob(report.blob_name()).url();
    blob.put(url).json(report).send().await?;
    Ok(())
}

async fn upload_no_repro(report: &NoCrash, container: &BlobContainerUrl) -> Result<()> {
    let blob = BlobClient::new();

    let url = container.blob(report.blob_name()).url();
    blob.put(url).json(report).send().await?;
    Ok(())
}

impl CrashTestResult {
    pub async fn upload(
        &self,
        unique_reports: &SyncedDir,
        reports: &Option<SyncedDir>,
        no_repro: &Option<SyncedDir>,
    ) -> Result<()> {
        match self {
            Self::CrashReport(report) => {
                upload_deduped(report, &unique_reports.url).await?;
                if let Some(reports) = reports {
                    upload_report(report, &reports.url).await?;
                }
            }
            Self::NoRepro(report) => {
                if let Some(no_repro) = no_repro {
                    upload_no_repro(report, &no_repro.url).await?;
                }
            }
        }
        Ok(())
    }
}

#[derive(Debug, Deserialize, Serialize)]
pub struct InputBlob {
    pub account: String,
    pub container: String,
    pub name: String,
}

impl From<BlobUrl> for InputBlob {
    fn from(blob: BlobUrl) -> Self {
        Self {
            account: blob.account(),
            container: blob.container(),
            name: blob.name(),
        }
    }
}

impl CrashReport {
    pub fn new(
        asan_log: AsanLog,
        task_id: Uuid,
        job_id: Uuid,
        executable: impl Into<PathBuf>,
        input_blob: InputBlob,
        input_sha256: String,
    ) -> Self {
        Self {
            input_sha256,
            input_blob,
            executable: executable.into(),
            crash_type: asan_log.fault_type().into(),
            crash_site: asan_log.summary().into(),
            call_stack: asan_log.call_stack().to_vec(),
            call_stack_sha256: asan_log.call_stack_sha256(),
            asan_log: Some(asan_log.text().to_string()),
            task_id,
            job_id,
        }
    }

    pub fn blob_name(&self) -> String {
        format!("{}.json", self.input_sha256)
    }

    pub fn unique_blob_name(&self) -> String {
        format!("{}.json", self.call_stack_sha256)
    }
}

impl NoCrash {
    pub fn blob_name(&self) -> String {
        format!("{}.json", self.input_sha256)
    }
}
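The dedupe in `upload_deduped` hinges on a conditional PUT: `If-None-Match: *` makes the write succeed only when no blob with that name exists, so the first report per call-stack hash wins. A generic sketch of the create-if-absent idea with plain reqwest; the status-code handling here is an assumption (the code above simply propagates errors), and a real Azure Put Blob request also needs headers such as `x-ms-blob-type: BlockBlob` that `BlobClient` supplies:

use reqwest::{Client, StatusCode, Url};

// Create-if-absent upload. Returns Ok(true) on a fresh write, Ok(false) when
// the resource already existed (i.e., a duplicate report).
async fn put_if_absent(url: Url, body: String) -> anyhow::Result<bool> {
    let response = Client::new()
        .put(url)
        .header("If-None-Match", "*")
        .body(body)
        .send()
        .await?;

    match response.status() {
        status if status.is_success() => Ok(true),
        // A precondition/conflict failure just means the blob already exists.
        StatusCode::PRECONDITION_FAILED | StatusCode::CONFLICT => Ok(false),
        status => anyhow::bail!("unexpected status: {}", status),
    }
}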
171
src/agent/onefuzz-agent/src/tasks/report/generic.rs
Normal file
@ -0,0 +1,171 @@
|
||||
// Copyright (c) Microsoft Corporation.
|
||||
// Licensed under the MIT License.
|
||||
|
||||
use super::crash_report::{CrashReport, CrashTestResult, InputBlob, NoCrash};
|
||||
use crate::tasks::{
|
||||
config::{CommonConfig, SyncedDir},
|
||||
generic::input_poller::{CallbackImpl, InputPoller, Processor},
|
||||
heartbeat::*,
|
||||
};
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
use onefuzz::{blob::BlobUrl, input_tester::Tester, sha256};
|
||||
use reqwest::Url;
|
||||
use serde::Deserialize;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
use storage_queue::Message;
|
||||
|
||||
fn default_bool_true() -> bool {
|
||||
true
|
||||
}
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct Config {
|
||||
pub target_exe: PathBuf,
|
||||
|
||||
#[serde(default)]
|
||||
pub target_options: Vec<String>,
|
||||
|
||||
#[serde(default)]
|
||||
pub target_env: HashMap<String, String>,
|
||||
|
||||
pub input_queue: Option<Url>,
|
||||
pub crashes: Option<SyncedDir>,
|
||||
pub reports: Option<SyncedDir>,
|
||||
pub unique_reports: SyncedDir,
|
||||
pub no_repro: Option<SyncedDir>,
|
||||
|
||||
pub target_timeout: Option<u64>,
|
||||
|
||||
#[serde(default)]
|
||||
pub check_asan_log: bool,
|
||||
#[serde(default = "default_bool_true")]
|
||||
pub check_debugger: bool,
|
||||
#[serde(default)]
|
||||
pub check_retry_count: u64,
|
||||
|
||||
#[serde(flatten)]
|
||||
pub common: CommonConfig,
|
||||
}
|
||||

pub struct ReportTask<'a> {
    config: &'a Config,
    poller: InputPoller<Message>,
}

impl<'a> ReportTask<'a> {
    pub fn new(config: &'a Config) -> Self {
        let working_dir = config.common.task_id.to_string();
        let poller = InputPoller::new(working_dir);

        Self { config, poller }
    }

    pub async fn run(&mut self) -> Result<()> {
        info!("Starting generic crash report task");
        let mut processor = GenericReportProcessor::new(&self.config);

        if let Some(crashes) = &self.config.crashes {
            self.poller.batch_process(&mut processor, &crashes).await?;
        }

        if let Some(queue) = &self.config.input_queue {
            let callback = CallbackImpl::new(queue.clone(), processor);
            self.poller.run(callback).await?;
        }
        Ok(())
    }
}

pub struct GenericReportProcessor<'a> {
    config: &'a Config,
    tester: Tester<'a>,
    heartbeat_client: Option<HeartbeatClient>,
}

impl<'a> GenericReportProcessor<'a> {
    pub fn new(config: &'a Config) -> Self {
        let heartbeat_client = config.common.init_heartbeat();
        let tester = Tester::new(
            &config.target_exe,
            &config.target_options,
            &config.target_env,
            &config.target_timeout,
            config.check_asan_log,
            config.check_debugger,
            config.check_retry_count,
        );

        Self {
            config,
            tester,
            heartbeat_client,
        }
    }

    pub async fn test_input(&self, input_url: Url, input: &Path) -> Result<CrashTestResult> {
        self.heartbeat_client.alive();
        let input_sha256 = sha256::digest_file(input).await?;
        let task_id = self.config.common.task_id;
        let job_id = self.config.common.job_id;
        let input_blob = InputBlob::from(BlobUrl::new(input_url)?);

        let test_report = self.tester.test_input(input).await?;

        if let Some(asan_log) = test_report.asan_log {
            let crash_report = CrashReport::new(
                asan_log,
                task_id,
                job_id,
                &self.config.target_exe,
                input_blob,
                input_sha256,
            );
            Ok(CrashTestResult::CrashReport(crash_report))
        } else if let Some(crash) = test_report.crash {
            let call_stack_sha256 = sha256::digest_iter(&crash.call_stack);
            let crash_report = CrashReport {
                input_blob,
                input_sha256,
                executable: PathBuf::from(&self.config.target_exe),
                call_stack: crash.call_stack,
                crash_type: crash.crash_type,
                crash_site: crash.crash_site,
                call_stack_sha256,
                asan_log: None,
                task_id,
                job_id,
            };

            Ok(CrashTestResult::CrashReport(crash_report))
        } else {
            let no_repro = NoCrash {
                input_blob,
                input_sha256,
                executable: PathBuf::from(&self.config.target_exe),
                task_id,
                job_id,
                tries: 1 + self.config.check_retry_count,
                error: test_report.error.map(|e| format!("{}", e)),
            };

            Ok(CrashTestResult::NoRepro(no_repro))
        }
    }
}

#[async_trait]
impl<'a> Processor for GenericReportProcessor<'a> {
    async fn process(&mut self, url: Url, input: &Path) -> Result<()> {
        let report = self.test_input(url, input).await?;
        report
            .upload(
                &self.config.unique_reports,
                &self.config.reports,
                &self.config.no_repro,
            )
            .await
    }
}
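Both this generic task and the libFuzzer task below plug into the same polling machinery through the Processor trait. The trait itself is defined in generic::input_poller and is not shown in this hunk, so the sketch below infers its shape from the two impls in this diff; LoggingProcessor, the URL, and the use of futures::executor are illustrative only.

use anyhow::Result;
use async_trait::async_trait;
use reqwest::Url;
use std::path::Path;

// Inferred trait shape; the real definition lives in generic::input_poller.
#[async_trait]
pub trait Processor {
    async fn process(&mut self, url: Url, input: &Path) -> Result<()>;
}

// A do-nothing processor: anything implementing `process` can be driven by
// the InputPoller, whether inputs arrive from a queue or a batch directory.
struct LoggingProcessor;

#[async_trait]
impl Processor for LoggingProcessor {
    async fn process(&mut self, url: Url, input: &Path) -> Result<()> {
        println!("would test {} (downloaded to {})", url, input.display());
        Ok(())
    }
}

fn main() -> Result<()> {
    let mut processor = LoggingProcessor;
    futures::executor::block_on(processor.process(
        Url::parse("https://example.com/crashes/input.bin")?,
        Path::new("input.bin"),
    ))
}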
149
src/agent/onefuzz-agent/src/tasks/report/libfuzzer_report.rs
Normal file
@ -0,0 +1,149 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use super::crash_report::*;
use crate::tasks::{
    config::{CommonConfig, SyncedDir},
    generic::input_poller::*,
    heartbeat::*,
};
use anyhow::Result;
use async_trait::async_trait;
use onefuzz::{blob::BlobUrl, libfuzzer::LibFuzzer, sha256};
use reqwest::Url;
use serde::Deserialize;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    sync::Arc,
};
use storage_queue::Message;

#[derive(Debug, Deserialize)]
pub struct Config {
    pub target_exe: PathBuf,
    pub target_env: HashMap<String, String>,
    // TODO: options are not yet used for crash reporting
    pub target_options: Vec<String>,
    pub target_timeout: Option<u64>,
    pub input_queue: Option<Url>,
    pub crashes: Option<SyncedDir>,
    pub reports: Option<SyncedDir>,
    pub unique_reports: SyncedDir,
    pub no_repro: Option<SyncedDir>,
    #[serde(default)]
    pub check_retry_count: u64,

    #[serde(flatten)]
    pub common: CommonConfig,
}

pub struct ReportTask {
    config: Arc<Config>,
    pub poller: InputPoller<Message>,
}

impl ReportTask {
    pub fn new(config: impl Into<Arc<Config>>) -> Self {
        let config = config.into();

        let working_dir = config.common.task_id.to_string();
        let poller = InputPoller::new(working_dir);

        Self { config, poller }
    }

    pub async fn run(&mut self) -> Result<()> {
        info!("Starting libFuzzer crash report task");
        let mut processor = AsanProcessor::new(self.config.clone());

        if let Some(crashes) = &self.config.crashes {
            self.poller.batch_process(&mut processor, crashes).await?;
        }

        if let Some(queue) = &self.config.input_queue {
            let callback = CallbackImpl::new(queue.clone(), processor);
            self.poller.run(callback).await?;
        }
        Ok(())
    }
}
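Unlike the generic task, which borrows its Config, this ReportTask shares it via Arc so the processor can own a handle across the queue callback. The impl Into<Arc<Config>> bound lets callers hand in either an owned Config or an existing Arc<Config> without an extra clone. A standalone sketch of that constructor pattern follows; the types in it are illustrative, not the repo's.

use std::sync::Arc;

struct Config {
    name: String,
}

struct ReportTask {
    config: Arc<Config>,
}

impl ReportTask {
    // Accepts anything convertible into Arc<Config>: an owned Config
    // (via the blanket From<T> for Arc<T>) or an Arc<Config> as-is.
    fn new(config: impl Into<Arc<Config>>) -> Self {
        Self {
            config: config.into(),
        }
    }
}

fn main() {
    let owned = Config { name: "owned".into() };
    let shared = Arc::new(Config { name: "shared".into() });

    let a = ReportTask::new(owned); // Config -> Arc<Config> via From
    let b = ReportTask::new(shared); // Arc<Config> passes through unchanged
    println!("{} / {}", a.config.name, b.config.name);
}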

pub struct AsanProcessor {
    config: Arc<Config>,
    heartbeat_client: Option<HeartbeatClient>,
}

impl AsanProcessor {
    pub fn new(config: Arc<Config>) -> Self {
        let heartbeat_client = config.common.init_heartbeat();

        Self {
            config,
            heartbeat_client,
        }
    }

    pub async fn test_input(&self, input_url: Url, input: &Path) -> Result<CrashTestResult> {
        self.heartbeat_client.alive();
        let fuzzer = LibFuzzer::new(
            &self.config.target_exe,
            &self.config.target_options,
            &self.config.target_env,
        );

        let task_id = self.config.common.task_id;
        let job_id = self.config.common.job_id;
        let input_blob = InputBlob::from(BlobUrl::new(input_url)?);
        let input_sha256 = sha256::digest_file(input).await?;

        let test_report = fuzzer
            .repro(
                input,
                self.config.target_timeout,
                self.config.check_retry_count,
            )
            .await?;

        match test_report.asan_log {
            Some(asan_log) => {
                let crash_report = CrashReport::new(
                    asan_log,
                    task_id,
                    job_id,
                    &self.config.target_exe,
                    input_blob,
                    input_sha256,
                );
                Ok(CrashTestResult::CrashReport(crash_report))
            }
            None => {
                let no_repro = NoCrash {
                    input_blob,
                    input_sha256,
                    executable: PathBuf::from(&self.config.target_exe),
                    task_id,
                    job_id,
                    tries: 1 + self.config.check_retry_count,
                    error: test_report.error.map(|e| format!("{}", e)),
                };

                Ok(CrashTestResult::NoRepro(no_repro))
            }
        }
    }
}

#[async_trait]
impl Processor for AsanProcessor {
    async fn process(&mut self, url: Url, input: &Path) -> Result<()> {
        let report = self.test_input(url, input).await?;
        report
            .upload(
                &self.config.unique_reports,
                &self.config.reports,
                &self.config.no_repro,
            )
            .await
    }
}
Some files were not shown because too many files have changed in this diff.