Mirror of https://github.com/microsoft/onefuzz.git (synced 2025-06-17 12:28:07 +00:00)
Capture crash dumps from libfuzzer, when provided (#2793)
Enables capturing crash dumps generated by ASAN at the point of failure. This helps in several ways:

- it provides a crash dump in the case that we cannot repro a failure later;
- some people have stated that crash dumps would be more useful to their team than the repro VM;
- we should be able to use these for automated submission to Watson or similar.

---

Crash dumps are automatically collected for libfuzzer jobs, if we find any. They should be activated by enabling crash dumps in ASAN, via:

- On Linux: `ASAN_OPTIONS=disable_coredump=0:abort_on_error=1:unmap_shadow_on_exit=1`. OneFuzz will override the (Ubuntu) crash dump options to generate core dumps instead, and then upload them.
- On Windows: `ASAN_SAVE_DUMPS=my_dump.dmp`. OneFuzz will look for any `*.dmp` files in the working directory and then upload them.

In both cases, the crash dump will be renamed to match the crashing input, if possible, and uploaded to a new `crashdumps` container.

---

Also updated: the "simple" LibFuzzer test is now compiled with `cl.exe` instead of `clang` on Windows, so that we are exercising the MSVC implementation of ASAN/LibFuzzer, and the CI image has been updated to `windows-2022`. The restriction to an old version of the Windows SDK has been removed.
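As a quick illustration of the settings described above, here is a hedged sketch (not part of the commit) of the per-platform environment a job might pass as `target_env`. The variable values come from the commit message; the submission call shown in the comment is an assumption modeled on the integration-test changes further down.

```python
# Hypothetical illustration: per-OS environment needed for crash dump capture.
linux_target_env = {
    # Let the kernel produce a core dump on an ASAN-detected failure.
    "ASAN_OPTIONS": "disable_coredump=0:abort_on_error=1:unmap_shadow_on_exit=1",
}

windows_target_env = {
    # Ask ASAN to write a minidump next to the target on failure.
    "ASAN_SAVE_DUMPS": "my_dump.dmp",
}

# Assumed usage, mirroring the integration tests in this commit:
# of.template.libfuzzer.basic(
#     "my-project", "my-target", "build-1", "my-pool",
#     target_exe="fuzz.exe",
#     target_env=windows_target_env,
# )
```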
Changes in this commit:

.github/workflows/ci.yml (35 lines changed)

@@ -527,7 +527,7 @@ jobs:
         uses: actions/cache@v3
         with:
           path: src/integration-tests/artifacts
-          key: integration-tests|linux|${{ hashFiles('src/integration-tests/**/*') }}
+          key: integration-tests|linux|${{ env.ACTIONS_CACHE_KEY_DATE }}|${{ hashFiles('src/integration-tests/**/*') }}
       - name: Build integration tests
         if: steps.cache-integration-tests.outputs.cache-hit != 'true'
         run: |

@@ -590,7 +590,7 @@ jobs:
           name: artifact-integration-tests-linux
           path: src/integration-tests/artifacts
   build-integration-tests-windows:
-    runs-on: windows-2019
+    runs-on: windows-2022
     steps:
       - uses: actions/checkout@v3
       - name: Cache integration tests

@@ -598,7 +598,9 @@ jobs:
         uses: actions/cache@v3
         with:
           path: src/integration-tests/artifacts
-          key: integration-tests|windows|${{ hashFiles('src/integration-tests/**/*') }}
+          key: integration-tests|windows|${{ env.ACTIONS_CACHE_KEY_DATE }}|${{ hashFiles('src/integration-tests/**/*') }}
+      - name: Setup C/C++ environment
+        uses: ilammy/msvc-dev-cmd@cec98b9d092141f74527d0afa6feb2af698cfe89 # pinned to v1.12.1
       - name: Build integration tests
         if: steps.cache-integration-tests.outputs.cache-hit != 'true'
         run: |

@@ -609,33 +611,6 @@ jobs:
           choco install make
           $env:Path += ";C:\Program Files\LLVM\bin;C:\ProgramData\chocolatey\bin"
-
-          # WORKAROUND: effectively downgrade the default Windows 10 SDK version.
-          #
-          # This ensures we link against a version of the SDK which won't trigger a
-          # startup bug in the LLVM-shipped ASAN runtime.
-
-          # Assume a default MSVC 2019 install path.
-          $MsvcDir = 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/Tools/MSVC'
-
-          # Assume that `$MsvcDir` only contains version-named subdirectories.
-          $MsvcVersion = ((Get-ChildItem $MsvcDir).name | Sort-Object -Descending)[0]
-          $MsvcLib = "${MsvcDir}/${MsvcVersion}/lib/x64"
-
-          # Known "good" (non-bug-surfacing) version.
-          $WindowsSdkVersion = '10.0.18362.0'
-
-          # Assume default install path.
-          $WindowsSdkDir = 'C:/Program Files (x86)/Windows Kits/10'
-          $WindowsSdkLib = "${WindowsSdkDir}/Lib/${WindowsSdkVersion}"
-          $WindowsSdkInclude = "${WindowsSdkDir}/Include/${WindowsSdkVersion}"
-
-          # Used by `clang.exe`.
-          $env:CPATH = $WindowsSdkInclude
-          $env:LIBRARY_PATH = "${MsvcLib};${WindowsSdkLib}/ucrt/x64;${WindowsSdkLib}/um/x64"
-
-          # Used by `link.exe`.
-          $env:LIB = $env:LIBRARY_PATH
-
           cd src/integration-tests

           mkdir artifacts/windows-libfuzzer
Webhook events documentation: `"crashdumps"` is added to each of the eight container-type enumerations in the event schemas ("If webhook is set to have Event Grid message format then the payload will look as..."; hunks at documentation lines 143, 1956, 2867, 3358, 3867, 4324, 4808, and 5422), in every case between `"inputs"` and `"no_repro"`:

      "coverage",
      "crashes",
      "inputs",
+     "crashdumps",
      "no_repro",
      "readonly_inputs",
      "reports",
Service (C#) changes:

@@ -106,6 +106,7 @@ public enum ContainerType {
     Analysis,
     Coverage,
     Crashes,
+    Crashdumps,
     Inputs,
     NoRepro,
     ReadonlyInputs,

@@ -1103,6 +1103,7 @@ public record TaskUnitConfig(
     public IContainerDef? Analysis { get; set; }
     public IContainerDef? Coverage { get; set; }
     public IContainerDef? Crashes { get; set; }
+    public IContainerDef? Crashdumps { get; set; }
     public IContainerDef? Inputs { get; set; }
     public IContainerDef? NoRepro { get; set; }
     public IContainerDef? ReadonlyInputs { get; set; }

@@ -118,6 +118,9 @@ public class Config : IConfig {
     case ContainerType.Crashes:
         config.Crashes = def;
         break;
+    case ContainerType.Crashdumps:
+        config.Crashdumps = def;
+        break;
     case ContainerType.Inputs:
         config.Inputs = def;
         break;

@@ -190,6 +190,12 @@ and @@ -279,6 +285,12 @@ public static class Defs { — a writable crashdumps container definition is added to both affected task definitions, after the existing crashes entry:
         Value: 1,
         Permissions: ContainerPermission.Write
     ),
+    new ContainerDefinition(
+        Type: ContainerType.Crashdumps,
+        Compare: Compare.Equal,
+        Value: 1,
+        Permissions: ContainerPermission.Write
+    ),
     new ContainerDefinition(
         Type: ContainerType.Inputs,
         Compare: Compare.Equal,
Agent (Rust) changes — local libfuzzer command and template:

@@ -24,6 +24,7 @@ use crate::tasks::utils::parse_key_value;
 pub const SETUP_DIR: &str = "setup_dir";
 pub const INPUTS_DIR: &str = "inputs_dir";
 pub const CRASHES_DIR: &str = "crashes_dir";
+pub const CRASHDUMPS_DIR: &str = "crashdumps_dir";
 pub const TARGET_WORKERS: &str = "target_workers";
 pub const REPORTS_DIR: &str = "reports_dir";
 pub const NO_REPRO_DIR: &str = "no_repro_dir";

@@ -6,8 +6,8 @@ use std::path::PathBuf;
 use crate::{
     local::common::{
         build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType,
-        SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CRASHES_DIR, INPUTS_DIR, TARGET_ENV,
-        TARGET_EXE, TARGET_OPTIONS, TARGET_WORKERS,
+        SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CRASHDUMPS_DIR, CRASHES_DIR, INPUTS_DIR,
+        TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_WORKERS,
     },
     tasks::{
         config::CommonConfig,

@@ -27,6 +27,8 @@ pub fn build_fuzz_config(
 ) -> Result<Config> {
     let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)?
         .monitor_count(&event_sender)?;
+    let crashdumps = get_synced_dir(CRASHDUMPS_DIR, common.job_id, common.task_id, args)?
+        .monitor_count(&event_sender)?;
     let inputs = get_synced_dir(INPUTS_DIR, common.job_id, common.task_id, args)?
         .monitor_count(&event_sender)?;

@@ -49,6 +51,7 @@ pub fn build_fuzz_config(
     inputs,
     readonly_inputs,
     crashes,
+    crashdumps,
     target_exe,
     target_env,
     target_options,

@@ -85,6 +88,10 @@ pub fn build_shared_args() -> Vec<Arg> {
         .long(CRASHES_DIR)
         .required(true)
         .value_parser(value_parser!(PathBuf)),
+    Arg::new(CRASHDUMPS_DIR)
+        .long(CRASHDUMPS_DIR)
+        .required(true)
+        .value_parser(value_parser!(PathBuf)),
     Arg::new(TARGET_WORKERS)
         .long(TARGET_WORKERS)
         .value_parser(value_parser!(u64)),

@@ -51,6 +51,7 @@ struct LibFuzzer {
     inputs: PathBuf,
     readonly_inputs: Vec<PathBuf>,
     crashes: PathBuf,
+    crashdumps: PathBuf,
     target_exe: PathBuf,
     target_env: HashMap<String, String>,
     target_options: Vec<String>,

@@ -143,6 +144,7 @@ impl TaskConfig {
     inputs: context.to_monitored_sync_dir("inputs", &config.inputs)?,
     readonly_inputs: Some(ri?),
     crashes: context.to_monitored_sync_dir("crashes", &config.crashes)?,
+    crashdumps: context.to_monitored_sync_dir("crashdumps", &config.crashdumps)?,
     target_exe: config.target_exe.clone(),
     target_env: config.target_env.clone(),
     target_options: config.target_options.clone(),
LibFuzzer fuzz task (Rust) changes:

@@ -13,12 +13,14 @@ use onefuzz::{
     syncdir::{continuous_sync, SyncOperation::Pull, SyncedDir},
 };
 use onefuzz_telemetry::{
-    Event::{new_coverage, new_result, runtime_stats},
+    Event::{new_coverage, new_crashdump, new_result, runtime_stats},
     EventData,
 };
 use serde::Deserialize;
 use std::{
     collections::HashMap,
+    ffi::{OsStr, OsString},
+    fmt::Debug,
     path::{Path, PathBuf},
     sync::Arc,
 };

@@ -72,6 +74,7 @@ pub struct Config<L: LibFuzzerType + Send + Sync + ?Sized> {
     pub inputs: SyncedDir,
     pub readonly_inputs: Option<Vec<SyncedDir>>,
     pub crashes: SyncedDir,
+    pub crashdumps: SyncedDir,
     pub target_exe: PathBuf,
     pub target_env: HashMap<String, String>,
     pub target_options: Vec<String>,

@@ -96,6 +99,7 @@ and @@ -103,6 +107,7 @@ — both the declaration of `pub struct LibFuzzerFuzzTask<L>` and its `impl` block gain the bound `Config<L>: Debug,` in their `where` clauses.

@@ -126,11 +131,19 @@ — the crashdumps directory is monitored alongside crashes, and the join is reformatted to include it:
     let resync = self.continuous_sync_inputs();
     let new_inputs = self.config.inputs.monitor_results(new_coverage, true);
     let new_crashes = self.config.crashes.monitor_results(new_result, true);
+    let new_crashdumps = self.config.crashdumps.monitor_results(new_crashdump, true);

     let (stats_sender, stats_receiver) = mpsc::unbounded_channel();
     let report_stats = report_runtime_stats(stats_receiver, hb_client);
     let fuzzers = self.run_fuzzers(Some(&stats_sender));
-    futures::try_join!(resync, new_inputs, new_crashes, fuzzers, report_stats)?;
+    futures::try_join!(
+        resync,
+        new_inputs,
+        new_crashes,
+        new_crashdumps,
+        fuzzers,
+        report_stats
+    )?;

     Ok(())
 }

@@ -235,8 +248,16 @@ — extra logging is added around the fuzzer launch, and on Linux the child PID is captured so the core dump can be located later:
         .for_each(|d| inputs.push(&d.local_path));
 }

+info!("config is: {:?}", self.config);

 let fuzzer = L::from_config(&self.config).await?;
 let mut running = fuzzer.fuzz(crash_dir.path(), local_inputs, &inputs).await?;

+info!("child is: {:?}", running);

+#[cfg(target_os = "linux")]
+let pid = running.id();

 let notify = Arc::new(Notify::new());

@@ -247,28 +268,44 @@ — the stderr read loop is restructured (the line buffer is now reused and cleared inside an inner loop), and the last buffered libfuzzer output lines and the discovered crash count are logged once the fuzzer exits:
 let mut stderr = BufReader::new(stderr);
 let mut libfuzzer_output: ArrayDeque<_, LOGS_BUFFER_SIZE, Wrapping> = ArrayDeque::new();
-loop {
-    let mut buf = vec![];
+{
+    let mut buf = vec![];
+    loop {
+        buf.clear();
         let bytes_read = stderr.read_until(b'\n', &mut buf).await?;
         if bytes_read == 0 && buf.is_empty() {
             break;
         }
         let line = String::from_utf8_lossy(&buf).to_string();
         if let Some(stats_sender) = stats_sender {
-            if let Err(err) = try_report_iter_update(stats_sender, worker_id, run_id, &line) {
+            if let Err(err) = try_report_iter_update(stats_sender, worker_id, run_id, &line)
+            {
                 error!("could not parse fuzzing interation update: {}", err);
             }
         }
         libfuzzer_output.push_back(line);
     }
+}

 let exit_status = running.wait().await;
 notify.notify_one();

 let exit_status: ExitStatus = exit_status?.into();

+info!(
+    "fuzzer exited, here are the last {} lines of stderr:",
+    libfuzzer_output.len()
+);
+info!("------------------------");
+for line in libfuzzer_output.iter() {
+    info!("{}", line.trim_end_matches('\n'));
+}
+info!("------------------------");

 let files = list_files(crash_dir.path()).await?;

+info!("found {} crashes", files.len());

 // If the target exits, crashes are required unless
 // 1. Exited cleanly (happens with -runs=N)
 // 2. expect_crash_on_failure is disabled

@@ -291,10 +328,21 @@ — before moving the crashing inputs, a destination name for the dump is chosen: if exactly one crashing input was found, the dump will be named after it, otherwise it is not renamed:
-for file in &files {
+// name the dumpfile after the crash file (if one)
+// otherwise don't rename it
+let dump_file_name: Option<OsString> = if files.len() == 1 {
+    files
+        .first()
+        .and_then(|f| f.file_name().map(OsStr::to_os_string))
+} else {
+    None
+};

+// move crashing inputs to output directory
+for file in files {
     if let Some(filename) = file.file_name() {
         let dest = self.config.crashes.local_path.join(filename);
-        if let Err(e) = tokio::fs::rename(file.clone(), dest.clone()).await {
+        if let Err(e) = tokio::fs::rename(file, dest.clone()).await {
             if !dest.exists() {
                 bail!(e)
             }

@@ -302,17 +350,83 @@ — the new crash dump collection. On Linux a `core.<pid>` file is expected in the working directory (collecting the dumps must be enabled by the template); on Windows any `*.dmp` file in the working directory is picked up. In both cases the dump is moved into the crashdumps directory, renamed after the crashing input when one was identified (Windows falls back to a random UUID name, since Windows dumps get a fixed filename):

+// check for core dumps on Linux:
+// note that collecting the dumps must be enabled by the template
+#[cfg(target_os = "linux")]
+if let Some(pid) = pid {
+    // expect crash dump to exist in CWD
+    let filename = format!("core.{pid}");
+    let dest_filename = dump_file_name.as_deref().unwrap_or(OsStr::new(&filename));
+    let dest_path = self.config.crashdumps.local_path.join(dest_filename);
+    match tokio::fs::rename(&filename, &dest_path).await {
+        Ok(()) => {
+            info!(
+                "moved crash dump {} to output directory: {}",
+                filename,
+                dest_path.display()
+            );
+        }
+        Err(e) => {
+            if e.kind() == std::io::ErrorKind::NotFound {
+                // okay, no crash dump found
+                info!("no crash dump found with name: {}", filename);
+            } else {
+                return Err(e).context("moving crash dump to output directory");
+            }
+        }
+    }
+} else {
+    warn!("no PID found for libfuzzer process");
+}

+// check for crash dumps on Windows:
+#[cfg(target_os = "windows")]
+{
+    let dumpfile_extension = Some(std::ffi::OsStr::new("dmp"));
+
+    let mut working_dir = tokio::fs::read_dir(".").await?;
+    let mut found_dump = false;
+    while let Some(next) = working_dir.next_entry().await? {
+        if next.path().extension() == dumpfile_extension {
+            // Windows dumps get a fixed filename so we will generate a random one,
+            // if there's no valid target crash name:
+            let dest_filename =
+                dump_file_name.unwrap_or_else(|| uuid::Uuid::new_v4().to_string().into());
+            let dest_path = self.config.crashdumps.local_path.join(&dest_filename);
+            tokio::fs::rename(next.path(), &dest_path)
+                .await
+                .context("moving crash dump to output directory")?;
+            info!(
+                "moved crash dump {} to output directory: {}",
+                next.path().display(),
+                dest_path.display()
+            );
+            found_dump = true;
+            break;
+        }
+    }
+
+    if !found_dump {
+        info!("no crash dump found with extension .dmp");
+    }
+}

 Ok(())
 }

 async fn init_directories(&self) -> Result<()> {
+    // input directories (init_pull):
     self.config.inputs.init_pull().await?;
-    self.config.crashes.init().await?;
     if let Some(readonly_inputs) = &self.config.readonly_inputs {
         for dir in readonly_inputs {
             dir.init_pull().await?;
         }
     }

+    // output directories (init):
+    self.config.crashes.init().await?;
+    self.config.crashdumps.init().await?;

     Ok(())
 }
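To make the naming rule above easier to follow, here is a small Python sketch (not part of the commit; names are hypothetical) of the same decision: the dump is renamed after the crashing input only when exactly one crashing input was produced, otherwise it keeps its own name or a random one.

```python
import shutil
import uuid
from pathlib import Path
from typing import List


def collect_crash_dump(crash_files: List[Path], dump: Path, crashdumps_dir: Path) -> Path:
    """Move a crash dump into the crashdumps directory (illustrative sketch)."""
    if len(crash_files) == 1:
        # Name the dump after the single crashing input so the two correlate.
        dest_name = crash_files[0].name
    else:
        # Windows minidumps have a fixed name, so fall back to a random one.
        dest_name = dump.name or str(uuid.uuid4())

    dest = crashdumps_dir / dest_name
    shutil.move(str(dump), str(dest))
    return dest
```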
Supervisor task (Rust) changes:

@@ -20,7 +20,7 @@ use onefuzz::{
         SyncedDir,
     },
 };
-use onefuzz_telemetry::Event::{new_coverage, new_result};
+use onefuzz_telemetry::Event::{new_coverage, new_crashdump, new_result};
 use serde::Deserialize;
 use std::{
     collections::HashMap,

@@ -41,6 +41,7 @@ use futures::TryFutureExt;
 pub struct SupervisorConfig {
     pub inputs: SyncedDir,
     pub crashes: SyncedDir,
+    pub crashdumps: SyncedDir,
     pub supervisor_exe: String,
     pub supervisor_env: HashMap<String, String>,
     pub supervisor_options: Vec<String>,

@@ -80,6 +81,14 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
     crashes.init().await?;
     let monitor_crashes = crashes.monitor_results(new_result, false);

+    // setup crashdumps
+    let crashdumps = SyncedDir {
+        local_path: runtime_dir.path().join("crashdumps"),
+        remote_path: config.crashdumps.remote_path.clone(),
+    };
+    crashdumps.init().await?;
+    let monitor_crashdumps = crashdumps.monitor_results(new_crashdump, false);

     // setup coverage
     if let Some(coverage) = &config.coverage {
         coverage.init_pull().await?;

@@ -139,6 +148,7 @@ — the start_supervisor call passes the new directory:
     &runtime_dir.path(),
     &config,
     &crashes,
+    &crashdumps,
     &inputs,
     reports_dir.path().to_path_buf(),
 )

@@ -170,6 +180,7 @@ — the join of monitor futures gains the new monitor:
     monitor_supervisor.map_err(|e| e.context("Failure in monitor_supervisor")),
     monitor_stats.map_err(|e| e.context("Failure in monitor_stats")),
     monitor_crashes.map_err(|e| e.context("Failure in monitor_crashes")),
+    monitor_crashdumps.map_err(|e| e.context("Failure in monitor_crashdumps")),
     monitor_inputs.map_err(|e| e.context("Failure in monitor_inputs")),
     inputs_sync_task.map_err(|e| e.context("Failure in continuous_sync_task")),
     monitor_reports_future.map_err(|e| e.context("Failure in monitor_reports_future")),

@@ -206,6 +217,7 @@ async fn start_supervisor(
     runtime_dir: impl AsRef<Path>,
     config: &SupervisorConfig,
     crashes: &SyncedDir,
+    crashdumps: &SyncedDir,
     inputs: &SyncedDir,
     reports_dir: PathBuf,
 ) -> Result<Child> {

@@ -221,6 +233,7 @@ — the Expand builder registers the new placeholder value:
     .supervisor_options(&config.supervisor_options)
     .runtime_dir(&runtime_dir)
     .crashes(&crashes.local_path)
+    .crashdumps(&crashdumps.local_path)
     .input_corpus(&inputs.local_path)
     .reports_dir(reports_dir)
     .setup_dir(&config.common.setup_dir)

@@ -245,15 +258,15 @@ and @@ -261,7 +274,7 @@ — the closure parameters of the remaining set_optional_ref calls (microsoft_telemetry_key, instance_telemetry_key, crashes_account, crashes_container) are renamed from `tester` to the more accurate `expand`; no behavioral change.

@@ -340,7 +353,6 @@ and @@ -348,6 +360,17 @@ mod tests — the test sets up a crashdumps SyncedDir alongside crashes (the corpus_dir_local declaration is moved below it):
     let fault_dir_temp = tempfile::tempdir().unwrap();
     let crashes_local = tempfile::tempdir().unwrap().path().into();
-    let corpus_dir_local = tempfile::tempdir().unwrap().path().into();
     let crashes = SyncedDir {
         local_path: crashes_local,
         remote_path: Some(
         ),
     };

+    let crashdumps_dir_temp = tempfile::tempdir().unwrap();
+    let crashdumps_local = tempfile::tempdir().unwrap().path().into();
+    let crashdumps = SyncedDir {
+        local_path: crashdumps_local,
+        remote_path: Some(
+            BlobContainerUrl::parse(Url::from_directory_path(crashdumps_dir_temp).unwrap())
+                .unwrap(),
+        ),
+    };

+    let corpus_dir_local = tempfile::tempdir().unwrap().path().into();
     let corpus_dir_temp = tempfile::tempdir().unwrap();
     let corpus_dir = SyncedDir {
         local_path: corpus_dir_local,

@@ -387,6 +410,7 @@ — the test SupervisorConfig gains the new field:
     target_options,
     inputs: corpus_dir.clone(),
     crashes: crashes.clone(),
+    crashdumps: crashdumps.clone(),
     tools: None,
     wait_for_files: None,
     stats_file: None,

@@ -419,7 +443,14 @@ — and the test's start_supervisor call is reformatted to pass it:
     },
 };

-let process = start_supervisor(runtime_dir, &config, &crashes, &corpus_dir, reports_dir)
+let process = start_supervisor(
+    runtime_dir,
+    &config,
+    &crashes,
+    &crashdumps,
+    &corpus_dir,
+    reports_dir,
+)
     .await
     .unwrap();
Telemetry and command expansion (Rust) changes:

@@ -77,6 +77,7 @@ pub enum Event {
     coverage_data,
     coverage_failed,
     new_result,
+    new_crashdump,
     new_coverage,
     runtime_stats,
     new_report,

@@ -93,6 +94,7 @@ impl Event {
     Self::coverage_data => "coverage_data",
     Self::coverage_failed => "coverage_failed",
     Self::new_coverage => "new_coverage",
+    Self::new_crashdump => "new_crashdump",
     Self::new_result => "new_result",
     Self::runtime_stats => "runtime_stats",
     Self::new_report => "new_report",

@@ -24,6 +24,7 @@ type MappingFn<'a> = Box<dyn Fn(&Expand<'a>) -> Result<ExpandedValue<'a>> + Send
 pub enum PlaceHolder {
     Input,
     Crashes,
+    Crashdumps,
     InputCorpus,
     GeneratedInputs,
     TargetExe,

@@ -59,6 +60,7 @@ impl PlaceHolder {
     match self {
         Self::Input => "{input}",
         Self::Crashes => "{crashes}",
+        Self::Crashdumps => "{crashdumps}",
         Self::InputCorpus => "{input_corpus}",
         Self::GeneratedInputs => "{generated_inputs}",
         Self::TargetExe => "{target_exe}",

@@ -234,6 +236,12 @@ impl<'a> Expand<'a> {
         self.set_value(PlaceHolder::Crashes, ExpandedValue::Path(path))
     }

+    pub fn crashdumps(self, arg: impl AsRef<Path>) -> Self {
+        let arg = arg.as_ref();
+        let path = String::from(arg.to_string_lossy());
+        self.set_value(PlaceHolder::Crashdumps, ExpandedValue::Path(path))
+    }

     pub fn input_path(self, arg: impl AsRef<Path>) -> Self {
         let arg = arg.as_ref();
         let path = String::from(arg.to_string_lossy());
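For readers unfamiliar with the placeholder mechanism above, here is a toy Python illustration (not the Rust Expand implementation) of how a `{crashdumps}` placeholder in supervisor or target options ends up substituted with the local directory path at launch time.

```python
from typing import Dict, List


def expand_options(options: List[str], values: Dict[str, str]) -> List[str]:
    """Replace each known placeholder occurring in the option strings."""
    expanded = []
    for opt in options:
        for placeholder, value in values.items():
            opt = opt.replace(placeholder, value)
        expanded.append(opt)
    return expanded


print(expand_options(
    ["-o", "{crashdumps}", "--input", "{input_corpus}"],
    {"{crashdumps}": "/onefuzz/crashdumps", "{input_corpus}": "/onefuzz/inputs"},
))
# ['-o', '/onefuzz/crashdumps', '--input', '/onefuzz/inputs']
```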
Python CLI and job templates:

@@ -1087,11 +1087,10 @@ class Tasks(Endpoint): — the container list construction becomes a list comprehension:
     if tags is None:
         tags = {}

-    containers_submit = []
-    for container_type, container in containers:
-        containers_submit.append(
-            models.TaskContainers(name=container, type=container_type)
-        )
+    containers_submit = [
+        models.TaskContainers(name=container, type=container_type)
+        for container_type, container in containers
+    ]

     config = models.TaskConfig(
         containers=containers_submit,

@@ -1210,6 +1209,7 @@ class JobContainers(Endpoint):
     SAFE_TO_REMOVE = [
         enums.ContainerType.crashes,
+        enums.ContainerType.crashdumps,
         enums.ContainerType.setup,
         enums.ContainerType.inputs,
         enums.ContainerType.reports,

@@ -45,6 +45,7 @@ class AFL(Command): — the AFL template gains a target_env parameter:
     supervisor_env: Optional[Dict[str, str]] = None,
     supervisor_input_marker: str = "@@",
     tags: Optional[Dict[str, str]] = None,
+    target_env: Optional[Dict[str, str]] = None,
     wait_for_running: bool = False,
     wait_for_files: Optional[List[ContainerType]] = None,
     afl_container: Optional[Container] = None,

@@ -162,6 +163,7 @@ class AFL(Command): — and passes it through:
     stats_format=StatsFormat.AFL,
     task_wait_for_files=ContainerType.inputs,
     tags=helper.tags,
+    target_env=target_env,
     debug=debug,
     ensemble_sync_delay=ensemble_sync_delay,
 )

@@ -132,6 +132,7 @@, @@ -753,6 +756,7 @@ and @@ -978,6 +983,7 @@ class Libfuzzer(Command): — each fuzzer container list gains the crashdumps container (the last of these uses helper.containers[...]):
     fuzzer_containers = [
         (ContainerType.setup, containers[ContainerType.setup]),
         (ContainerType.crashes, containers[ContainerType.crashes]),
+        (ContainerType.crashdumps, containers[ContainerType.crashdumps]),
         (ContainerType.inputs, containers[ContainerType.inputs]),
     ]

@@ -416,6 +417,7 @@, @@ -726,6 +728,7 @@ and @@ -960,6 +964,7 @@ — `ContainerType.crashdumps,` is likewise added to the job's container-type lists, after `ContainerType.crashes,`.
Integration tests (Python):

@@ -87,6 +87,7 @@ class Integration(BaseModel):
     target_class: Optional[str]
     target_method: Optional[str]
     setup_dir: Optional[str]
+    target_env: Optional[Dict[str, str]]

@@ -106,12 +107,21 @@ TARGETS — for the Linux libfuzzer target the crashdumps container check is left disabled for now, a target_env hook is added (with the ASAN options commented out for the same reason), and `--only_asan_failures` is passed to the target:
     ContainerType.unique_reports: 1,
     ContainerType.coverage: 1,
     ContainerType.inputs: 2,
+    # TODO: crashdumps are intermittently not captured
+    # during integration tests on Linux. This requires more
+    # investigation before we can fully enable this test.
+    # ContainerType.crashdumps: 1,
     ContainerType.extra_output: 1,
 },
 reboot_after_setup=True,
 inject_fake_regression=True,
+target_env={
+    # same TODO
+    # "ASAN_OPTIONS": "disable_coredump=0:abort_on_error=1:unmap_shadow_on_exit=1"
+},
 fuzzing_target_options=[
     "--test:{extra_setup_dir}",
+    "--only_asan_failures",
     "--write_test_file={extra_output_dir}/test.txt",
 ],
 ),

@@ -213,11 +223,15 @@ — the Windows libfuzzer target expects one blob in the crashdumps container and enables dump saving via ASAN_SAVE_DUMPS:
     ContainerType.inputs: 2,
     ContainerType.unique_reports: 1,
     ContainerType.coverage: 1,
+    ContainerType.crashdumps: 1,
     ContainerType.extra_output: 1,
 },
 inject_fake_regression=True,
+target_env={"ASAN_SAVE_DUMPS": "my_dump.dmp"},
+# we should set unmap_shadow_on_exit=1 but it fails on Windows at the moment
 fuzzing_target_options=[
     "--test:{extra_setup_dir}",
+    "--only_asan_failures",
     "--write_test_file={extra_output_dir}/test.txt",
 ],
 ),

@@ -631,6 +645,7 @@, @@ -653,6 +668,7 @@, @@ -665,6 +681,7 @@, @@ -679,6 +696,7 @@ and @@ -692,6 +710,7 @@ class TestOnefuzz — `target_env=config.target_env,` is now passed through when launching each template (libfuzzer basic, libfuzzer_dotnet, libfuzzer qemu_user, radamsa, and afl).

@@ -798,14 +817,18 @@ — the container-check loop reports progress more usefully:
 for job_id in check_containers:
+    job_name = jobs[job_id].config.name
     finished_containers: Set[Container] = set()
     for container_name, container_impl in check_containers[job_id].items():
-        container_client, count = container_impl
-        if len(container_client.list_blobs()) >= count:
+        container_client, required_count = container_impl
+        found_count = len(container_client.list_blobs())
+        if found_count >= required_count:
             clear()
             self.logger.info(
-                "found files for %s - %s",
-                jobs[job_id].config.name,
+                "found %d files (needed %d) for %s - %s",
+                found_count,
+                required_count,
+                job_name,
                 container_name,
             )
             finished_containers.add(container_name)

@@ -813,6 +836,12 @@ — and logs the containers it is still waiting for:
     for container_name in finished_containers:
         del check_containers[job_id][container_name]

+    to_check = check_containers[job_id].keys()
+    if len(to_check) > 0:
+        self.logger.info(
+            "%s - still waiting for %s", job_name, ", ".join(to_check)
+        )

 scalesets = self.of.scalesets.list()
 for job_id in job_tasks:
     finished_tasks: Set[UUID] = set()
Integration-test Makefile — on Windows the simple libfuzzer target is now built with cl.exe and MSVC-style flags:

@@ -1,6 +1,10 @@
-CC=clang
+ifeq ($(OS),Windows_NT)
+CFLAGS=/O2 /Zi /fsanitize=fuzzer /fsanitize=address
+CC=cl
+else
 CFLAGS=-g3 -fsanitize=fuzzer -fsanitize=address
+CC=clang
+endif

 all: fuzz.exe
Simple libfuzzer test (main.c) — the file is reformatted (braces on their own lines, one statement per line) so it builds cleanly with cl.exe as well as clang, and it gains a `--only_asan_failures` flag. The functional changes, shown in the new formatting:

@@ -7,88 +7,146 @@
 #include <stdbool.h>
 #include <string.h>

+bool only_asan = false;

 int LLVMFuzzerInitialize(int *argc, char ***argv)
 {
     const int num_args = *argc;
     char **args = *argv;

     for (int i = 0; i < num_args; ++i)
     {
         // allow an argument --write_test_file=xxx.txt to be set
         // which is useful for exercising some OneFuzz features in integration tests
         const char *test_file_arg = "--write_test_file=";
         // look for argument starting with --write_test_file=
         if (strncmp(args[i], test_file_arg, strlen(test_file_arg)) == 0)
         {
             // extract filename part
             const char *file_name = args[i] + strlen(test_file_arg);
             // write file
             FILE *output = fopen(file_name, "a");
             if (!output)
             {
                 perror("failed to open file");
                 return -1;
             }
             fputs("Hello from simple fuzzer\n", output);
             fclose(output);
         }

+        // an argument to only allow generating ASAN failures
+        const char *asan_only_arg = "--only_asan_failures";
+        if (strcmp(args[i], asan_only_arg) == 0)
+        {
+            only_asan = true;
+        }
     }

     return 0;
 }

(The old `arg_name` variable is renamed to `test_file_arg`, and the early `break;` after writing the test file is removed so the remaining arguments are still scanned for `--only_asan_failures`.)

In LLVMFuzzerTestOneInput, the multi-statement one-liners for each crash case (segv, stack-buffer-underflow, stack-buffer-overflow, bad-free, double-free, heap-use-after-free, heap-buffer-overflow, fpe) are split onto separate lines without behavioral change, and case '8' now respects the new flag, since abort() is not an ASAN-reported failure:

 case '8':
 {
+    if (only_asan)
+    {
+        break;
+    }

     abort();
     break;
 }
onefuzztypes enums (Python):

@@ -219,6 +219,7 @@ class ContainerType(Enum):
     coverage = "coverage"
     crashes = "crashes"
     inputs = "inputs"
+    crashdumps = "crashdumps"
     no_repro = "no_repro"
     readonly_inputs = "readonly_inputs"
     reports = "reports"

@@ -237,6 +238,7 @@ — the new value is also added to the classmethod's container-type list:
     cls.analysis,
     cls.coverage,
     cls.crashes,
+    cls.crashdumps,
     cls.inputs,
     cls.no_repro,
     cls.readonly_inputs,
Agent setup script (Linux) — core dump collection is now configured with a single sysctl call (which also suffixes core files with the PID) plus an unlimited core file size; the docker hint message is updated to match:

@@ -16,21 +16,24 @@ logger "onefuzz: starting up onefuzz"

 #check if we are running in docker
 if [ -f /.dockerenv ]; then
-    echo "Running in docker:
-    to optimize the experience make sure the host os is setup properly. with the following command
-    # use core files, not external crash handler
-    echo core | sudo tee /proc/sys/kernel/core_pattern
-    # disable ASLR
-    echo 0 | sudo tee /proc/sys/kernel/randomize_va_space
-    # set core dumping to default behavior
-    echo 1 | sudo tee /proc/sys/fs/suid_dumpable"
+    echo "Running in docker: to optimize the experience make sure the host OS is setup properly, use the following commands:
+    # 1) use core files, not external crash handler
+    # 2) suffix core with PID: will be 'core.XXXX'
+    # 3) disable ASLR
+    # 4) set core dumping to default behavior
+    sudo sysctl -w 'kernel.core_pattern=core' 'kernel.core_uses_pid=1' 'kernel.randomize_va_space=0' 'fs.suid_dumpable=1'
+
+    # unlimit core files
+    ulimit -c unlimited"
 else
-    # use core files, not external crash handler
-    echo core | sudo tee /proc/sys/kernel/core_pattern
-    # disable ASLR
-    echo 0 | sudo tee /proc/sys/kernel/randomize_va_space
-    # set core dumping to default behavior
-    echo 1 | sudo tee /proc/sys/fs/suid_dumpable
+    # 1) use core files, not external crash handler
+    # 2) suffix core with PID: will be 'core.XXXX'
+    # 3) disable ASLR
+    # 4) set core dumping to default behavior
+    sudo sysctl -w 'kernel.core_pattern=core' 'kernel.core_uses_pid=1' 'kernel.randomize_va_space=0' 'fs.suid_dumpable=1'
+
+    # unlimit core files
+    ulimit -c unlimited
 fi

 cd /onefuzz
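For reference, here is a small, hedged Python check (not part of the commit) of the kernel settings the script above configures; it simply reads back the four values via /proc to confirm the host will produce plain `core.<pid>` files.

```python
from pathlib import Path

# Values the setup script sets via `sysctl -w`; reading them back through /proc
# is a convenient way to confirm the host is configured for core.<pid> files.
EXPECTED = {
    "/proc/sys/kernel/core_pattern": "core",
    "/proc/sys/kernel/core_uses_pid": "1",
    "/proc/sys/kernel/randomize_va_space": "0",
    "/proc/sys/fs/suid_dumpable": "1",
}


def check_core_dump_settings() -> bool:
    ok = True
    for path, expected in EXPECTED.items():
        actual = Path(path).read_text().strip()
        if actual != expected:
            print(f"{path} = {actual!r}, expected {expected!r}")
            ok = False
    return ok


if __name__ == "__main__":
    check_core_dump_settings()
```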