Capture crash dumps from libfuzzer, when provided (#2793)

Enables capturing crashdumps generated by ASAN at point of failure.

This helps in several ways:
- provides a crash dump in the case that we cannot repro a failure later
- some people have stated that crash dumps would be more useful to their team than the repro VM
- we should be able to use these for automated submission to Watson or similar

---

Crash dumps are automatically collected for (libfuzzer) jobs, if we find any. They should be activated by enabling crash dumps in ASAN, via:

- On Linux: `ASAN_OPTIONS=disable_coredump=0:abort_on_error=1:unmap_shadow_on_exit=1`
  - OneFuzz will override the (Ubuntu) crash dump options to generate core dumps instead and then upload them.
- On Windows: `ASAN_SAVE_DUMPS=my_dump.dmp`
  - OneFuzz will look for any `*.dmp` files in the working directory and then upload them.

In both cases, the crash dump will be renamed to match the crashing input, if possible, and uploaded to a new `crashdumps` container.

---

Also updated: the “simple” LibFuzzer test has been updated to be compiled with `cl.exe` instead of `clang` on Windows, so that we are exercising the MSVC implementation of ASAN/LibFuzzer, and the CI image has been updated to `windows-2022`. The restriction to an old version of the Windows SDK has been removed.
This commit is contained in:
George Pollard
2023-08-10 09:55:27 +12:00
committed by GitHub
parent 74ae105074
commit a364051923
21 changed files with 437 additions and 168 deletions

View File

@ -527,7 +527,7 @@ jobs:
uses: actions/cache@v3
with:
path: src/integration-tests/artifacts
key: integration-tests|linux|${{ hashFiles('src/integration-tests/**/*') }}
key: integration-tests|linux|${{ env.ACTIONS_CACHE_KEY_DATE }}|${{ hashFiles('src/integration-tests/**/*') }}
- name: Build integration tests
if: steps.cache-integration-tests.outputs.cache-hit != 'true'
run: |
@ -590,7 +590,7 @@ jobs:
name: artifact-integration-tests-linux
path: src/integration-tests/artifacts
build-integration-tests-windows:
runs-on: windows-2019
runs-on: windows-2022
steps:
- uses: actions/checkout@v3
- name: Cache integration tests
@ -598,7 +598,9 @@ jobs:
uses: actions/cache@v3
with:
path: src/integration-tests/artifacts
key: integration-tests|windows|${{ hashFiles('src/integration-tests/**/*') }}
key: integration-tests|windows|${{ env.ACTIONS_CACHE_KEY_DATE }}|${{ hashFiles('src/integration-tests/**/*') }}
- name: Setup C/C++ environment
uses: ilammy/msvc-dev-cmd@cec98b9d092141f74527d0afa6feb2af698cfe89 # pinned to v1.12.1
- name: Build integration tests
if: steps.cache-integration-tests.outputs.cache-hit != 'true'
run: |
@ -609,33 +611,6 @@ jobs:
choco install make
$env:Path += ";C:\Program Files\LLVM\bin;C:\ProgramData\chocolatey\bin"
# WORKAROUND: effectively downgrade the default Windows 10 SDK version.
#
# This ensures we link against a version of the SDK which won't trigger a
# startup bug in the LLVM-shipped ASAN runtime.
# Assume a default MSVC 2019 install path.
$MsvcDir = 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/Tools/MSVC'
# Assume that `$MsvcDir` only contains version-named subdirectories.
$MsvcVersion = ((Get-ChildItem $MsvcDir).name | Sort-Object -Descending)[0]
$MsvcLib = "${MsvcDir}/${MsvcVersion}/lib/x64"
# Known "good" (non-bug-surfacing) version.
$WindowsSdkVersion = '10.0.18362.0'
# Assume default install path.
$WindowsSdkDir = 'C:/Program Files (x86)/Windows Kits/10'
$WindowsSdkLib = "${WindowsSdkDir}/Lib/${WindowsSdkVersion}"
$WindowsSdkInclude = "${WindowsSdkDir}/Include/${WindowsSdkVersion}"
# Used by `clang.exe`.
$env:CPATH = $WindowsSdkInclude
$env:LIBRARY_PATH = "${MsvcLib};${WindowsSdkLib}/ucrt/x64;${WindowsSdkLib}/um/x64"
# Used by `link.exe`.
$env:LIB = $env:LIBRARY_PATH
cd src/integration-tests
mkdir artifacts/windows-libfuzzer

View File

@ -143,6 +143,7 @@ If webhook is set to have Event Grid message format then the payload will look a
"coverage",
"crashes",
"inputs",
"crashdumps",
"no_repro",
"readonly_inputs",
"reports",
@ -1956,6 +1957,7 @@ If webhook is set to have Event Grid message format then the payload will look a
"coverage",
"crashes",
"inputs",
"crashdumps",
"no_repro",
"readonly_inputs",
"reports",
@ -2867,6 +2869,7 @@ If webhook is set to have Event Grid message format then the payload will look a
"coverage",
"crashes",
"inputs",
"crashdumps",
"no_repro",
"readonly_inputs",
"reports",
@ -3358,6 +3361,7 @@ If webhook is set to have Event Grid message format then the payload will look a
"coverage",
"crashes",
"inputs",
"crashdumps",
"no_repro",
"readonly_inputs",
"reports",
@ -3867,6 +3871,7 @@ If webhook is set to have Event Grid message format then the payload will look a
"coverage",
"crashes",
"inputs",
"crashdumps",
"no_repro",
"readonly_inputs",
"reports",
@ -4324,6 +4329,7 @@ If webhook is set to have Event Grid message format then the payload will look a
"coverage",
"crashes",
"inputs",
"crashdumps",
"no_repro",
"readonly_inputs",
"reports",
@ -4808,6 +4814,7 @@ If webhook is set to have Event Grid message format then the payload will look a
"coverage",
"crashes",
"inputs",
"crashdumps",
"no_repro",
"readonly_inputs",
"reports",
@ -5422,6 +5429,7 @@ If webhook is set to have Event Grid message format then the payload will look a
"coverage",
"crashes",
"inputs",
"crashdumps",
"no_repro",
"readonly_inputs",
"reports",

View File

@ -106,6 +106,7 @@ public enum ContainerType {
Analysis,
Coverage,
Crashes,
Crashdumps,
Inputs,
NoRepro,
ReadonlyInputs,

View File

@ -1103,6 +1103,7 @@ public record TaskUnitConfig(
public IContainerDef? Analysis { get; set; }
public IContainerDef? Coverage { get; set; }
public IContainerDef? Crashes { get; set; }
public IContainerDef? Crashdumps { get; set; }
public IContainerDef? Inputs { get; set; }
public IContainerDef? NoRepro { get; set; }
public IContainerDef? ReadonlyInputs { get; set; }

View File

@ -118,6 +118,9 @@ public class Config : IConfig {
case ContainerType.Crashes:
config.Crashes = def;
break;
case ContainerType.Crashdumps:
config.Crashdumps = def;
break;
case ContainerType.Inputs:
config.Inputs = def;
break;

View File

@ -190,6 +190,12 @@ public static class Defs {
Value: 1,
Permissions: ContainerPermission.Write
),
new ContainerDefinition(
Type:ContainerType.Crashdumps,
Compare: Compare.Equal,
Value:1,
Permissions: ContainerPermission.Write
),
new ContainerDefinition(
Type: ContainerType.Inputs,
Compare: Compare.Equal,
@ -279,6 +285,12 @@ public static class Defs {
Value: 1,
Permissions: ContainerPermission.Write
),
new ContainerDefinition(
Type:ContainerType.Crashdumps,
Compare: Compare.Equal,
Value:1,
Permissions: ContainerPermission.Write
),
new ContainerDefinition(
Type: ContainerType.Inputs,
Compare: Compare.Equal,

View File

@ -24,6 +24,7 @@ use crate::tasks::utils::parse_key_value;
pub const SETUP_DIR: &str = "setup_dir";
pub const INPUTS_DIR: &str = "inputs_dir";
pub const CRASHES_DIR: &str = "crashes_dir";
pub const CRASHDUMPS_DIR: &str = "crashdumps_dir";
pub const TARGET_WORKERS: &str = "target_workers";
pub const REPORTS_DIR: &str = "reports_dir";
pub const NO_REPRO_DIR: &str = "no_repro_dir";

View File

@ -6,8 +6,8 @@ use std::path::PathBuf;
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType,
SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CRASHES_DIR, INPUTS_DIR, TARGET_ENV,
TARGET_EXE, TARGET_OPTIONS, TARGET_WORKERS,
SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CRASHDUMPS_DIR, CRASHES_DIR, INPUTS_DIR,
TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_WORKERS,
},
tasks::{
config::CommonConfig,
@ -27,6 +27,8 @@ pub fn build_fuzz_config(
) -> Result<Config> {
let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)?
.monitor_count(&event_sender)?;
let crashdumps = get_synced_dir(CRASHDUMPS_DIR, common.job_id, common.task_id, args)?
.monitor_count(&event_sender)?;
let inputs = get_synced_dir(INPUTS_DIR, common.job_id, common.task_id, args)?
.monitor_count(&event_sender)?;
@ -49,6 +51,7 @@ pub fn build_fuzz_config(
inputs,
readonly_inputs,
crashes,
crashdumps,
target_exe,
target_env,
target_options,
@ -85,6 +88,10 @@ pub fn build_shared_args() -> Vec<Arg> {
.long(CRASHES_DIR)
.required(true)
.value_parser(value_parser!(PathBuf)),
Arg::new(CRASHDUMPS_DIR)
.long(CRASHDUMPS_DIR)
.required(true)
.value_parser(value_parser!(PathBuf)),
Arg::new(TARGET_WORKERS)
.long(TARGET_WORKERS)
.value_parser(value_parser!(u64)),

View File

@ -51,6 +51,7 @@ struct LibFuzzer {
inputs: PathBuf,
readonly_inputs: Vec<PathBuf>,
crashes: PathBuf,
crashdumps: PathBuf,
target_exe: PathBuf,
target_env: HashMap<String, String>,
target_options: Vec<String>,
@ -143,6 +144,7 @@ impl TaskConfig {
inputs: context.to_monitored_sync_dir("inputs", &config.inputs)?,
readonly_inputs: Some(ri?),
crashes: context.to_monitored_sync_dir("crashes", &config.crashes)?,
crashdumps: context.to_monitored_sync_dir("crashdumps", &config.crashdumps)?,
target_exe: config.target_exe.clone(),
target_env: config.target_env.clone(),
target_options: config.target_options.clone(),

View File

@ -13,12 +13,14 @@ use onefuzz::{
syncdir::{continuous_sync, SyncOperation::Pull, SyncedDir},
};
use onefuzz_telemetry::{
Event::{new_coverage, new_result, runtime_stats},
Event::{new_coverage, new_crashdump, new_result, runtime_stats},
EventData,
};
use serde::Deserialize;
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
fmt::Debug,
path::{Path, PathBuf},
sync::Arc,
};
@ -72,6 +74,7 @@ pub struct Config<L: LibFuzzerType + Send + Sync + ?Sized> {
pub inputs: SyncedDir,
pub readonly_inputs: Option<Vec<SyncedDir>>,
pub crashes: SyncedDir,
pub crashdumps: SyncedDir,
pub target_exe: PathBuf,
pub target_env: HashMap<String, String>,
pub target_options: Vec<String>,
@ -96,6 +99,7 @@ pub struct Config<L: LibFuzzerType + Send + Sync + ?Sized> {
pub struct LibFuzzerFuzzTask<L>
where
L: LibFuzzerType,
Config<L>: Debug,
{
config: Config<L>,
}
@ -103,6 +107,7 @@ where
impl<L> LibFuzzerFuzzTask<L>
where
L: LibFuzzerType,
Config<L>: Debug,
{
pub fn new(config: Config<L>) -> Result<Self> {
Ok(Self { config })
@ -126,11 +131,19 @@ where
let resync = self.continuous_sync_inputs();
let new_inputs = self.config.inputs.monitor_results(new_coverage, true);
let new_crashes = self.config.crashes.monitor_results(new_result, true);
let new_crashdumps = self.config.crashdumps.monitor_results(new_crashdump, true);
let (stats_sender, stats_receiver) = mpsc::unbounded_channel();
let report_stats = report_runtime_stats(stats_receiver, hb_client);
let fuzzers = self.run_fuzzers(Some(&stats_sender));
futures::try_join!(resync, new_inputs, new_crashes, fuzzers, report_stats)?;
futures::try_join!(
resync,
new_inputs,
new_crashes,
new_crashdumps,
fuzzers,
report_stats
)?;
Ok(())
}
@ -235,8 +248,16 @@ where
.for_each(|d| inputs.push(&d.local_path));
}
info!("config is: {:?}", self.config);
let fuzzer = L::from_config(&self.config).await?;
let mut running = fuzzer.fuzz(crash_dir.path(), local_inputs, &inputs).await?;
info!("child is: {:?}", running);
#[cfg(target_os = "linux")]
let pid = running.id();
let notify = Arc::new(Notify::new());
// Splitting borrow.
@ -247,28 +268,44 @@ where
let mut stderr = BufReader::new(stderr);
let mut libfuzzer_output: ArrayDeque<_, LOGS_BUFFER_SIZE, Wrapping> = ArrayDeque::new();
loop {
{
let mut buf = vec![];
loop {
buf.clear();
let bytes_read = stderr.read_until(b'\n', &mut buf).await?;
if bytes_read == 0 && buf.is_empty() {
break;
}
let line = String::from_utf8_lossy(&buf).to_string();
if let Some(stats_sender) = stats_sender {
if let Err(err) = try_report_iter_update(stats_sender, worker_id, run_id, &line) {
if let Err(err) = try_report_iter_update(stats_sender, worker_id, run_id, &line)
{
error!("could not parse fuzzing interation update: {}", err);
}
}
libfuzzer_output.push_back(line);
}
}
let exit_status = running.wait().await;
notify.notify_one();
let exit_status: ExitStatus = exit_status?.into();
info!(
"fuzzer exited, here are the last {} lines of stderr:",
libfuzzer_output.len()
);
info!("------------------------");
for line in libfuzzer_output.iter() {
info!("{}", line.trim_end_matches('\n'));
}
info!("------------------------");
let files = list_files(crash_dir.path()).await?;
info!("found {} crashes", files.len());
// If the target exits, crashes are required unless
// 1. Exited cleanly (happens with -runs=N)
// 2. expect_crash_on_failure is disabled
@ -291,10 +328,21 @@ where
}
}
for file in &files {
// name the dumpfile after the crash file (if one)
// otherwise don't rename it
let dump_file_name: Option<OsString> = if files.len() == 1 {
files
.first()
.and_then(|f| f.file_name().map(OsStr::to_os_string))
} else {
None
};
// move crashing inputs to output directory
for file in files {
if let Some(filename) = file.file_name() {
let dest = self.config.crashes.local_path.join(filename);
if let Err(e) = tokio::fs::rename(file.clone(), dest.clone()).await {
if let Err(e) = tokio::fs::rename(file, dest.clone()).await {
if !dest.exists() {
bail!(e)
}
@ -302,17 +350,83 @@ where
}
}
// check for core dumps on Linux:
// note that collecting the dumps must be enabled by the template
#[cfg(target_os = "linux")]
if let Some(pid) = pid {
// expect crash dump to exist in CWD
let filename = format!("core.{pid}");
let dest_filename = dump_file_name.as_deref().unwrap_or(OsStr::new(&filename));
let dest_path = self.config.crashdumps.local_path.join(dest_filename);
match tokio::fs::rename(&filename, &dest_path).await {
Ok(()) => {
info!(
"moved crash dump {} to output directory: {}",
filename,
dest_path.display()
);
}
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
// okay, no crash dump found
info!("no crash dump found with name: {}", filename);
} else {
return Err(e).context("moving crash dump to output directory");
}
}
}
} else {
warn!("no PID found for libfuzzer process");
}
// check for crash dumps on Windows:
#[cfg(target_os = "windows")]
{
let dumpfile_extension = Some(std::ffi::OsStr::new("dmp"));
let mut working_dir = tokio::fs::read_dir(".").await?;
let mut found_dump = false;
while let Some(next) = working_dir.next_entry().await? {
if next.path().extension() == dumpfile_extension {
// Windows dumps get a fixed filename so we will generate a random one,
// if there's no valid target crash name:
let dest_filename =
dump_file_name.unwrap_or_else(|| uuid::Uuid::new_v4().to_string().into());
let dest_path = self.config.crashdumps.local_path.join(&dest_filename);
tokio::fs::rename(next.path(), &dest_path)
.await
.context("moving crash dump to output directory")?;
info!(
"moved crash dump {} to output directory: {}",
next.path().display(),
dest_path.display()
);
found_dump = true;
break;
}
}
if !found_dump {
info!("no crash dump found with extension .dmp");
}
}
Ok(())
}
async fn init_directories(&self) -> Result<()> {
// input directories (init_pull):
self.config.inputs.init_pull().await?;
self.config.crashes.init().await?;
if let Some(readonly_inputs) = &self.config.readonly_inputs {
for dir in readonly_inputs {
dir.init_pull().await?;
}
}
// output directories (init):
self.config.crashes.init().await?;
self.config.crashdumps.init().await?;
Ok(())
}

View File

@ -20,7 +20,7 @@ use onefuzz::{
SyncedDir,
},
};
use onefuzz_telemetry::Event::{new_coverage, new_result};
use onefuzz_telemetry::Event::{new_coverage, new_crashdump, new_result};
use serde::Deserialize;
use std::{
collections::HashMap,
@ -41,6 +41,7 @@ use futures::TryFutureExt;
pub struct SupervisorConfig {
pub inputs: SyncedDir,
pub crashes: SyncedDir,
pub crashdumps: SyncedDir,
pub supervisor_exe: String,
pub supervisor_env: HashMap<String, String>,
pub supervisor_options: Vec<String>,
@ -80,6 +81,14 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
crashes.init().await?;
let monitor_crashes = crashes.monitor_results(new_result, false);
// setup crashdumps
let crashdumps = SyncedDir {
local_path: runtime_dir.path().join("crashdumps"),
remote_path: config.crashdumps.remote_path.clone(),
};
crashdumps.init().await?;
let monitor_crashdumps = crashdumps.monitor_results(new_crashdump, false);
// setup coverage
if let Some(coverage) = &config.coverage {
coverage.init_pull().await?;
@ -139,6 +148,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
&runtime_dir.path(),
&config,
&crashes,
&crashdumps,
&inputs,
reports_dir.path().to_path_buf(),
)
@ -170,6 +180,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
monitor_supervisor.map_err(|e| e.context("Failure in monitor_supervisor")),
monitor_stats.map_err(|e| e.context("Failure in monitor_stats")),
monitor_crashes.map_err(|e| e.context("Failure in monitor_crashes")),
monitor_crashdumps.map_err(|e| e.context("Failure in monitor_crashdumps")),
monitor_inputs.map_err(|e| e.context("Failure in monitor_inputs")),
inputs_sync_task.map_err(|e| e.context("Failure in continuous_sync_task")),
monitor_reports_future.map_err(|e| e.context("Failure in monitor_reports_future")),
@ -206,6 +217,7 @@ async fn start_supervisor(
runtime_dir: impl AsRef<Path>,
config: &SupervisorConfig,
crashes: &SyncedDir,
crashdumps: &SyncedDir,
inputs: &SyncedDir,
reports_dir: PathBuf,
) -> Result<Child> {
@ -221,6 +233,7 @@ async fn start_supervisor(
.supervisor_options(&config.supervisor_options)
.runtime_dir(&runtime_dir)
.crashes(&crashes.local_path)
.crashdumps(&crashdumps.local_path)
.input_corpus(&inputs.local_path)
.reports_dir(reports_dir)
.setup_dir(&config.common.setup_dir)
@ -245,15 +258,15 @@ async fn start_supervisor(
.set_optional_ref(&config.target_options, |expand, target_options| {
expand.target_options(target_options)
})
.set_optional_ref(&config.common.microsoft_telemetry_key, |tester, key| {
tester.microsoft_telemetry_key(key)
.set_optional_ref(&config.common.microsoft_telemetry_key, |expand, key| {
expand.microsoft_telemetry_key(key)
})
.set_optional_ref(&config.common.instance_telemetry_key, |tester, key| {
tester.instance_telemetry_key(key)
.set_optional_ref(&config.common.instance_telemetry_key, |expand, key| {
expand.instance_telemetry_key(key)
})
.set_optional_ref(
&config.crashes.remote_path.clone().and_then(|u| u.account()),
|tester, account| tester.crashes_account(account),
|expand, account| expand.crashes_account(account),
)
.set_optional_ref(
&config
@ -261,7 +274,7 @@ async fn start_supervisor(
.remote_path
.clone()
.and_then(|u| u.container()),
|tester, container| tester.crashes_container(container),
|expand, container| expand.crashes_container(container),
);
let supervisor_path = expand.evaluate_value(&config.supervisor_exe)?;
@ -340,7 +353,6 @@ mod tests {
let fault_dir_temp = tempfile::tempdir().unwrap();
let crashes_local = tempfile::tempdir().unwrap().path().into();
let corpus_dir_local = tempfile::tempdir().unwrap().path().into();
let crashes = SyncedDir {
local_path: crashes_local,
remote_path: Some(
@ -348,6 +360,17 @@ mod tests {
),
};
let crashdumps_dir_temp = tempfile::tempdir().unwrap();
let crashdumps_local = tempfile::tempdir().unwrap().path().into();
let crashdumps = SyncedDir {
local_path: crashdumps_local,
remote_path: Some(
BlobContainerUrl::parse(Url::from_directory_path(crashdumps_dir_temp).unwrap())
.unwrap(),
),
};
let corpus_dir_local = tempfile::tempdir().unwrap().path().into();
let corpus_dir_temp = tempfile::tempdir().unwrap();
let corpus_dir = SyncedDir {
local_path: corpus_dir_local,
@ -387,6 +410,7 @@ mod tests {
target_options,
inputs: corpus_dir.clone(),
crashes: crashes.clone(),
crashdumps: crashdumps.clone(),
tools: None,
wait_for_files: None,
stats_file: None,
@ -419,7 +443,14 @@ mod tests {
},
};
let process = start_supervisor(runtime_dir, &config, &crashes, &corpus_dir, reports_dir)
let process = start_supervisor(
runtime_dir,
&config,
&crashes,
&crashdumps,
&corpus_dir,
reports_dir,
)
.await
.unwrap();

View File

@ -77,6 +77,7 @@ pub enum Event {
coverage_data,
coverage_failed,
new_result,
new_crashdump,
new_coverage,
runtime_stats,
new_report,
@ -93,6 +94,7 @@ impl Event {
Self::coverage_data => "coverage_data",
Self::coverage_failed => "coverage_failed",
Self::new_coverage => "new_coverage",
Self::new_crashdump => "new_crashdump",
Self::new_result => "new_result",
Self::runtime_stats => "runtime_stats",
Self::new_report => "new_report",

View File

@ -24,6 +24,7 @@ type MappingFn<'a> = Box<dyn Fn(&Expand<'a>) -> Result<ExpandedValue<'a>> + Send
pub enum PlaceHolder {
Input,
Crashes,
Crashdumps,
InputCorpus,
GeneratedInputs,
TargetExe,
@ -59,6 +60,7 @@ impl PlaceHolder {
match self {
Self::Input => "{input}",
Self::Crashes => "{crashes}",
Self::Crashdumps => "{crashdumps}",
Self::InputCorpus => "{input_corpus}",
Self::GeneratedInputs => "{generated_inputs}",
Self::TargetExe => "{target_exe}",
@ -234,6 +236,12 @@ impl<'a> Expand<'a> {
self.set_value(PlaceHolder::Crashes, ExpandedValue::Path(path))
}
pub fn crashdumps(self, arg: impl AsRef<Path>) -> Self {
let arg = arg.as_ref();
let path = String::from(arg.to_string_lossy());
self.set_value(PlaceHolder::Crashdumps, ExpandedValue::Path(path))
}
pub fn input_path(self, arg: impl AsRef<Path>) -> Self {
let arg = arg.as_ref();
let path = String::from(arg.to_string_lossy());

View File

@ -1087,11 +1087,10 @@ class Tasks(Endpoint):
if tags is None:
tags = {}
containers_submit = []
for container_type, container in containers:
containers_submit.append(
containers_submit = [
models.TaskContainers(name=container, type=container_type)
)
for container_type, container in containers
]
config = models.TaskConfig(
containers=containers_submit,
@ -1210,6 +1209,7 @@ class JobContainers(Endpoint):
) -> None:
SAFE_TO_REMOVE = [
enums.ContainerType.crashes,
enums.ContainerType.crashdumps,
enums.ContainerType.setup,
enums.ContainerType.inputs,
enums.ContainerType.reports,

View File

@ -45,6 +45,7 @@ class AFL(Command):
supervisor_env: Optional[Dict[str, str]] = None,
supervisor_input_marker: str = "@@",
tags: Optional[Dict[str, str]] = None,
target_env: Optional[Dict[str, str]] = None,
wait_for_running: bool = False,
wait_for_files: Optional[List[ContainerType]] = None,
afl_container: Optional[Container] = None,
@ -162,6 +163,7 @@ class AFL(Command):
stats_format=StatsFormat.AFL,
task_wait_for_files=ContainerType.inputs,
tags=helper.tags,
target_env=target_env,
debug=debug,
ensemble_sync_delay=ensemble_sync_delay,
)

View File

@ -132,6 +132,7 @@ class Libfuzzer(Command):
fuzzer_containers = [
(ContainerType.setup, containers[ContainerType.setup]),
(ContainerType.crashes, containers[ContainerType.crashes]),
(ContainerType.crashdumps, containers[ContainerType.crashdumps]),
(ContainerType.inputs, containers[ContainerType.inputs]),
]
@ -416,6 +417,7 @@ class Libfuzzer(Command):
ContainerType.setup,
ContainerType.inputs,
ContainerType.crashes,
ContainerType.crashdumps,
ContainerType.reports,
ContainerType.unique_reports,
ContainerType.unique_inputs,
@ -726,6 +728,7 @@ class Libfuzzer(Command):
ContainerType.setup,
ContainerType.inputs,
ContainerType.crashes,
ContainerType.crashdumps,
ContainerType.coverage,
ContainerType.reports,
ContainerType.unique_reports,
@ -753,6 +756,7 @@ class Libfuzzer(Command):
fuzzer_containers = [
(ContainerType.setup, containers[ContainerType.setup]),
(ContainerType.crashes, containers[ContainerType.crashes]),
(ContainerType.crashdumps, containers[ContainerType.crashdumps]),
(ContainerType.inputs, containers[ContainerType.inputs]),
(ContainerType.tools, fuzzer_tools_container),
]
@ -960,6 +964,7 @@ class Libfuzzer(Command):
ContainerType.setup,
ContainerType.inputs,
ContainerType.crashes,
ContainerType.crashdumps,
ContainerType.reports,
ContainerType.unique_reports,
ContainerType.no_repro,
@ -978,6 +983,7 @@ class Libfuzzer(Command):
fuzzer_containers = [
(ContainerType.setup, helper.containers[ContainerType.setup]),
(ContainerType.crashes, helper.containers[ContainerType.crashes]),
(ContainerType.crashdumps, helper.containers[ContainerType.crashdumps]),
(ContainerType.inputs, helper.containers[ContainerType.inputs]),
]

View File

@ -87,6 +87,7 @@ class Integration(BaseModel):
target_class: Optional[str]
target_method: Optional[str]
setup_dir: Optional[str]
target_env: Optional[Dict[str, str]]
TARGETS: Dict[str, Integration] = {
@ -106,12 +107,21 @@ TARGETS: Dict[str, Integration] = {
ContainerType.unique_reports: 1,
ContainerType.coverage: 1,
ContainerType.inputs: 2,
# TODO: crashdumps are intermittently not captured
# during integration tests on Linux. This requires more
# investigation before we can fully enable this test.
# ContainerType.crashdumps: 1,
ContainerType.extra_output: 1,
},
reboot_after_setup=True,
inject_fake_regression=True,
target_env={
# same TODO
# "ASAN_OPTIONS": "disable_coredump=0:abort_on_error=1:unmap_shadow_on_exit=1"
},
fuzzing_target_options=[
"--test:{extra_setup_dir}",
"--only_asan_failures",
"--write_test_file={extra_output_dir}/test.txt",
],
),
@ -213,11 +223,15 @@ TARGETS: Dict[str, Integration] = {
ContainerType.inputs: 2,
ContainerType.unique_reports: 1,
ContainerType.coverage: 1,
ContainerType.crashdumps: 1,
ContainerType.extra_output: 1,
},
inject_fake_regression=True,
target_env={"ASAN_SAVE_DUMPS": "my_dump.dmp"},
# we should set unmap_shadow_on_exit=1 but it fails on Windows at the moment
fuzzing_target_options=[
"--test:{extra_setup_dir}",
"--only_asan_failures",
"--write_test_file={extra_output_dir}/test.txt",
],
),
@ -631,6 +645,7 @@ class TestOnefuzz:
fuzzing_target_options=config.fuzzing_target_options,
extra_setup_container=Container(extra_setup_container.name),
extra_output_container=Container(extra_output_container.name),
target_env=config.target_env,
)
elif config.template == TemplateType.libfuzzer_dotnet:
if setup is None:
@ -653,6 +668,7 @@ class TestOnefuzz:
fuzzing_target_options=config.target_options,
target_class=config.target_class,
target_method=config.target_method,
target_env=config.target_env,
)
elif config.template == TemplateType.libfuzzer_qemu_user:
return self.of.template.libfuzzer.qemu_user(
@ -665,6 +681,7 @@ class TestOnefuzz:
duration=duration,
vm_count=1,
target_options=config.target_options,
target_env=config.target_env,
)
elif config.template == TemplateType.radamsa:
return self.of.template.radamsa.basic(
@ -679,6 +696,7 @@ class TestOnefuzz:
disable_check_debugger=config.disable_check_debugger or False,
duration=duration,
vm_count=1,
target_env=config.target_env,
)
elif config.template == TemplateType.afl:
return self.of.template.afl.basic(
@ -692,6 +710,7 @@ class TestOnefuzz:
duration=duration,
vm_count=1,
target_options=config.target_options,
target_env=config.target_env,
)
else:
raise NotImplementedError
@ -798,14 +817,18 @@ class TestOnefuzz:
return (True, "timed out while checking jobs", False)
for job_id in check_containers:
job_name = jobs[job_id].config.name
finished_containers: Set[Container] = set()
for container_name, container_impl in check_containers[job_id].items():
container_client, count = container_impl
if len(container_client.list_blobs()) >= count:
container_client, required_count = container_impl
found_count = len(container_client.list_blobs())
if found_count >= required_count:
clear()
self.logger.info(
"found files for %s - %s",
jobs[job_id].config.name,
"found %d files (needed %d) for %s - %s",
found_count,
required_count,
job_name,
container_name,
)
finished_containers.add(container_name)
@ -813,6 +836,12 @@ class TestOnefuzz:
for container_name in finished_containers:
del check_containers[job_id][container_name]
to_check = check_containers[job_id].keys()
if len(to_check) > 0:
self.logger.info(
"%s - still waiting for %s", job_name, ", ".join(to_check)
)
scalesets = self.of.scalesets.list()
for job_id in job_tasks:
finished_tasks: Set[UUID] = set()

View File

@ -1,6 +1,10 @@
CC=clang
CFLAGS=-g3 -fsanitize=fuzzer -fsanitize=address
ifeq ($(OS),Windows_NT)
CFLAGS=/O2 /Zi /fsanitize=fuzzer /fsanitize=address
CC=cl
else
CFLAGS=-g3 -fsanitize=fuzzer -fsanitize=address
CC=clang
endif
all: fuzz.exe

View File

@ -7,88 +7,146 @@
#include <stdbool.h>
#include <string.h>
// allow an argument --write_test_file=xxx.txt to be set
// which is useful for exercising some OneFuzz features in integration tests
int LLVMFuzzerInitialize(int *argc, char ***argv) {
const int num_args = *argc;
char** args = *argv;
bool only_asan = false;
for (int i = 0; i < num_args; ++i) {
int LLVMFuzzerInitialize(int *argc, char ***argv)
{
const int num_args = *argc;
char **args = *argv;
for (int i = 0; i < num_args; ++i)
{
// allow an argument --write_test_file=xxx.txt to be set
// which is useful for exercising some OneFuzz features in integration tests
const char *test_file_arg = "--write_test_file=";
// look for argument starting with --write_test_file=
const char* arg_name = "--write_test_file=";
if (strncmp(args[i], arg_name, strlen(arg_name)) == 0) {
if (strncmp(args[i], test_file_arg, strlen(test_file_arg)) == 0)
{
// extract filename part
const char* file_name = args[i] + strlen(arg_name);
const char *file_name = args[i] + strlen(test_file_arg);
// write file
FILE* output = fopen(file_name, "a");
if (!output) {
FILE *output = fopen(file_name, "a");
if (!output)
{
perror("failed to open file");
return -1;
}
fputs("Hello from simple fuzzer\n", output);
fclose(output);
break;
}
// an argument to only allow generating ASAN failures
const char *asan_only_arg = "--only_asan_failures";
if (strcmp(args[i], asan_only_arg) == 0)
{
only_asan = true;
}
}
return 0;
}
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t len) {
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t len)
{
int cnt = 0;
if (len < 4) {
if (len < 4)
{
return 0;
}
if (data[0] == 'x') { cnt++; }
if (data[1] == 'y') { cnt++; }
if (data[2] == 'z') { cnt++; }
if (data[0] == 'x')
{
cnt++;
}
if (data[1] == 'y')
{
cnt++;
}
if (data[2] == 'z')
{
cnt++;
}
if (cnt >= 3) {
switch (data[3]) {
case '0': {
if (cnt >= 3)
{
switch (data[3])
{
case '0':
{
// segv
int *p = NULL; *p = 123;
int *p = NULL;
*p = 123;
break;
}
case '1': {
case '1':
{
// stack-buffer-underflow
int* p = &cnt - 32; for (int i = 0; i < 32; i++) { *(p + i) = 0; }
int *p = &cnt - 32;
for (int i = 0; i < 32; i++)
{
*(p + i) = 0;
}
break;
}
case '2': {
case '2':
{
// stack-buffer-overflow
int* p = &cnt + 32; for (int i = 0; i < 32; i++) { *(p - i) = 0; }
int *p = &cnt + 32;
for (int i = 0; i < 32; i++)
{
*(p - i) = 0;
}
break;
}
case '3': {
case '3':
{
// bad-free
int *p = &cnt; free(p);
int *p = &cnt;
free(p);
break;
}
case '4': {
case '4':
{
// double-free
int* p = (int *) malloc(sizeof(int)); free(p); free(p);
int *p = (int *)malloc(sizeof(int));
free(p);
free(p);
break;
}
case '5': {
case '5':
{
// heap-use-after-free
int* p = (int *) malloc(sizeof(int)); free(p); *p = 123;
int *p = (int *)malloc(sizeof(int));
free(p);
*p = 123;
break;
}
case '6': {
case '6':
{
// heap-buffer-overflow
int* p = (int *) malloc(8 * sizeof(int)); for (int i = 0; i < 32; i++) { *(p + i) = 0; }
int *p = (int *)malloc(8 * sizeof(int));
for (int i = 0; i < 32; i++)
{
*(p + i) = 0;
}
break;
}
case '7': {
case '7':
{
// fpe
int x = 0; int y = 123 / x;
int x = 0;
int y = 123 / x;
break;
}
case '8': {
case '8':
{
if (only_asan)
{
break;
}
abort();
break;
}

View File

@ -219,6 +219,7 @@ class ContainerType(Enum):
coverage = "coverage"
crashes = "crashes"
inputs = "inputs"
crashdumps = "crashdumps"
no_repro = "no_repro"
readonly_inputs = "readonly_inputs"
reports = "reports"
@ -237,6 +238,7 @@ class ContainerType(Enum):
cls.analysis,
cls.coverage,
cls.crashes,
cls.crashdumps,
cls.inputs,
cls.no_repro,
cls.readonly_inputs,

View File

@ -16,21 +16,24 @@ logger "onefuzz: starting up onefuzz"
#check if we are running in docker
if [ -f /.dockerenv ]; then
echo "Running in docker:
to optimize the experience make sure the host os is setup properly. with the following command
# use core files, not external crash handler
echo core | sudo tee /proc/sys/kernel/core_pattern
# disable ASLR
echo 0 | sudo tee /proc/sys/kernel/randomize_va_space
# set core dumping to default behavior
echo 1 | sudo tee /proc/sys/fs/suid_dumpable"
echo "Running in docker: to optimize the experience make sure the host OS is setup properly, use the following commands:
# 1) use core files, not external crash handler
# 2) suffix core with PID: will be 'core.XXXX'
# 3) disable ASLR
# 4) set core dumping to default behavior
sudo sysctl -w 'kernel.core_pattern=core' 'kernel.core_uses_pid=1' 'kernel.randomize_va_space=0' 'fs.suid_dumpable=1'
# unlimit core files
ulimit -c unlimited"
else
# use core files, not external crash handler
echo core | sudo tee /proc/sys/kernel/core_pattern
# disable ASLR
echo 0 | sudo tee /proc/sys/kernel/randomize_va_space
# set core dumping to default behavior
echo 1 | sudo tee /proc/sys/fs/suid_dumpable
# 1) use core files, not external crash handler
# 2) suffix core with PID: will be 'core.XXXX'
# 3) disable ASLR
# 4) set core dumping to default behavior
sudo sysctl -w 'kernel.core_pattern=core' 'kernel.core_uses_pid=1' 'kernel.randomize_va_space=0' 'fs.suid_dumpable=1'
# unlimit core files
ulimit -c unlimited
fi
cd /onefuzz