Mirror of https://github.com/microsoft/onefuzz.git (synced 2025-06-10 01:01:34 +00:00)
Update command variable expansion (#561)
* Documents `crashes_account` and `crashes_container`
* Adds `reports_dir` and support for `unique_reports`, `reports`, and `no_repro` containers to the generic analysis task
* Adds `microsoft_telemetry_key` and `instance_telemetry_key` to generic supervisor, generator, and analysis tasks
parent 419ca05b28
commit e3c73d7a10
@@ -18,6 +18,11 @@ The following values are replaced with the specific values at runtime.
 * `{setup_dir}` : Path to the setup directory
 * `{job_id}`: UUID that indicates the Job ID
 * `{task_id}`: UUID that indicates the Task ID
+* `{reports_dir}`: Path to the directory to write crash reports
+* `{crashes_account}`: Azure Storage Account name for the `crashes` container
+* `{crashes_container}`: Container name for the `crashes` container
+* `{microsoft_telemetry_key}`: Application Insights key used for collecting [non-attributable telemetry](telemetry.md) to improve OneFuzz.
+* `{instance_telemetry_key}`: Application Insights key used for private, instance-owned telemetry and logging (see [OneFuzz Telemetry](telemetry.md)).
 
 ## Example
 
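For context, the placeholders above are literal tokens in task command lines and environment values; at runtime the agent substitutes each one with the concrete value for that task. A minimal sketch of the idea, with illustrative values only (this is not the agent's `Expand` implementation):

```rust
use std::collections::HashMap;

/// Naive stand-in for command expansion: replace every known
/// `{placeholder}` token in a command template with its runtime value.
fn expand(template: &str, values: &HashMap<&str, String>) -> String {
    let mut out = template.to_string();
    for (token, value) in values {
        out = out.replace(token, value);
    }
    out
}

fn main() {
    let mut values = HashMap::new();
    values.insert("{input}", "/crashes/poc-1234".to_string());
    values.insert("{reports_dir}", "/tmp/reports".to_string());
    values.insert("{crashes_container}", "crashes".to_string());

    // Hypothetical analyzer command line using the documented placeholders.
    let cmd = expand(
        "analyzer.exe --input {input} --out {reports_dir} --container {crashes_container}",
        &values,
    );
    println!("{}", cmd);
}
```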
@@ -45,13 +50,14 @@ These are currently used in the following tasks:
 * libfuzzer\_merge: `target_exe`, `target_options`, `input_corpus`
 * libfuzzer\_coverage: None
 * generic\_analysis: `input`, `target_exe`, `target_options`, `analyzer_exe`,
-  `anayzer_options`, `output_dir`, `tools_dir`, `job_id`, `task_id`
+  `analyzer_options`, `output_dir`, `tools_dir`, `job_id`, `task_id`,
+  `crashes_account`, `crashes_container`, `reports_dir`, `microsoft_telemetry_key`, `instance_telemetry_key`
 * generic\_generator: `generated_inputs`, `input_corpus`, `tools_dir`,
   `generator_exe`, `generator_options`, `target_exe`, `target_options`,
-  `input`, `job_id`, `task_id`
+  `input`, `job_id`, `task_id`, `microsoft_telemetry_key`, `instance_telemetry_key`
 * generic\_supervisor: `crashes`, `runtime_dir`, `target_exe`, `target_options`,
   `input_corpus`, `input`, `supervisor_exe`, `supervisor_options`, `tools_dir`,
-  `job_id`, `task_id`
+  `job_id`, `task_id`, `crashes_account`, `crashes_container`, `reports_dir`, `microsoft_telemetry_key`, `instance_telemetry_key`
 * generic\_merge: `input`, `input_corpus`, `output_dir`, `target_exe`,
   `target_options`, `supervisor_exe`, `supervisor_options`, `tools_dir`,
-  `job_id`, `task_id`
+  `job_id`, `task_id`, `microsoft_telemetry_key`, `instance_telemetry_key`
@@ -1,7 +1,9 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
 
-use crate::tasks::{config::CommonConfig, heartbeat::HeartbeatSender};
+use crate::tasks::{
+    config::CommonConfig, heartbeat::HeartbeatSender, report::crash_report::monitor_reports,
+};
 use anyhow::{Context, Result};
 use futures::stream::StreamExt;
 use onefuzz::{az_copy, blob::url::BlobUrl};
@@ -18,6 +20,7 @@ use std::{
     str,
 };
 use storage_queue::{QueueClient, EMPTY_QUEUE_DELAY};
+use tempfile::tempdir;
 use tokio::{fs, process::Command};
 
 #[derive(Debug, Deserialize)]
@@ -34,6 +37,10 @@ pub struct Config {
     pub analysis: SyncedDir,
     pub tools: SyncedDir,
 
+    pub reports: Option<SyncedDir>,
+    pub unique_reports: Option<SyncedDir>,
+    pub no_repro: Option<SyncedDir>,
+
     #[serde(flatten)]
     pub common: CommonConfig,
 }
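The three new report containers are `Option<SyncedDir>`, so task configs that omit them still deserialize (missing keys become `None`), and `#[serde(flatten)]` keeps the shared `CommonConfig` fields inline in the same JSON object. A small sketch of that behavior with simplified stand-in types (not the real `SyncedDir` or `CommonConfig`):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Common {
    task_id: String,
}

#[derive(Debug, Deserialize)]
struct Config {
    analysis: String,
    // Missing keys deserialize to None, so older configs keep working.
    reports: Option<String>,
    unique_reports: Option<String>,
    no_repro: Option<String>,
    // Fields of `Common` appear inline in the same JSON object.
    #[serde(flatten)]
    common: Common,
}

fn main() -> Result<(), serde_json::Error> {
    let cfg: Config =
        serde_json::from_str(r#"{ "analysis": "analysis-container", "task_id": "1234" }"#)?;
    assert!(cfg.reports.is_none());
    println!("{:?}", cfg);
    Ok(())
}
```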
@@ -46,21 +53,63 @@ pub async fn spawn(config: Config) -> Result<()> {
     config.analysis.init().await?;
     config.tools.init_pull().await?;
 
+    // the tempdir is always created, however, the reports_path and
+    // reports_monitor_future are only created if we have one of the three
+    // report SyncedDir. The idea is that the option for where to write reports
+    // is only available for target option / env expansion if one of the reports
+    // SyncedDir is provided.
+    let reports_dir = tempdir()?;
+    let (reports_path, reports_monitor_future) =
+        if config.unique_reports.is_some() || config.reports.is_some() || config.no_repro.is_some()
+        {
+            if let Some(unique_reports) = &config.unique_reports {
+                unique_reports.init().await?;
+            }
+            if let Some(reports) = &config.reports {
+                reports.init().await?;
+            }
+            if let Some(no_repro) = &config.no_repro {
+                no_repro.init().await?;
+            }
+            let monitor_reports_future = monitor_reports(
+                reports_dir.path(),
+                &config.unique_reports,
+                &config.reports,
+                &config.no_repro,
+            );
+            (
+                Some(reports_dir.path().to_path_buf()),
+                Some(monitor_reports_future),
+            )
+        } else {
+            (None, None)
+        };
+
     set_executable(&config.tools.path).await?;
-    run_existing(&config).await?;
-    poll_inputs(&config, tmp).await?;
+    run_existing(&config, &reports_path).await?;
+    let poller = poll_inputs(&config, tmp, &reports_path);
+
+    match reports_monitor_future {
+        Some(monitor) => {
+            futures::try_join!(poller, monitor)?;
+        }
+        None => {
+            poller.await?;
+        }
+    };
+
     Ok(())
 }
 
-async fn run_existing(config: &Config) -> Result<()> {
+async fn run_existing(config: &Config, reports_dir: &Option<PathBuf>) -> Result<()> {
     if let Some(crashes) = &config.crashes {
         crashes.init_pull().await?;
-        let mut count = 0;
+        let mut count: u64 = 0;
         let mut read_dir = fs::read_dir(&crashes.path).await?;
         while let Some(file) = read_dir.next().await {
             debug!("Processing file {:?}", file);
             let file = file?;
-            run_tool(file.path(), &config).await?;
+            run_tool(file.path(), &config, &reports_dir).await?;
             count += 1;
         }
         info!("processed {} initial inputs", count);
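The temporary reports directory is created unconditionally, but `{reports_dir}` is only exposed (and the report monitor only started) when at least one report container is configured; the monitor future is then driven alongside the input poller. The shape of that control flow, reduced to a self-contained sketch in which all names are placeholders rather than the task's real functions:

```rust
use anyhow::Result;

// Stand-in for the task's input-polling loop.
async fn poll_inputs() -> Result<()> {
    Ok(())
}

// Stand-in for monitor_reports: watch a directory and upload new reports.
async fn monitor_reports() -> Result<()> {
    Ok(())
}

#[tokio::main]
async fn main() -> Result<()> {
    // In the real task this is "any of unique_reports / reports / no_repro set".
    let reports_configured = true;

    // The tempdir is created either way; it is removed when dropped.
    let reports_dir = tempfile::tempdir()?;
    println!("reports written to {:?}", reports_dir.path());

    let poller = poll_inputs();
    let monitor = if reports_configured {
        Some(monitor_reports())
    } else {
        None
    };

    // Only join the monitor when it exists; otherwise just run the poller.
    match monitor {
        Some(monitor) => {
            futures::try_join!(poller, monitor)?;
        }
        None => poller.await?,
    }
    Ok(())
}
```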
@@ -82,7 +131,11 @@ async fn already_checked(config: &Config, input: &BlobUrl) -> Result<bool> {
     Ok(result)
 }
 
-async fn poll_inputs(config: &Config, tmp_dir: OwnedDir) -> Result<()> {
+async fn poll_inputs(
+    config: &Config,
+    tmp_dir: OwnedDir,
+    reports_dir: &Option<PathBuf>,
+) -> Result<()> {
     let heartbeat = config.common.init_heartbeat().await?;
     if let Some(queue) = &config.input_queue {
         let mut input_queue = QueueClient::new(queue.clone());
@@ -104,7 +157,7 @@ async fn poll_inputs(config: &Config, tmp_dir: OwnedDir) -> Result<()> {
                 destination_path.push(file_name);
                 az_copy::copy(input_url.url().as_ref(), &destination_path, false).await?;
 
-                run_tool(destination_path, &config).await?;
+                run_tool(destination_path, &config, &reports_dir).await?;
                 config.analysis.sync_push().await?
             }
             input_queue.delete(message).await?;
@@ -118,7 +171,11 @@ async fn poll_inputs(config: &Config, tmp_dir: OwnedDir) -> Result<()> {
     Ok(())
 }
 
-pub async fn run_tool(input: impl AsRef<Path>, config: &Config) -> Result<()> {
+pub async fn run_tool(
+    input: impl AsRef<Path>,
+    config: &Config,
+    reports_dir: &Option<PathBuf>,
+) -> Result<()> {
     let expand = Expand::new()
         .input_path(&input)
         .target_exe(&config.target_exe)
@@ -130,6 +187,15 @@ pub async fn run_tool(input: impl AsRef<Path>, config: &Config) -> Result<()> {
         .setup_dir(&config.common.setup_dir)
         .job_id(&config.common.job_id)
         .task_id(&config.common.task_id)
+        .set_optional_ref(&config.common.microsoft_telemetry_key, |tester, key| {
+            tester.microsoft_telemetry_key(&key)
+        })
+        .set_optional_ref(&config.common.instance_telemetry_key, |tester, key| {
+            tester.instance_telemetry_key(&key)
+        })
+        .set_optional_ref(&reports_dir, |tester, reports_dir| {
+            tester.reports_dir(&reports_dir)
+        })
        .set_optional_ref(&config.crashes, |tester, crashes| {
             if let Some(url) = &crashes.url {
                 tester
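`set_optional_ref` is how the expansion builder wires in values that may be absent: the closure runs only when the `Option` is `Some`, so `{microsoft_telemetry_key}`, `{instance_telemetry_key}`, and `{reports_dir}` are only defined when the corresponding config values exist. A reduced sketch of that builder pattern, using toy types rather than the onefuzz `Expand` API:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct Expand {
    values: HashMap<&'static str, String>,
}

impl Expand {
    fn set(mut self, token: &'static str, value: String) -> Self {
        self.values.insert(token, value);
        self
    }

    // Apply `f` only when the optional value is present; otherwise pass
    // the builder through unchanged. This keeps the call chain linear.
    fn set_optional_ref<T>(self, value: &Option<T>, f: impl Fn(Self, &T) -> Self) -> Self {
        match value {
            Some(v) => f(self, v),
            None => self,
        }
    }
}

fn main() {
    let telemetry_key: Option<String> = None; // not configured for this task

    let expand = Expand::default()
        .set("{task_id}", "1234".to_string())
        .set_optional_ref(&telemetry_key, |e, key| {
            e.set("{instance_telemetry_key}", key.clone())
        });

    // {instance_telemetry_key} stays undefined because the key was None.
    assert!(!expand.values.contains_key("{instance_telemetry_key}"));
}
```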
@@ -159,6 +159,13 @@ impl GeneratorTask {
             .generator_options(&self.config.generator_options)
             .job_id(&self.config.common.job_id)
             .task_id(&self.config.common.task_id)
+            .set_optional_ref(
+                &self.config.common.microsoft_telemetry_key,
+                |tester, key| tester.microsoft_telemetry_key(&key),
+            )
+            .set_optional_ref(&self.config.common.instance_telemetry_key, |tester, key| {
+                tester.instance_telemetry_key(&key)
+            })
             .set_optional_ref(&self.config.tools, |expand, tools| {
                 expand.tools_dir(&tools.path)
             });
@@ -193,6 +193,12 @@ async fn start_supervisor(
         .set_optional_ref(&config.target_options, |expand, target_options| {
             expand.target_options(target_options)
         })
+        .set_optional_ref(&config.common.microsoft_telemetry_key, |tester, key| {
+            tester.microsoft_telemetry_key(&key)
+        })
+        .set_optional_ref(&config.common.instance_telemetry_key, |tester, key| {
+            tester.instance_telemetry_key(&key)
+        })
         .set_optional_ref(&config.crashes.url, |tester, url| {
             tester
                 .crashes_account(&url.account())
@@ -140,7 +140,13 @@ async fn merge(config: &Config, output_dir: impl AsRef<Path>) -> Result<()> {
         .setup_dir(&config.common.setup_dir)
         .tools_dir(&config.tools.path)
         .job_id(&config.common.job_id)
-        .task_id(&config.common.task_id);
+        .task_id(&config.common.task_id)
+        .set_optional_ref(&config.common.microsoft_telemetry_key, |tester, key| {
+            tester.microsoft_telemetry_key(&key)
+        })
+        .set_optional_ref(&config.common.instance_telemetry_key, |tester, key| {
+            tester.instance_telemetry_key(&key)
+        });
 
     let supervisor_path = expand.evaluate_value(&config.supervisor_exe)?;
 
@@ -2,6 +2,7 @@
 // Licensed under the MIT License.
 
 use anyhow::Result;
+use onefuzz_telemetry::{InstanceTelemetryKey, MicrosoftTelemetryKey};
 use std::path::{Path, PathBuf};
 use std::{collections::HashMap, hash::Hash};
 use strum::IntoEnumIterator;
@@ -40,6 +41,8 @@ pub enum PlaceHolder {
     TaskId,
     CrashesContainer,
     CrashesAccount,
+    MicrosoftTelemetryKey,
+    InstanceTelemetryKey,
 }
 
 impl PlaceHolder {
@@ -68,6 +71,8 @@ impl PlaceHolder {
             Self::TaskId => "{task_id}",
             Self::CrashesContainer => "{crashes_container}",
             Self::CrashesAccount => "{crashes_account}",
+            Self::MicrosoftTelemetryKey => "{microsoft_telemetry_key}",
+            Self::InstanceTelemetryKey => "{instance_telemetry_key}",
         }
         .to_string()
     }
@@ -255,6 +260,21 @@ impl<'a> Expand<'a> {
         self.set_value(PlaceHolder::JobId, ExpandedValue::Scalar(value))
     }
 
+    pub fn microsoft_telemetry_key(self, arg: &MicrosoftTelemetryKey) -> Self {
+        let value = arg.to_string();
+        self.set_value(
+            PlaceHolder::MicrosoftTelemetryKey,
+            ExpandedValue::Scalar(value),
+        )
+    }
+    pub fn instance_telemetry_key(self, arg: &InstanceTelemetryKey) -> Self {
+        let value = arg.to_string();
+        self.set_value(
+            PlaceHolder::InstanceTelemetryKey,
+            ExpandedValue::Scalar(value),
+        )
+    }
+
     pub fn crashes_account(self, arg: &str) -> Self {
         self.set_value(
             PlaceHolder::CrashesAccount,
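In `expand.rs`, each placeholder is an enum variant mapped to its literal `{...}` token, and the new setters simply record a scalar value for the corresponding variant. A compact sketch of that shape, again with toy types and illustrative values (the real `Expand` also supports non-scalar expansions):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum PlaceHolder {
    TaskId,
    MicrosoftTelemetryKey,
    InstanceTelemetryKey,
}

impl PlaceHolder {
    // Mirrors the token strings documented above.
    fn get_string(&self) -> &'static str {
        match self {
            Self::TaskId => "{task_id}",
            Self::MicrosoftTelemetryKey => "{microsoft_telemetry_key}",
            Self::InstanceTelemetryKey => "{instance_telemetry_key}",
        }
    }
}

fn main() {
    // Setters like microsoft_telemetry_key() boil down to storing a scalar
    // value keyed by the placeholder variant.
    let mut values: HashMap<PlaceHolder, String> = HashMap::new();
    values.insert(PlaceHolder::InstanceTelemetryKey, "example-instance-key".into());

    // Expansion then replaces each known token with its recorded value.
    let mut cmd = String::from("supervisor --telemetry {instance_telemetry_key}");
    for (placeholder, value) in &values {
        cmd = cmd.replace(placeholder.get_string(), value);
    }
    assert_eq!(cmd, "supervisor --telemetry example-instance-key");
}
```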