Mirror of https://github.com/microsoft/onefuzz.git, synced 2025-06-10 01:01:34 +00:00
Update command variable expansion (#561)
* Documents `crashes_account` and `crashes_container`
* Adds `reports_dir` and support for `unique_reports`, `reports`, and `no_repro` containers to the generic analysis task
* Adds `microsoft_telemetry_key` and `instance_telemetry_key` to the generic supervisor, generator, and analysis tasks
This commit is contained in:
parent 419ca05b28
commit e3c73d7a10
@@ -18,6 +18,11 @@ The following values are replaced with the specific values at runtime.
 * `{setup_dir}`: Path to the setup directory
 * `{job_id}`: UUID that indicates the Job ID
 * `{task_id}`: UUID that indicates the Task ID
+* `{reports_dir}`: Path to the directory to write crash reports
+* `{crashes_account}`: Azure Storage Account name for the `crashes` container
+* `{crashes_container}`: Container name for the `crashes` container
+* `{microsoft_telemetry_key}`: Application Insights key used for collecting [non-attributable telemetry](telemetry.md) to improve OneFuzz
+* `{instance_telemetry_key}`: Application Insights key used for private, instance-owned telemetry and logging (see [OneFuzz Telemetry](telemetry.md))

 ## Example

@@ -45,13 +50,14 @@ These are currently used in the following tasks:
 * libfuzzer\_merge: `target_exe`, `target_options`, `input_corpus`
 * libfuzzer\_coverage: None
 * generic\_analysis: `input`, `target_exe`, `target_options`, `analyzer_exe`,
-  `anayzer_options`, `output_dir`, `tools_dir`, `job_id`, `task_id`
+  `analyzer_options`, `output_dir`, `tools_dir`, `job_id`, `task_id`,
+  `crashes_account`, `crashes_container`, `reports_dir`, `microsoft_telemetry_key`, `instance_telemetry_key`
 * generic\_generator: `generated_inputs`, `input_corpus`, `tools_dir`,
   `generator_exe`, `generator_options`, `target_exe`, `target_options`,
-  `input`, `job_id`, `task_id`
+  `input`, `job_id`, `task_id`, `microsoft_telemetry_key`, `instance_telemetry_key`
 * generic\_supervisor: `crashes`, `runtime_dir`, `target_exe`, `target_options`,
   `input_corpus`, `input`, `supervisor_exe`, `supervisor_options`, `tools_dir`,
-  `job_id`, `task_id`
+  `job_id`, `task_id`, `crashes_account`, `crashes_container`, `reports_dir`, `microsoft_telemetry_key`, `instance_telemetry_key`
 * generic\_merge: `input`, `input_corpus`, `output_dir`, `target_exe`,
   `target_options`, `supervisor_exe`, `supervisor_options`, `tools_dir`,
-  `job_id`, `task_id`
+  `job_id`, `task_id`, `microsoft_telemetry_key`, `instance_telemetry_key`
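To make the placeholder list above concrete, here is a purely hypothetical `analyzer_options` value for a generic_analysis task, written as a Rust snippet for illustration only; the analyzer flag names are invented, and only the `{placeholder}` tokens are meaningful to OneFuzz, which substitutes them at runtime.

```rust
fn main() {
    // Illustration only: the flag names are invented; the {placeholder} tokens
    // are the documented variables that OneFuzz expands before launching the tool.
    let analyzer_options: Vec<&str> = vec![
        "--input", "{input}",
        "--reports", "{reports_dir}",
        "--crashes-account", "{crashes_account}",
        "--crashes-container", "{crashes_container}",
        "--telemetry-key", "{instance_telemetry_key}",
    ];
    println!("{}", analyzer_options.join(" "));
}
```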
@@ -1,7 +1,9 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.

-use crate::tasks::{config::CommonConfig, heartbeat::HeartbeatSender};
+use crate::tasks::{
+    config::CommonConfig, heartbeat::HeartbeatSender, report::crash_report::monitor_reports,
+};
 use anyhow::{Context, Result};
 use futures::stream::StreamExt;
 use onefuzz::{az_copy, blob::url::BlobUrl};
@@ -18,6 +20,7 @@ use std::{
     str,
 };
 use storage_queue::{QueueClient, EMPTY_QUEUE_DELAY};
+use tempfile::tempdir;
 use tokio::{fs, process::Command};

 #[derive(Debug, Deserialize)]
@@ -34,6 +37,10 @@ pub struct Config {
     pub analysis: SyncedDir,
     pub tools: SyncedDir,

+    pub reports: Option<SyncedDir>,
+    pub unique_reports: Option<SyncedDir>,
+    pub no_repro: Option<SyncedDir>,
+
     #[serde(flatten)]
     pub common: CommonConfig,
 }
@@ -46,21 +53,63 @@ pub async fn spawn(config: Config) -> Result<()> {
     config.analysis.init().await?;
     config.tools.init_pull().await?;

+    // The tempdir is always created; however, reports_path and
+    // reports_monitor_future are only created if we have one of the three
+    // report SyncedDirs. The idea is that the option for where to write
+    // reports is only available for target option / env expansion if one of
+    // the report SyncedDirs is provided.
+    let reports_dir = tempdir()?;
+    let (reports_path, reports_monitor_future) =
+        if config.unique_reports.is_some() || config.reports.is_some() || config.no_repro.is_some()
+        {
+            if let Some(unique_reports) = &config.unique_reports {
+                unique_reports.init().await?;
+            }
+            if let Some(reports) = &config.reports {
+                reports.init().await?;
+            }
+            if let Some(no_repro) = &config.no_repro {
+                no_repro.init().await?;
+            }
+            let monitor_reports_future = monitor_reports(
+                reports_dir.path(),
+                &config.unique_reports,
+                &config.reports,
+                &config.no_repro,
+            );
+            (
+                Some(reports_dir.path().to_path_buf()),
+                Some(monitor_reports_future),
+            )
+        } else {
+            (None, None)
+        };
+
     set_executable(&config.tools.path).await?;
-    run_existing(&config).await?;
-    poll_inputs(&config, tmp).await?;
+    run_existing(&config, &reports_path).await?;
+    let poller = poll_inputs(&config, tmp, &reports_path);
+
+    match reports_monitor_future {
+        Some(monitor) => {
+            futures::try_join!(poller, monitor)?;
+        }
+        None => {
+            poller.await?;
+        }
+    };
+
     Ok(())
 }

-async fn run_existing(config: &Config) -> Result<()> {
+async fn run_existing(config: &Config, reports_dir: &Option<PathBuf>) -> Result<()> {
     if let Some(crashes) = &config.crashes {
         crashes.init_pull().await?;
-        let mut count = 0;
+        let mut count: u64 = 0;
         let mut read_dir = fs::read_dir(&crashes.path).await?;
         while let Some(file) = read_dir.next().await {
             debug!("Processing file {:?}", file);
             let file = file?;
-            run_tool(file.path(), &config).await?;
+            run_tool(file.path(), &config, &reports_dir).await?;
             count += 1;
         }
         info!("processed {} initial inputs", count);
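The `spawn` change above always drives the input poller, but joins it with the report monitor only when a monitor was actually created. A minimal, self-contained sketch of that pattern follows (assuming only the `anyhow` and `futures` crates; the function and parameter names here are hypothetical, not from this commit):

```rust
use anyhow::Result;
use std::future::Future;

// Run the poller; if a monitor future exists, drive both to completion and
// fail fast if either returns an error (mirroring the match in `spawn` above).
async fn run_with_optional_monitor<P, M>(poller: P, monitor: Option<M>) -> Result<()>
where
    P: Future<Output = Result<()>>,
    M: Future<Output = Result<()>>,
{
    match monitor {
        Some(monitor) => {
            futures::try_join!(poller, monitor)?;
        }
        None => {
            poller.await?;
        }
    }
    Ok(())
}
```

The design choice mirrored here is that the temporary reports directory is cheap to create unconditionally, while the report monitor (and the `{reports_dir}` expansion) only exists when at least one report container is configured.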
@@ -82,7 +131,11 @@ async fn already_checked(config: &Config, input: &BlobUrl) -> Result<bool> {
     Ok(result)
 }

-async fn poll_inputs(config: &Config, tmp_dir: OwnedDir) -> Result<()> {
+async fn poll_inputs(
+    config: &Config,
+    tmp_dir: OwnedDir,
+    reports_dir: &Option<PathBuf>,
+) -> Result<()> {
     let heartbeat = config.common.init_heartbeat().await?;
     if let Some(queue) = &config.input_queue {
         let mut input_queue = QueueClient::new(queue.clone());
@@ -104,7 +157,7 @@ async fn poll_inputs(config: &Config, tmp_dir: OwnedDir) -> Result<()> {
                 destination_path.push(file_name);
                 az_copy::copy(input_url.url().as_ref(), &destination_path, false).await?;

-                run_tool(destination_path, &config).await?;
+                run_tool(destination_path, &config, &reports_dir).await?;
                 config.analysis.sync_push().await?
             }
             input_queue.delete(message).await?;
@@ -118,7 +171,11 @@ async fn poll_inputs(config: &Config, tmp_dir: OwnedDir) -> Result<()> {
     Ok(())
 }

-pub async fn run_tool(input: impl AsRef<Path>, config: &Config) -> Result<()> {
+pub async fn run_tool(
+    input: impl AsRef<Path>,
+    config: &Config,
+    reports_dir: &Option<PathBuf>,
+) -> Result<()> {
     let expand = Expand::new()
         .input_path(&input)
         .target_exe(&config.target_exe)
@@ -130,6 +187,15 @@ pub async fn run_tool(input: impl AsRef<Path>, config: &Config) -> Result<()> {
         .setup_dir(&config.common.setup_dir)
         .job_id(&config.common.job_id)
         .task_id(&config.common.task_id)
+        .set_optional_ref(&config.common.microsoft_telemetry_key, |tester, key| {
+            tester.microsoft_telemetry_key(&key)
+        })
+        .set_optional_ref(&config.common.instance_telemetry_key, |tester, key| {
+            tester.instance_telemetry_key(&key)
+        })
+        .set_optional_ref(&reports_dir, |tester, reports_dir| {
+            tester.reports_dir(&reports_dir)
+        })
         .set_optional_ref(&config.crashes, |tester, crashes| {
             if let Some(url) = &crashes.url {
                 tester
@@ -159,6 +159,13 @@ impl GeneratorTask {
             .generator_options(&self.config.generator_options)
             .job_id(&self.config.common.job_id)
             .task_id(&self.config.common.task_id)
+            .set_optional_ref(
+                &self.config.common.microsoft_telemetry_key,
+                |tester, key| tester.microsoft_telemetry_key(&key),
+            )
+            .set_optional_ref(&self.config.common.instance_telemetry_key, |tester, key| {
+                tester.instance_telemetry_key(&key)
+            })
             .set_optional_ref(&self.config.tools, |expand, tools| {
                 expand.tools_dir(&tools.path)
             });
@@ -193,6 +193,12 @@ async fn start_supervisor(
         .set_optional_ref(&config.target_options, |expand, target_options| {
             expand.target_options(target_options)
         })
+        .set_optional_ref(&config.common.microsoft_telemetry_key, |tester, key| {
+            tester.microsoft_telemetry_key(&key)
+        })
+        .set_optional_ref(&config.common.instance_telemetry_key, |tester, key| {
+            tester.instance_telemetry_key(&key)
+        })
         .set_optional_ref(&config.crashes.url, |tester, url| {
             tester
                 .crashes_account(&url.account())
@@ -140,7 +140,13 @@ async fn merge(config: &Config, output_dir: impl AsRef<Path>) -> Result<()> {
         .setup_dir(&config.common.setup_dir)
         .tools_dir(&config.tools.path)
         .job_id(&config.common.job_id)
-        .task_id(&config.common.task_id);
+        .task_id(&config.common.task_id)
+        .set_optional_ref(&config.common.microsoft_telemetry_key, |tester, key| {
+            tester.microsoft_telemetry_key(&key)
+        })
+        .set_optional_ref(&config.common.instance_telemetry_key, |tester, key| {
+            tester.instance_telemetry_key(&key)
+        });

     let supervisor_path = expand.evaluate_value(&config.supervisor_exe)?;

@@ -2,6 +2,7 @@
 // Licensed under the MIT License.

 use anyhow::Result;
+use onefuzz_telemetry::{InstanceTelemetryKey, MicrosoftTelemetryKey};
 use std::path::{Path, PathBuf};
 use std::{collections::HashMap, hash::Hash};
 use strum::IntoEnumIterator;
@@ -40,6 +41,8 @@ pub enum PlaceHolder {
     TaskId,
     CrashesContainer,
     CrashesAccount,
+    MicrosoftTelemetryKey,
+    InstanceTelemetryKey,
 }

 impl PlaceHolder {
@@ -68,6 +71,8 @@ impl PlaceHolder {
             Self::TaskId => "{task_id}",
             Self::CrashesContainer => "{crashes_container}",
             Self::CrashesAccount => "{crashes_account}",
+            Self::MicrosoftTelemetryKey => "{microsoft_telemetry_key}",
+            Self::InstanceTelemetryKey => "{instance_telemetry_key}",
         }
         .to_string()
     }
@@ -255,6 +260,21 @@ impl<'a> Expand<'a> {
         self.set_value(PlaceHolder::JobId, ExpandedValue::Scalar(value))
     }

+    pub fn microsoft_telemetry_key(self, arg: &MicrosoftTelemetryKey) -> Self {
+        let value = arg.to_string();
+        self.set_value(
+            PlaceHolder::MicrosoftTelemetryKey,
+            ExpandedValue::Scalar(value),
+        )
+    }
+    pub fn instance_telemetry_key(self, arg: &InstanceTelemetryKey) -> Self {
+        let value = arg.to_string();
+        self.set_value(
+            PlaceHolder::InstanceTelemetryKey,
+            ExpandedValue::Scalar(value),
+        )
+    }
+
     pub fn crashes_account(self, arg: &str) -> Self {
         self.set_value(
             PlaceHolder::CrashesAccount,
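For reference, a rough usage sketch of the new `Expand` setters; this is not code from the repository. The import path, the storage account and container values, the command template, and the precise method signatures (e.g. of `set_optional_ref` and `evaluate_value`) are assumptions inferred from the diff above, and `crashes_container` is assumed to exist alongside `crashes_account`.

```rust
// Sketch only; paths and signatures are assumed from the diff, not verified against the crate.
use onefuzz::expand::Expand; // assumed module path
use onefuzz_telemetry::InstanceTelemetryKey;

fn example(instance_key: Option<InstanceTelemetryKey>) -> anyhow::Result<String> {
    let expand = Expand::new()
        .crashes_account("fuzzstorageacct") // hypothetical storage account
        .crashes_container("crashes") // hypothetical container name
        // Only set the placeholder when the optional key is present, mirroring
        // how the tasks above use set_optional_ref.
        .set_optional_ref(&instance_key, |expand, key| {
            expand.instance_telemetry_key(&key)
        });

    // {crashes_account}, {crashes_container}, and {instance_telemetry_key} are
    // replaced; text without a known placeholder passes through unchanged.
    expand.evaluate_value(
        "analyzer.exe --account {crashes_account} --container {crashes_container} --appinsights {instance_telemetry_key}",
    )
}
```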