Add generic coverage task (#763)
**Todo:**

- [x] Finalize format for coverage file(s)
- [x] Add service support
- [x] Integration test
- [x] Merge #926
- [x] Merge #929

@@ -349,6 +349,10 @@ Each event will be submitted via HTTP POST to the user provided URL.
             "title": "Check Retry Count",
             "type": "integer"
         },
+        "coverage_filter": {
+            "title": "Coverage Filter",
+            "type": "string"
+        },
         "duration": {
             "title": "Duration",
             "type": "integer"

@@ -496,6 +500,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",

@@ -837,6 +842,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",

@@ -1864,6 +1870,10 @@ Each event will be submitted via HTTP POST to the user provided URL.
             "title": "Check Retry Count",
             "type": "integer"
         },
+        "coverage_filter": {
+            "title": "Coverage Filter",
+            "type": "string"
+        },
         "duration": {
             "title": "Duration",
             "type": "integer"

@@ -2011,6 +2021,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",

@@ -2525,6 +2536,10 @@ Each event will be submitted via HTTP POST to the user provided URL.
             "title": "Check Retry Count",
             "type": "integer"
         },
+        "coverage_filter": {
+            "title": "Coverage Filter",
+            "type": "string"
+        },
         "duration": {
             "title": "Duration",
             "type": "integer"

@@ -2672,6 +2687,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",

@@ -3018,6 +3034,10 @@ Each event will be submitted via HTTP POST to the user provided URL.
             "title": "Check Retry Count",
             "type": "integer"
         },
+        "coverage_filter": {
+            "title": "Coverage Filter",
+            "type": "string"
+        },
         "duration": {
             "title": "Duration",
             "type": "integer"

@@ -3165,6 +3185,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",

@@ -3456,6 +3477,10 @@ Each event will be submitted via HTTP POST to the user provided URL.
             "title": "Check Retry Count",
             "type": "integer"
         },
+        "coverage_filter": {
+            "title": "Coverage Filter",
+            "type": "string"
+        },
         "duration": {
             "title": "Duration",
             "type": "integer"

@@ -3603,6 +3628,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",

@@ -3868,6 +3894,10 @@ Each event will be submitted via HTTP POST to the user provided URL.
             "title": "Check Retry Count",
             "type": "integer"
         },
+        "coverage_filter": {
+            "title": "Coverage Filter",
+            "type": "string"
+        },
         "duration": {
             "title": "Duration",
             "type": "integer"

@@ -4029,6 +4059,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",

@@ -4307,6 +4338,10 @@ Each event will be submitted via HTTP POST to the user provided URL.
             "title": "Check Retry Count",
             "type": "integer"
         },
+        "coverage_filter": {
+            "title": "Coverage Filter",
+            "type": "string"
+        },
         "duration": {
             "title": "Duration",
             "type": "integer"

@@ -4454,6 +4489,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",

@@ -5707,6 +5743,10 @@ Each event will be submitted via HTTP POST to the user provided URL.
             "title": "Check Retry Count",
             "type": "integer"
         },
+        "coverage_filter": {
+            "title": "Coverage Filter",
+            "type": "string"
+        },
         "duration": {
             "title": "Duration",
             "type": "integer"

@@ -5868,6 +5908,7 @@ Each event will be submitted via HTTP POST to the user provided URL.
         "TaskType": {
             "description": "An enumeration.",
             "enum": [
+                "coverage",
                 "libfuzzer_fuzz",
                 "libfuzzer_coverage",
                 "libfuzzer_crash_report",
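
The hunks above add a `coverage_filter` string property and a `coverage` task type to the documented event schema. As an illustration only (the surrounding payload fields are omitted, and the fragment layout is an assumption rather than a quote from the schema), the sketch below builds such a fragment with `serde_json`; the `setup/<blob name>` filter path mirrors the convention the service code uses later in this diff:

```rust
// Illustrative only: a JSON fragment exercising the two new schema fields.
// The filter path follows the "setup/<blob name>" convention used by the
// service-side build_task_config change later in this diff; the file name
// itself is hypothetical.
fn main() {
    let fragment = serde_json::json!({
        "type": "coverage",
        "coverage_filter": "setup/coverage-filter.json"
    });

    println!("{}", serde_json::to_string_pretty(&fragment).unwrap());
}
```
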
src/agent/Cargo.lock (generated; 1 change)

@@ -1525,6 +1525,7 @@ dependencies = [
  "atexit",
  "backoff",
  "clap",
+ "coverage",
  "crossterm",
  "env_logger",
  "flume",

@@ -17,6 +17,7 @@ async-trait = "0.1"
 atexit = { path = "../atexit" }
 backoff = { version = "0.3", features = ["tokio"] }
 clap = "2.33"
+coverage = { path = "../coverage" }
 crossterm = "0.18"
 env_logger = "0.8"
 flume = "0.10.4"

@@ -61,6 +61,9 @@ impl CommonConfig {
 #[derive(Debug, Deserialize)]
 #[serde(tag = "task_type")]
 pub enum Config {
+    #[serde(alias = "coverage")]
+    Coverage(coverage::generic::Config),
+
     #[serde(alias = "libfuzzer_fuzz")]
     LibFuzzerFuzz(fuzz::libfuzzer_fuzz::Config),
 
@@ -109,6 +112,7 @@ impl Config {
 
     fn common_mut(&mut self) -> &mut CommonConfig {
         match self {
+            Config::Coverage(c) => &mut c.common,
             Config::LibFuzzerFuzz(c) => &mut c.common,
             Config::LibFuzzerMerge(c) => &mut c.common,
             Config::LibFuzzerReport(c) => &mut c.common,
@@ -125,6 +129,7 @@ impl Config {
 
     pub fn common(&self) -> &CommonConfig {
         match self {
+            Config::Coverage(c) => &c.common,
             Config::LibFuzzerFuzz(c) => &c.common,
             Config::LibFuzzerMerge(c) => &c.common,
             Config::LibFuzzerReport(c) => &c.common,
@@ -141,6 +146,7 @@ impl Config {
 
     pub fn report_event(&self) {
         let event_type = match self {
+            Config::Coverage(_) => "coverage",
             Config::LibFuzzerFuzz(_) => "libfuzzer_fuzz",
             Config::LibFuzzerMerge(_) => "libfuzzer_merge",
             Config::LibFuzzerReport(_) => "libfuzzer_crash_report",
@@ -183,6 +189,7 @@ impl Config {
         self.report_event();
 
         match self {
+            Config::Coverage(config) => coverage::generic::CoverageTask::new(config).run().await,
             Config::LibFuzzerFuzz(config) => {
                 fuzz::libfuzzer_fuzz::LibFuzzerFuzzTask::new(config)?
                     .run()
@@ -200,6 +207,7 @@ impl Config {
             }
             Config::LibFuzzerMerge(config) => merge::libfuzzer_merge::spawn(Arc::new(config)).await,
             Config::GenericAnalysis(config) => analysis::generic::run(config).await,
+
             Config::GenericGenerator(config) => {
                 fuzz::generator::GeneratorTask::new(config).run().await
             }
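
As added here, the agent picks the task implementation from the `task_type` field via serde's internally tagged enum support, with an alias so the lowercase wire value maps onto the `Coverage` variant. A self-contained sketch of that dispatch, using stand-in types rather than the real onefuzz-agent configs (assumes the `serde` and `serde_json` crates; the `fuzz_target.exe` value is just an example):

```rust
// A minimal sketch of the serde dispatch used above: `task_type` selects the
// enum variant, and the variant's fields sit at the same level as the tag.
// Stand-in types only; the real variants wrap the per-task config structs.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct CoverageConfig {
    target_exe: String,
}

#[derive(Debug, Deserialize)]
#[serde(tag = "task_type")]
enum Config {
    #[serde(alias = "coverage")]
    Coverage(CoverageConfig),
}

fn main() -> serde_json::Result<()> {
    let raw = r#"{ "task_type": "coverage", "target_exe": "fuzz_target.exe" }"#;

    // Deserializes into Config::Coverage thanks to the `coverage` alias.
    let config: Config = serde_json::from_str(raw)?;
    println!("{:?}", config);

    Ok(())
}
```
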
src/agent/onefuzz-agent/src/tasks/coverage/generic.rs (new file, 366 lines)

@@ -0,0 +1,366 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::time::Duration;

use anyhow::{Context, Result};
use async_trait::async_trait;
use coverage::block::CommandBlockCov;
use coverage::cache::ModuleCache;
use coverage::code::{CmdFilter, CmdFilterDef};
use onefuzz::expand::Expand;
use onefuzz::syncdir::SyncedDir;
use onefuzz_telemetry::{Event::coverage_data, EventData};
use serde::de::DeserializeOwned;
use storage_queue::{Message, QueueClient};
use tokio::fs;
use tokio::task::spawn_blocking;
use tokio_stream::wrappers::ReadDirStream;
use url::Url;

use crate::tasks::config::CommonConfig;
use crate::tasks::generic::input_poller::{CallbackImpl, InputPoller, Processor};
use crate::tasks::heartbeat::{HeartbeatSender, TaskHeartbeatClient};

const COVERAGE_FILE: &str = "coverage.json";
const MODULE_CACHE_FILE: &str = "module-cache.json";

const DEFAULT_TARGET_TIMEOUT: Duration = Duration::from_secs(5);

#[derive(Debug, Deserialize)]
pub struct Config {
    pub target_exe: PathBuf,
    pub target_env: HashMap<String, String>,
    pub target_options: Vec<String>,
    pub target_timeout: Option<u64>,

    pub coverage_filter: Option<String>,

    pub input_queue: Option<QueueClient>,
    pub readonly_inputs: Vec<SyncedDir>,
    pub coverage: SyncedDir,

    #[serde(flatten)]
    pub common: CommonConfig,
}

impl Config {
    pub fn timeout(&self) -> Duration {
        self.target_timeout
            .map(Duration::from_secs)
            .unwrap_or(DEFAULT_TARGET_TIMEOUT)
    }
}

pub struct CoverageTask {
    config: Config,
    poller: InputPoller<Message>,
}

impl CoverageTask {
    pub fn new(config: Config) -> Self {
        let poller = InputPoller::new("coverage");
        Self { config, poller }
    }

    pub async fn run(&mut self) -> Result<()> {
        info!("starting coverage task");

        self.config.coverage.init_pull().await?;

        let cache = deserialize_or_default(MODULE_CACHE_FILE).await?;

        let coverage_file = self.config.coverage.local_path.join(COVERAGE_FILE);
        let coverage = deserialize_or_default(coverage_file).await?;

        let filter = self.load_filter().await?;
        let heartbeat = self.config.common.init_heartbeat(None).await?;
        let mut context = TaskContext::new(cache, &self.config, coverage, filter, heartbeat);

        context.heartbeat.alive();

        let mut seen_inputs = false;

        for dir in &self.config.readonly_inputs {
            debug!("recording coverage for {}", dir.local_path.display());

            dir.init_pull().await?;
            let dir_count = context.record_corpus(&dir.local_path).await?;

            if dir_count > 0 {
                seen_inputs = true;
            }

            info!(
                "recorded coverage for {} inputs from {}",
                dir_count,
                dir.local_path.display()
            );

            context.heartbeat.alive();
        }

        if seen_inputs {
            context.report_coverage_stats().await?;
            context.save_and_sync_coverage().await?;
        }

        context.heartbeat.alive();

        if let Some(queue) = &self.config.input_queue {
            info!("polling queue for new coverage inputs");

            let callback = CallbackImpl::new(queue.clone(), context)?;
            self.poller.run(callback).await?;
        }

        Ok(())
    }

    async fn load_filter(&self) -> Result<CmdFilter> {
        let raw_filter_path = if let Some(raw_path) = &self.config.coverage_filter {
            raw_path
        } else {
            return Ok(CmdFilter::default());
        };

        // Ensure users can locate the filter relative to the setup container.
        let expand = Expand::new().setup_dir(&self.config.common.setup_dir);
        let filter_path = expand.evaluate_value(raw_filter_path)?;

        let data = fs::read(&filter_path).await?;
        let def: CmdFilterDef = serde_json::from_slice(&data)?;
        let filter = CmdFilter::new(def)?;

        Ok(filter)
    }
}

async fn deserialize_or_default<T>(path: impl AsRef<Path>) -> Result<T>
where
    T: Default + DeserializeOwned,
{
    use tokio::io::ErrorKind::NotFound;

    let data = fs::read(path).await;

    if let Err(err) = &data {
        if err.kind() == NotFound {
            return Ok(T::default());
        }
    }

    let data = data?;

    Ok(serde_json::from_slice(&data)?)
}

struct TaskContext<'a> {
    // Optional only to enable temporary move into blocking thread.
    cache: Option<ModuleCache>,

    config: &'a Config,
    coverage: CommandBlockCov,
    filter: CmdFilter,
    heartbeat: Option<TaskHeartbeatClient>,
}

impl<'a> TaskContext<'a> {
    pub fn new(
        cache: ModuleCache,
        config: &'a Config,
        coverage: CommandBlockCov,
        filter: CmdFilter,
        heartbeat: Option<TaskHeartbeatClient>,
    ) -> Self {
        let cache = Some(cache);

        Self {
            cache,
            config,
            coverage,
            filter,
            heartbeat,
        }
    }

    pub async fn record_input(&mut self, input: &Path) -> Result<()> {
        let coverage = self.record_impl(input).await?;
        self.coverage.merge_max(&coverage);

        Ok(())
    }

    async fn record_impl(&mut self, input: &Path) -> Result<CommandBlockCov> {
        // Invariant: `self.cache` must be present on method enter and exit.
        let cache = self.cache.take().expect("module cache not present");

        let filter = self.filter.clone();
        let cmd = self.command_for_input(input)?;
        let timeout = self.config.timeout();
        let recorded =
            spawn_blocking(move || record_os_impl(cmd, timeout, cache, filter)).await??;

        // Maintain invariant.
        self.cache = Some(recorded.cache);

        Ok(recorded.coverage)
    }

    fn command_for_input(&self, input: &Path) -> Result<Command> {
        let expand = Expand::new()
            .input_path(input)
            .job_id(&self.config.common.job_id)
            .setup_dir(&self.config.common.setup_dir)
            .target_exe(&self.config.target_exe)
            .target_options(&self.config.target_options)
            .task_id(&self.config.common.task_id);

        let mut cmd = Command::new(&self.config.target_exe);

        let target_options = expand.evaluate(&self.config.target_options)?;
        cmd.args(target_options);

        for (k, v) in &self.config.target_env {
            cmd.env(k, expand.evaluate_value(v)?);
        }

        cmd.env_remove("RUST_LOG");
        cmd.stdin(Stdio::null());
        cmd.stdout(Stdio::piped());
        cmd.stderr(Stdio::piped());

        Ok(cmd)
    }

    pub async fn record_corpus(&mut self, dir: &Path) -> Result<usize> {
        use futures::stream::StreamExt;

        let mut corpus = fs::read_dir(dir)
            .await
            .map(ReadDirStream::new)
            .with_context(|| format!("unable to read corpus directory: {}", dir.display()))?;

        let mut count = 0;

        while let Some(entry) = corpus.next().await {
            match entry {
                Ok(entry) => {
                    if entry.file_type().await?.is_file() {
                        self.record_input(&entry.path()).await?;
                        count += 1;
                    } else {
                        warn!("skipping non-file dir entry: {}", entry.path().display());
                    }
                }
                Err(err) => {
                    error!("{:?}", err);
                }
            }
        }

        Ok(count)
    }

    pub async fn report_coverage_stats(&self) -> Result<()> {
        use EventData::*;

        let s = CoverageStats::new(&self.coverage);
        event!(coverage_data; Covered = s.covered, Features = s.features, Rate = s.rate);

        Ok(())
    }

    pub async fn save_and_sync_coverage(&self) -> Result<()> {
        let path = self.config.coverage.local_path.join(COVERAGE_FILE);
        let text = serde_json::to_string(&self.coverage).context("serializing coverage to JSON")?;

        fs::write(&path, &text)
            .await
            .with_context(|| format!("writing coverage to {}", path.display()))?;
        self.config.coverage.sync_push().await?;

        Ok(())
    }
}

struct Recorded {
    pub cache: ModuleCache,
    pub coverage: CommandBlockCov,
}

#[cfg(target_os = "linux")]
fn record_os_impl(
    cmd: Command,
    timeout: Duration,
    mut cache: ModuleCache,
    filter: CmdFilter,
) -> Result<Recorded> {
    use coverage::block::linux::Recorder;

    let coverage = Recorder::record(cmd, timeout, &mut cache, filter)?;

    Ok(Recorded { cache, coverage })
}

#[cfg(target_os = "windows")]
fn record_os_impl(
    cmd: Command,
    timeout: Duration,
    mut cache: ModuleCache,
    filter: CmdFilter,
) -> Result<Recorded> {
    use coverage::block::windows::{Recorder, RecorderEventHandler};

    let mut recorder = Recorder::new(&mut cache, filter);
    let mut handler = RecorderEventHandler::new(&mut recorder, timeout);
    handler.run(cmd)?;
    let coverage = recorder.into_coverage();

    Ok(Recorded { cache, coverage })
}

#[async_trait]
impl<'a> Processor for TaskContext<'a> {
    async fn process(&mut self, _url: Option<Url>, input: &Path) -> Result<()> {
        self.heartbeat.alive();

        self.record_input(input).await?;
        self.report_coverage_stats().await?;
        self.save_and_sync_coverage().await?;

        Ok(())
    }
}

#[derive(Default)]
struct CoverageStats {
    covered: u64,
    features: u64,
    rate: f64,
}

impl CoverageStats {
    pub fn new(coverage: &CommandBlockCov) -> Self {
        let mut stats = CoverageStats::default();

        for (_, module) in coverage.iter() {
            for block in module.blocks.values() {
                stats.features += 1;

                if block.count > 0 {
                    stats.covered += 1;
                }
            }
        }

        if stats.features > 0 {
            stats.rate = (stats.covered as f64) / (stats.features as f64)
        }

        stats
    }
}
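
In `record_impl` above, the module cache is temporarily moved into a blocking thread (the debugger-based recording is synchronous) and handed back when recording finishes, which is why `TaskContext::cache` is an `Option`. A minimal sketch of that take-and-restore pattern with stand-in types (assumes the `tokio` and `anyhow` crates; `ModuleCache`, `Recorded`, and the block count here are placeholders, not the `coverage` crate's types):

```rust
// Minimal sketch of the take-and-restore pattern used for `self.cache` above:
// the cache is moved into the blocking thread, then handed back with the result.
use anyhow::Result;
use tokio::task::spawn_blocking;

#[derive(Default)]
struct ModuleCache {
    hits: usize,
}

struct Recorded {
    cache: ModuleCache,
    blocks_covered: usize,
}

struct Task {
    // `Option` only so the cache can be moved out for the blocking call.
    cache: Option<ModuleCache>,
}

impl Task {
    async fn record_one(&mut self) -> Result<usize> {
        let mut cache = self.cache.take().expect("module cache not present");

        let recorded = spawn_blocking(move || {
            // Stand-in for the OS-specific, debugger-based recording.
            cache.hits += 1;
            Recorded { cache, blocks_covered: 42 }
        })
        .await?;

        // Maintain the invariant: the cache is put back before returning.
        self.cache = Some(recorded.cache);
        Ok(recorded.blocks_covered)
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let mut task = Task { cache: Some(ModuleCache::default()) };
    println!("covered {} blocks", task.record_one().await?);
    Ok(())
}
```
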

@@ -1,6 +1,7 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
 
+pub mod generic;
 pub mod libfuzzer_coverage;
 pub mod recorder;
 pub mod total;

@@ -361,6 +361,12 @@ def build_task_config(
             else True
         )
 
+    if TaskFeature.coverage_filter in definition.features:
+        coverage_filter = task_config.task.coverage_filter
+
+        if coverage_filter is not None:
+            config.coverage_filter = "setup/%s" % coverage_filter
+
     return config
 
 

@@ -14,6 +14,41 @@ from onefuzztypes.models import ContainerDefinition, TaskDefinition, VmDefinition
 
 # all tasks are required to have a 'setup' container
 TASK_DEFINITIONS = {
+    TaskType.coverage: TaskDefinition(
+        features=[
+            TaskFeature.target_exe,
+            TaskFeature.target_env,
+            TaskFeature.target_options,
+            TaskFeature.target_timeout,
+            TaskFeature.coverage_filter,
+        ],
+        vm=VmDefinition(compare=Compare.Equal, value=1),
+        containers=[
+            ContainerDefinition(
+                type=ContainerType.setup,
+                compare=Compare.Equal,
+                value=1,
+                permissions=[ContainerPermission.Read, ContainerPermission.List],
+            ),
+            ContainerDefinition(
+                type=ContainerType.readonly_inputs,
+                compare=Compare.AtLeast,
+                value=1,
+                permissions=[ContainerPermission.Read, ContainerPermission.List],
+            ),
+            ContainerDefinition(
+                type=ContainerType.coverage,
+                compare=Compare.Equal,
+                value=1,
+                permissions=[
+                    ContainerPermission.List,
+                    ContainerPermission.Read,
+                    ContainerPermission.Write,
+                ],
+            ),
+        ],
+        monitor_queue=ContainerType.readonly_inputs,
+    ),
     TaskType.generic_analysis: TaskDefinition(
         features=[
             TaskFeature.target_exe,

@@ -850,6 +850,7 @@ class Tasks(Endpoint):
         colocate: bool = False,
         report_list: Optional[List[str]] = None,
         minimized_stack_depth: Optional[int] = None,
+        coverage_filter: Optional[str] = None,
     ) -> models.Task:
         """
         Create a task

@@ -915,6 +916,7 @@ class Tasks(Endpoint):
                 report_list=report_list,
                 preserve_existing_outputs=preserve_existing_outputs,
                 minimized_stack_depth=minimized_stack_depth,
+                coverage_filter=coverage_filter,
             ),
         )
 

@@ -50,6 +50,7 @@ class Libfuzzer(Command):
         target_workers: Optional[int] = None,
         target_options: Optional[List[str]] = None,
         target_env: Optional[Dict[str, str]] = None,
+        target_timeout: Optional[int] = None,
         tags: Optional[Dict[str, str]] = None,
         check_retry_count: Optional[int] = None,
         crash_report_timeout: Optional[int] = None,

@@ -60,6 +61,7 @@ class Libfuzzer(Command):
         check_fuzzer_help: bool = True,
         expect_crash_on_failure: bool = False,
         minimized_stack_depth: Optional[int] = None,
+        coverage_filter: Optional[str] = None,
     ) -> None:
 
         regression_containers = [

@@ -72,6 +74,12 @@ class Libfuzzer(Command):
             ),
         ]
 
+        # We don't really need a separate timeout for crash reporting, and we could just
+        # use `target_timeout`. But `crash_report_timeout` was introduced first, so we
+        # can't remove it without a breaking change. Since both timeouts may be present,
+        # prefer the more task-specific timeout.
+        effective_crash_report_timeout = crash_report_timeout or target_timeout
+
         self.logger.info("creating libfuzzer_regression task")
         regression_task = self.onefuzz.tasks.create(
             job.job_id,

@@ -85,7 +93,7 @@ class Libfuzzer(Command):
             target_options=target_options,
             target_env=target_env,
             tags=tags,
-            target_timeout=crash_report_timeout,
+            target_timeout=effective_crash_report_timeout,
             check_retry_count=check_retry_count,
             check_fuzzer_help=check_fuzzer_help,
             debug=debug,

@@ -131,23 +139,47 @@ class Libfuzzer(Command):
             (ContainerType.coverage, containers[ContainerType.coverage]),
             (ContainerType.readonly_inputs, containers[ContainerType.inputs]),
         ]
-        self.logger.info("creating libfuzzer_coverage task")
+        self.logger.info("creating coverage task")
+
+        # The `coverage` task is not libFuzzer-aware, so invocations of the target fuzzer
+        # against an input do not automatically add an `{input}` specifier to the command
+        # args. That means on the VM, the fuzzer will get run in fuzzing mode each time we
+        # try to test an input.
+        #
+        # We cannot require `{input}` occur in `target_options`, since that would break
+        # the current assumptions of the libFuzzer-aware tasks, as well as be a breaking
+        # API change.
+        #
+        # For now, locally extend the `target_options` for this task only, to ensure that
+        # test case invocations work as expected.
+        coverage_target_options = target_options or []
+        coverage_target_options.append("{input}")
+
+        # Opposite precedence to `effective_crash_report_timeout`.
+        #
+        # If the user specified a timeout for crash reporting but not a general target
+        # timeout, consider that to be a better (more target-aware) default than the
+        # default in the agent.
+        coverage_timeout = target_timeout or crash_report_timeout
+
         self.onefuzz.tasks.create(
             job.job_id,
-            TaskType.libfuzzer_coverage,
+            TaskType.coverage,
             target_exe,
             coverage_containers,
             pool_name=pool_name,
             duration=duration,
             vm_count=1,
             reboot_after_setup=reboot_after_setup,
-            target_options=target_options,
+            target_options=coverage_target_options,
             target_env=target_env,
+            target_timeout=coverage_timeout,
             tags=tags,
             prereq_tasks=prereq_tasks,
             debug=debug,
             colocate=colocate_all_tasks or colocate_secondary_tasks,
             check_fuzzer_help=check_fuzzer_help,
+            coverage_filter=coverage_filter,
         )
 
         report_containers = [

@@ -172,7 +204,7 @@ class Libfuzzer(Command):
             target_env=target_env,
             tags=tags,
             prereq_tasks=prereq_tasks,
-            target_timeout=crash_report_timeout,
+            target_timeout=effective_crash_report_timeout,
             check_retry_count=check_retry_count,
             check_fuzzer_help=check_fuzzer_help,
             debug=debug,

@@ -196,6 +228,7 @@ class Libfuzzer(Command):
         target_workers: Optional[int] = None,
         target_options: Optional[List[str]] = None,
         target_env: Optional[Dict[str, str]] = None,
+        target_timeout: Optional[int] = None,
         check_retry_count: Optional[int] = None,
         crash_report_timeout: Optional[int] = None,
         tags: Optional[Dict[str, str]] = None,

@@ -212,6 +245,7 @@ class Libfuzzer(Command):
         check_fuzzer_help: bool = True,
         expect_crash_on_failure: bool = False,
         minimized_stack_depth: Optional[int] = None,
+        coverage_filter: Optional[File] = None,
     ) -> Optional[Job]:
         """
         Basic libfuzzer job

@@ -272,6 +306,13 @@ class Libfuzzer(Command):
 
         target_exe_blob_name = helper.setup_relative_blob_name(target_exe, setup_dir)
 
+        if coverage_filter:
+            coverage_filter_blob_name: Optional[str] = helper.setup_relative_blob_name(
+                coverage_filter, setup_dir
+            )
+        else:
+            coverage_filter_blob_name = None
+
         self._create_tasks(
             job=helper.job,
             containers=helper.containers,

@@ -293,6 +334,7 @@ class Libfuzzer(Command):
             check_fuzzer_help=check_fuzzer_help,
             expect_crash_on_failure=expect_crash_on_failure,
             minimized_stack_depth=minimized_stack_depth,
+            coverage_filter=coverage_filter_blob_name,
         )
 
         self.logger.info("done creating tasks")

@@ -80,6 +80,7 @@ class TaskFeature(Enum):
     expect_crash_on_failure = "expect_crash_on_failure"
     report_list = "report_list"
     minimized_stack_depth = "minimized_stack_depth"
+    coverage_filter = "coverage_filter"
 
 
 # Permissions for an Azure Blob Storage Container.

@@ -147,6 +148,7 @@ class TaskState(Enum):
 
 
 class TaskType(Enum):
+    coverage = "coverage"
     libfuzzer_fuzz = "libfuzzer_fuzz"
     libfuzzer_coverage = "libfuzzer_coverage"
     libfuzzer_crash_report = "libfuzzer_crash_report"

@@ -172,6 +172,7 @@ class TaskDetails(BaseModel):
     preserve_existing_outputs: Optional[bool]
     report_list: Optional[List[str]]
     minimized_stack_depth: Optional[int]
+    coverage_filter: Optional[str]
 
     @validator("check_retry_count", allow_reuse=True)
     def validate_check_retry_count(cls, value: int) -> int:

@@ -406,6 +407,7 @@ class TaskUnitConfig(BaseModel):
     ensemble_sync_delay: Optional[int]
     report_list: Optional[List[str]]
     minimized_stack_depth: Optional[int]
+    coverage_filter: Optional[str]
 
     # from here forwards are Container definitions. These need to be inline
     # with TaskDefinitions and ContainerTypes