Release 8.7.1 (hotfix) (#3459)

* Remove the retention policy setting (#3452)

---------

Co-authored-by: Cheick Keita <chkeita@microsoft.com>
Authored by Adam on 2023-08-29 12:57:19 -07:00, committed by GitHub
parent 2a88838160
commit c69deed50e
59 changed files with 1390 additions and 873 deletions

View File

@@ -13,7 +13,6 @@
"**/target/**": true
},
"lldb.executable": "/usr/bin/lldb",
"dotnet.server.useOmnisharp": true,
"omnisharp.enableEditorConfigSupport": true,
"omnisharp.enableRoslynAnalyzers": true,
"python.defaultInterpreterPath": "/workspaces/onefuzz/src/venv/bin/python",
@@ -49,4 +48,4 @@
"features": {
"ghcr.io/devcontainers/features/azure-cli:1": {}
}
}
}

View File

@@ -542,11 +542,9 @@ jobs:
mkdir -p artifacts/linux-libfuzzer
mkdir -p artifacts/linux-libfuzzer-with-options
mkdir -p artifacts/mariner-libfuzzer
(cd libfuzzer ; make )
cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer
cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/linux-libfuzzer-with-options
cp -r libfuzzer/fuzz.exe libfuzzer/seeds artifacts/mariner-libfuzzer
mkdir -p artifacts/linux-libfuzzer-regression
(cd libfuzzer-regression ; make )

View File

@@ -7,6 +7,12 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 8.7.1
### Fixed
* Service: Removed deprecated Azure retention policy setting that was causing scaleset deployment errors [#3452](https://github.com/microsoft/onefuzz/pull/3452)
## 8.7.0
### Added

View File

@@ -1 +1 @@
8.7.0
8.7.1

View File

@@ -1,60 +0,0 @@
using System.Text.Json;
using Microsoft.Azure.Functions.Worker;
using Microsoft.Extensions.Logging;
using Microsoft.OneFuzz.Service.OneFuzzLib.Orm;
namespace Microsoft.OneFuzz.Service.Functions;
public class QueueJobResult {
private readonly ILogger _log;
private readonly IOnefuzzContext _context;
public QueueJobResult(ILogger<QueueJobResult> logTracer, IOnefuzzContext context) {
_log = logTracer;
_context = context;
}
[Function("QueueJobResult")]
public async Async.Task Run([QueueTrigger("job-result", Connection = "AzureWebJobsStorage")] string msg) {
var _tasks = _context.TaskOperations;
var _jobs = _context.JobOperations;
_log.LogInformation("job result: {msg}", msg);
var jr = JsonSerializer.Deserialize<TaskJobResultEntry>(msg, EntityConverter.GetJsonSerializerOptions()).EnsureNotNull($"wrong data {msg}");
var task = await _tasks.GetByTaskId(jr.TaskId);
if (task == null) {
_log.LogWarning("invalid {TaskId}", jr.TaskId);
return;
}
var job = await _jobs.Get(task.JobId);
if (job == null) {
_log.LogWarning("invalid {JobId}", task.JobId);
return;
}
JobResultData? data = jr.Data;
if (data == null) {
_log.LogWarning($"job result data is empty, throwing out: {jr}");
return;
}
var jobResultType = data.Type;
_log.LogInformation($"job result data type: {jobResultType}");
Dictionary<string, double> value;
if (jr.Value.Count > 0) {
value = jr.Value;
} else {
_log.LogWarning($"job result data is empty, throwing out: {jr}");
return;
}
var jobResult = await _context.JobResultOperations.CreateOrUpdate(job.JobId, jobResultType, value);
if (!jobResult.IsOk) {
_log.LogError("failed to create or update with job result {JobId}", job.JobId);
}
}
}

View File

@@ -33,19 +33,6 @@ public enum HeartbeatType {
TaskAlive,
}
[SkipRename]
public enum JobResultType {
NewCrashingInput,
NoReproCrashingInput,
NewReport,
NewUniqueReport,
NewRegressionReport,
NewCoverage,
NewCrashDump,
CoverageData,
RuntimeStats,
}
public record HeartbeatData(HeartbeatType Type);
public record TaskHeartbeatEntry(
@@ -54,16 +41,6 @@ public record TaskHeartbeatEntry(
Guid MachineId,
HeartbeatData[] Data);
public record JobResultData(JobResultType Type);
public record TaskJobResultEntry(
Guid TaskId,
Guid? JobId,
Guid MachineId,
JobResultData Data,
Dictionary<string, double> Value
);
public record NodeHeartbeatEntry(Guid NodeId, HeartbeatData[] Data);
public record NodeCommandStopIfFree();
@@ -915,27 +892,6 @@ public record SecretAddress<T>(Uri Url) : ISecret<T> {
public record SecretData<T>(ISecret<T> Secret) {
}
public record JobResult(
[PartitionKey][RowKey] Guid JobId,
string Project,
string Name,
double NewCrashingInput = 0,
double NoReproCrashingInput = 0,
double NewReport = 0,
double NewUniqueReport = 0,
double NewRegressionReport = 0,
double NewCrashDump = 0,
double InstructionsCovered = 0,
double TotalInstructions = 0,
double CoverageRate = 0,
double IterationCount = 0
) : EntityBase() {
public JobResult(Guid JobId, string Project, string Name) : this(
JobId: JobId,
Project: Project,
Name: Name, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) { }
}
public record JobConfig(
string Project,
string Name,
@@ -1100,7 +1056,6 @@ public record TaskUnitConfig(
string? InstanceTelemetryKey,
string? MicrosoftTelemetryKey,
Uri HeartbeatQueue,
Uri JobResultQueue,
Dictionary<string, string> Tags
) {
public Uri? inputQueue { get; set; }

View File

@@ -118,7 +118,6 @@ public class Program {
.AddScoped<IVmOperations, VmOperations>()
.AddScoped<ISecretsOperations, SecretsOperations>()
.AddScoped<IJobOperations, JobOperations>()
.AddScoped<IJobResultOperations, JobResultOperations>()
.AddScoped<INsgOperations, NsgOperations>()
.AddScoped<IScheduler, Scheduler>()
.AddScoped<IConfig, Config>()

View File

@@ -71,7 +71,6 @@ public class Config : IConfig {
InstanceTelemetryKey: _serviceConfig.ApplicationInsightsInstrumentationKey,
MicrosoftTelemetryKey: _serviceConfig.OneFuzzTelemetry,
HeartbeatQueue: await _queue.GetQueueSas("task-heartbeat", StorageType.Config, QueueSasPermissions.Add) ?? throw new Exception("unable to get heartbeat queue sas"),
JobResultQueue: await _queue.GetQueueSas("job-result", StorageType.Config, QueueSasPermissions.Add) ?? throw new Exception("unable to get job result queue sas"),
Tags: task.Config.Tags ?? new Dictionary<string, string>()
);

View File

@@ -36,9 +36,7 @@ public class Extensions : IExtensions {
var extensions = new List<VMExtensionWrapper>();
var instanceConfig = await _context.ConfigOperations.Fetch();
if (vmOs == Os.Windows) {
extensions.Add(await MonitorExtension(region));
}
extensions.Add(await MonitorExtension(region, vmOs));
var dependency = DependencyExtension(region, vmOs);
if (dependency is not null) {
@@ -331,21 +329,37 @@ public class Extensions : IExtensions {
throw new NotSupportedException($"unsupported OS: {vmOs}");
}
public async Async.Task<VMExtensionWrapper> MonitorExtension(AzureLocation region) {
public async Async.Task<VMExtensionWrapper> MonitorExtension(AzureLocation region, Os vmOs) {
var settings = await _context.LogAnalytics.GetMonitorSettings();
var extensionSettings = JsonSerializer.Serialize(new { WorkspaceId = settings.Id }, _extensionSerializerOptions);
var protectedExtensionSettings = JsonSerializer.Serialize(new { WorkspaceKey = settings.Key }, _extensionSerializerOptions);
return new VMExtensionWrapper {
Location = region,
Name = "OMSExtension",
TypePropertiesType = "MicrosoftMonitoringAgent",
Publisher = "Microsoft.EnterpriseCloud.Monitoring",
TypeHandlerVersion = "1.0",
AutoUpgradeMinorVersion = true,
Settings = new BinaryData(extensionSettings),
ProtectedSettings = new BinaryData(protectedExtensionSettings),
EnableAutomaticUpgrade = false
};
if (vmOs == Os.Windows) {
return new VMExtensionWrapper {
Location = region,
Name = "OMSExtension",
TypePropertiesType = "MicrosoftMonitoringAgent",
Publisher = "Microsoft.EnterpriseCloud.Monitoring",
TypeHandlerVersion = "1.0",
AutoUpgradeMinorVersion = true,
Settings = new BinaryData(extensionSettings),
ProtectedSettings = new BinaryData(protectedExtensionSettings),
EnableAutomaticUpgrade = false
};
} else if (vmOs == Os.Linux) {
return new VMExtensionWrapper {
Location = region,
Name = "OmsAgentForLinux",
TypePropertiesType = "OmsAgentForLinux",
Publisher = "Microsoft.EnterpriseCloud.Monitoring",
TypeHandlerVersion = "1.0",
AutoUpgradeMinorVersion = true,
Settings = new BinaryData(extensionSettings),
ProtectedSettings = new BinaryData(protectedExtensionSettings),
EnableAutomaticUpgrade = false
};
} else {
throw new NotSupportedException($"unsupported os: {vmOs}");
}
}

View File

@@ -1,121 +0,0 @@
using ApiService.OneFuzzLib.Orm;
using Microsoft.Extensions.Logging;
using Polly;
namespace Microsoft.OneFuzz.Service;
public interface IJobResultOperations : IOrm<JobResult> {
Async.Task<JobResult?> GetJobResult(Guid jobId);
Async.Task<OneFuzzResultVoid> CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary<string, double> resultValue);
}
public class JobResultOperations : Orm<JobResult>, IJobResultOperations {
public JobResultOperations(ILogger<JobResultOperations> log, IOnefuzzContext context)
: base(log, context) {
}
public async Async.Task<JobResult?> GetJobResult(Guid jobId) {
return await SearchByPartitionKeys(new[] { jobId.ToString() }).SingleOrDefaultAsync();
}
private JobResult UpdateResult(JobResult result, JobResultType type, Dictionary<string, double> resultValue) {
var newResult = result;
double newValue;
switch (type) {
case JobResultType.NewCrashingInput:
newValue = result.NewCrashingInput + resultValue["count"];
newResult = result with { NewCrashingInput = newValue };
break;
case JobResultType.NewReport:
newValue = result.NewReport + resultValue["count"];
newResult = result with { NewReport = newValue };
break;
case JobResultType.NewUniqueReport:
newValue = result.NewUniqueReport + resultValue["count"];
newResult = result with { NewUniqueReport = newValue };
break;
case JobResultType.NewRegressionReport:
newValue = result.NewRegressionReport + resultValue["count"];
newResult = result with { NewRegressionReport = newValue };
break;
case JobResultType.NewCrashDump:
newValue = result.NewCrashDump + resultValue["count"];
newResult = result with { NewCrashDump = newValue };
break;
case JobResultType.CoverageData:
double newCovered = resultValue["covered"];
double newTotalCovered = resultValue["features"];
double newCoverageRate = resultValue["rate"];
newResult = result with { InstructionsCovered = newCovered, TotalInstructions = newTotalCovered, CoverageRate = newCoverageRate };
break;
case JobResultType.RuntimeStats:
double newTotalIterations = resultValue["total_count"];
newResult = result with { IterationCount = newTotalIterations };
break;
default:
_logTracer.LogWarning($"Invalid Field {type}.");
break;
}
_logTracer.LogInformation($"Attempting to log new result: {newResult}");
return newResult;
}
private async Async.Task<bool> TryUpdate(Job job, JobResultType resultType, Dictionary<string, double> resultValue) {
var jobId = job.JobId;
var jobResult = await GetJobResult(jobId);
if (jobResult == null) {
_logTracer.LogInformation("Creating new JobResult for Job {JobId}", jobId);
var entry = new JobResult(JobId: jobId, Project: job.Config.Project, Name: job.Config.Name);
jobResult = UpdateResult(entry, resultType, resultValue);
var r = await Insert(jobResult);
if (!r.IsOk) {
throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}");
}
_logTracer.LogInformation("created job result {JobId}", jobResult.JobId);
} else {
_logTracer.LogInformation("Updating existing JobResult entry for Job {JobId}", jobId);
jobResult = UpdateResult(jobResult, resultType, resultValue);
var r = await Update(jobResult);
if (!r.IsOk) {
throw new InvalidOperationException($"failed to insert job result {jobResult.JobId}");
}
_logTracer.LogInformation("updated job result {JobId}", jobResult.JobId);
}
return true;
}
public async Async.Task<OneFuzzResultVoid> CreateOrUpdate(Guid jobId, JobResultType resultType, Dictionary<string, double> resultValue) {
var job = await _context.JobOperations.Get(jobId);
if (job == null) {
return OneFuzzResultVoid.Error(ErrorCode.INVALID_REQUEST, "invalid job");
}
var success = false;
try {
_logTracer.LogInformation("attempt to update job result {JobId}", job.JobId);
var policy = Policy.Handle<InvalidOperationException>().WaitAndRetryAsync(50, _ => new TimeSpan(0, 0, 5));
await policy.ExecuteAsync(async () => {
success = await TryUpdate(job, resultType, resultValue);
_logTracer.LogInformation("attempt {success}", success);
});
return OneFuzzResultVoid.Ok;
} catch (Exception e) {
return OneFuzzResultVoid.Error(ErrorCode.UNABLE_TO_UPDATE, new string[] {
$"Unexpected failure when attempting to update job result for {job.JobId}",
$"Exception: {e}"
});
}
}
}

View File

@@ -19,7 +19,6 @@ public interface IOnefuzzContext {
IExtensions Extensions { get; }
IIpOperations IpOperations { get; }
IJobOperations JobOperations { get; }
IJobResultOperations JobResultOperations { get; }
ILogAnalytics LogAnalytics { get; }
INodeMessageOperations NodeMessageOperations { get; }
INodeOperations NodeOperations { get; }
@@ -84,7 +83,6 @@ public class OnefuzzContext : IOnefuzzContext {
public IVmOperations VmOperations => _serviceProvider.GetRequiredService<IVmOperations>();
public ISecretsOperations SecretsOperations => _serviceProvider.GetRequiredService<ISecretsOperations>();
public IJobOperations JobOperations => _serviceProvider.GetRequiredService<IJobOperations>();
public IJobResultOperations JobResultOperations => _serviceProvider.GetRequiredService<IJobResultOperations>();
public IScheduler Scheduler => _serviceProvider.GetRequiredService<IScheduler>();
public IConfig Config => _serviceProvider.GetRequiredService<IConfig>();
public ILogAnalytics LogAnalytics => _serviceProvider.GetRequiredService<ILogAnalytics>();

View File

@@ -32,7 +32,6 @@ public sealed class TestContext : IOnefuzzContext {
TaskOperations = new TaskOperations(provider.CreateLogger<TaskOperations>(), Cache, this);
NodeOperations = new NodeOperations(provider.CreateLogger<NodeOperations>(), this);
JobOperations = new JobOperations(provider.CreateLogger<JobOperations>(), this);
JobResultOperations = new JobResultOperations(provider.CreateLogger<JobResultOperations>(), this);
NodeTasksOperations = new NodeTasksOperations(provider.CreateLogger<NodeTasksOperations>(), this);
TaskEventOperations = new TaskEventOperations(provider.CreateLogger<TaskEventOperations>(), this);
NodeMessageOperations = new NodeMessageOperations(provider.CreateLogger<NodeMessageOperations>(), this);
@@ -58,7 +57,6 @@ public sealed class TestContext : IOnefuzzContext {
Node n => NodeOperations.Insert(n),
Pool p => PoolOperations.Insert(p),
Job j => JobOperations.Insert(j),
JobResult jr => JobResultOperations.Insert(jr),
Repro r => ReproOperations.Insert(r),
Scaleset ss => ScalesetOperations.Insert(ss),
NodeTasks nt => NodeTasksOperations.Insert(nt),
@@ -86,7 +84,6 @@ public sealed class TestContext : IOnefuzzContext {
public ITaskOperations TaskOperations { get; }
public IJobOperations JobOperations { get; }
public IJobResultOperations JobResultOperations { get; }
public INodeOperations NodeOperations { get; }
public INodeTasksOperations NodeTasksOperations { get; }
public ITaskEventOperations TaskEventOperations { get; }

src/agent/Cargo.lock (generated, 16 lines changed)
View File

@@ -2123,7 +2123,6 @@ dependencies = [
"log",
"nix",
"notify",
"onefuzz-result",
"onefuzz-telemetry",
"pete",
"pretty_assertions",
@@ -2198,20 +2197,6 @@ dependencies = [
"serde_json",
]
[[package]]
name = "onefuzz-result"
version = "0.2.0"
dependencies = [
"anyhow",
"async-trait",
"log",
"onefuzz-telemetry",
"reqwest",
"serde",
"storage-queue",
"uuid",
]
[[package]]
name = "onefuzz-task"
version = "0.2.0"
@@ -2241,7 +2226,6 @@ dependencies = [
"num_cpus",
"onefuzz",
"onefuzz-file-format",
"onefuzz-result",
"onefuzz-telemetry",
"path-absolutize",
"pretty_assertions",

View File

@@ -10,7 +10,6 @@ members = [
"onefuzz",
"onefuzz-task",
"onefuzz-agent",
"onefuzz-result",
"onefuzz-file-format",
"onefuzz-telemetry",
"reqwest-retry",

View File

@@ -34,8 +34,6 @@ pub struct StaticConfig {
pub heartbeat_queue: Option<Url>,
pub job_result_queue: Option<Url>,
pub instance_id: Uuid,
#[serde(default = "default_as_true")]
@@ -73,8 +71,6 @@ struct RawStaticConfig {
pub heartbeat_queue: Option<Url>,
pub job_result_queue: Option<Url>,
pub instance_id: Uuid,
#[serde(default = "default_as_true")]
@@ -121,7 +117,6 @@ impl StaticConfig {
microsoft_telemetry_key: config.microsoft_telemetry_key,
instance_telemetry_key: config.instance_telemetry_key,
heartbeat_queue: config.heartbeat_queue,
job_result_queue: config.job_result_queue,
instance_id: config.instance_id,
managed: config.managed,
machine_identity,
@@ -157,12 +152,6 @@ impl StaticConfig {
None
};
let job_result_queue = if let Ok(key) = std::env::var("ONEFUZZ_JOB_RESULT") {
Some(Url::parse(&key)?)
} else {
None
};
let instance_telemetry_key =
if let Ok(key) = std::env::var("ONEFUZZ_INSTANCE_TELEMETRY_KEY") {
Some(InstanceTelemetryKey::new(Uuid::parse_str(&key)?))
@@ -194,7 +183,6 @@ impl StaticConfig {
instance_telemetry_key,
microsoft_telemetry_key,
heartbeat_queue,
job_result_queue,
instance_id,
managed: !is_unmanaged,
machine_identity,

View File

@@ -210,3 +210,32 @@ async fn sync_file(
blob_client.append_block(Body::from(f)).await?;
Ok(len)
}
#[cfg(test)]
mod tests {
use std::io::Seek;
use anyhow::Result;
use tokio::io::{AsyncReadExt, AsyncSeekExt};
#[allow(clippy::unused_io_amount)]
#[tokio::test]
#[ignore]
async fn test_seek_behavior() -> Result<()> {
let path = "C:\\temp\\test.ps1";
let mut std_file = std::fs::File::open(path)?;
std_file.seek(std::io::SeekFrom::Start(3))?;
let mut tokio_file = tokio::fs::File::from_std(std_file);
let buf = &mut [0u8; 5];
tokio_file.read(buf).await?;
println!("******** buf {:?}", buf);
tokio_file.seek(std::io::SeekFrom::Start(0)).await?;
tokio_file.read(buf).await?;
println!("******** buf {:?}", buf);
Ok(())
}
}

View File

@@ -91,10 +91,7 @@ impl WorkSet {
pub fn setup_dir(&self) -> Result<PathBuf> {
let root = self.get_root_folder()?;
// Putting the setup container at the root for backward compatibility.
// The path of setup folder can be used as part of the deduplication logic in the bug filing service
let setup_root = root.parent().ok_or_else(|| anyhow!("Invalid root"))?;
self.setup_url.as_path(setup_root)
self.setup_url.as_path(root)
}
pub fn extra_setup_dir(&self) -> Result<Option<PathBuf>> {
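Note: the hunk above reverts the 8.7.0 change that resolved the setup container next to the workset root. A minimal sketch of the two layouts, assuming an illustrative root of /onefuzz/work/workset (the paths and helper names below are hypothetical, not from this commit):

```rust
use std::path::{Path, PathBuf};

// 8.7.0 behavior (removed above): place the setup dir *beside* the workset
// root, so its path could feed the bug filing service's deduplication logic.
fn setup_dir_beside_root(root: &Path, container: &str) -> Option<PathBuf> {
    root.parent().map(|parent| parent.join(container))
}

// 8.7.1 behavior (restored above): place the setup dir *under* the root.
fn setup_dir_under_root(root: &Path, container: &str) -> PathBuf {
    root.join(container)
}

fn main() {
    let root = Path::new("/onefuzz/work/workset");
    assert_eq!(
        setup_dir_beside_root(root, "setup"),
        Some(PathBuf::from("/onefuzz/work/setup"))
    );
    assert_eq!(
        setup_dir_under_root(root, "setup"),
        PathBuf::from("/onefuzz/work/workset/setup")
    );
}
```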

View File

@@ -1,18 +0,0 @@
[package]
name = "onefuzz-result"
version = "0.2.0"
authors = ["fuzzing@microsoft.com"]
edition = "2021"
publish = false
license = "MIT"
[dependencies]
anyhow = { version = "1.0", features = ["backtrace"] }
async-trait = "0.1"
reqwest = "0.11"
serde = "1.0"
storage-queue = { path = "../storage-queue" }
uuid = { version = "1.4", features = ["serde", "v4"] }
onefuzz-telemetry = { path = "../onefuzz-telemetry" }
log = "0.4"

View File

@@ -1,129 +0,0 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use anyhow::Result;
use async_trait::async_trait;
use onefuzz_telemetry::warn;
use reqwest::Url;
use serde::{self, Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use storage_queue::QueueClient;
use uuid::Uuid;
#[derive(Debug, Deserialize, Serialize, Hash, Eq, PartialEq, Clone)]
#[serde(tag = "type")]
pub enum JobResultData {
NewCrashingInput,
NoReproCrashingInput,
NewReport,
NewUniqueReport,
NewRegressionReport,
NewCoverage,
NewCrashDump,
CoverageData,
RuntimeStats,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
struct JobResult {
task_id: Uuid,
job_id: Uuid,
machine_id: Uuid,
machine_name: String,
data: JobResultData,
value: HashMap<String, f64>,
}
#[derive(Clone)]
pub struct TaskContext {
task_id: Uuid,
job_id: Uuid,
machine_id: Uuid,
machine_name: String,
}
pub struct JobResultContext<TaskContext> {
pub state: TaskContext,
pub queue_client: QueueClient,
}
pub struct JobResultClient<TaskContext> {
pub context: Arc<JobResultContext<TaskContext>>,
}
impl<TaskContext> JobResultClient<TaskContext> {
pub fn init_job_result(
context: TaskContext,
queue_url: Url,
) -> Result<JobResultClient<TaskContext>>
where
TaskContext: Send + Sync + 'static,
{
let context = Arc::new(JobResultContext {
state: context,
queue_client: QueueClient::new(queue_url)?,
});
Ok(JobResultClient { context })
}
}
pub type TaskJobResultClient = JobResultClient<TaskContext>;
pub async fn init_job_result(
queue_url: Url,
task_id: Uuid,
job_id: Uuid,
machine_id: Uuid,
machine_name: String,
) -> Result<TaskJobResultClient> {
let hb = JobResultClient::init_job_result(
TaskContext {
task_id,
job_id,
machine_id,
machine_name,
},
queue_url,
)?;
Ok(hb)
}
#[async_trait]
pub trait JobResultSender {
async fn send_direct(&self, data: JobResultData, value: HashMap<String, f64>);
}
#[async_trait]
impl JobResultSender for TaskJobResultClient {
async fn send_direct(&self, data: JobResultData, value: HashMap<String, f64>) {
let task_id = self.context.state.task_id;
let job_id = self.context.state.job_id;
let machine_id = self.context.state.machine_id;
let machine_name = self.context.state.machine_name.clone();
let _ = self
.context
.queue_client
.enqueue(JobResult {
task_id,
job_id,
machine_id,
machine_name,
data,
value,
})
.await;
}
}
#[async_trait]
impl JobResultSender for Option<TaskJobResultClient> {
async fn send_direct(&self, data: JobResultData, value: HashMap<String, f64>) {
match self {
Some(client) => client.send_direct(data, value).await,
None => warn!("Failed to send Job Result message data from agent."),
}
}
}

View File

@@ -1,4 +0,0 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
pub mod job_result;

View File

@@ -39,7 +39,6 @@ serde_json = "1.0"
serde_yaml = "0.9.21"
onefuzz = { path = "../onefuzz" }
onefuzz-telemetry = { path = "../onefuzz-telemetry" }
onefuzz-result = { path = "../onefuzz-result" }
path-absolutize = "3.1"
reqwest-retry = { path = "../reqwest-retry" }
strum = "0.25"

View File

@@ -3,7 +3,11 @@
#[cfg(any(target_os = "linux", target_os = "windows"))]
use crate::local::coverage;
use crate::local::{common::add_common_config, libfuzzer_fuzz, tui::TerminalUi};
use crate::local::{
common::add_common_config, generic_analysis, generic_crash_report, generic_generator,
libfuzzer, libfuzzer_crash_report, libfuzzer_fuzz, libfuzzer_merge, libfuzzer_regression,
libfuzzer_test_input, radamsa, test_input, tui::TerminalUi,
};
use anyhow::{Context, Result};
use clap::{Arg, ArgAction, Command};
use std::time::Duration;
@@ -17,9 +21,19 @@ use super::template;
#[derive(Debug, PartialEq, Eq, EnumString, IntoStaticStr, EnumIter)]
#[strum(serialize_all = "kebab-case")]
enum Commands {
Radamsa,
#[cfg(any(target_os = "linux", target_os = "windows"))]
Coverage,
LibfuzzerFuzz,
LibfuzzerMerge,
LibfuzzerCrashReport,
LibfuzzerTestInput,
LibfuzzerRegression,
Libfuzzer,
CrashReport,
Generator,
Analysis,
TestInput,
Template,
}
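Note: the restored Commands enum maps variants to subcommand names through strum's kebab-case serialization. A self-contained sketch of that mapping, assuming the strum/strum_macros crates (the Demo enum here is illustrative, not part of the commit):

```rust
use strum_macros::{EnumString, IntoStaticStr};

// Mirrors the derive attributes on Commands above: kebab-case names are
// generated from the variant identifiers.
#[derive(Debug, PartialEq, EnumString, IntoStaticStr)]
#[strum(serialize_all = "kebab-case")]
enum Demo {
    LibfuzzerCrashReport,
    TestInput,
}

fn main() {
    // Variant -> &'static str, as used when building clap subcommands.
    let name: &'static str = Demo::LibfuzzerCrashReport.into();
    assert_eq!(name, "libfuzzer-crash-report");

    // &str -> variant, as used when dispatching a parsed subcommand.
    let parsed: Demo = "test-input".parse().unwrap();
    assert_eq!(parsed, Demo::TestInput);
}
```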
@@ -54,7 +68,23 @@ pub async fn run(args: clap::ArgMatches) -> Result<()> {
match command {
#[cfg(any(target_os = "linux", target_os = "windows"))]
Commands::Coverage => coverage::run(&sub_args, event_sender).await,
Commands::Radamsa => radamsa::run(&sub_args, event_sender).await,
Commands::LibfuzzerCrashReport => {
libfuzzer_crash_report::run(&sub_args, event_sender).await
}
Commands::LibfuzzerFuzz => libfuzzer_fuzz::run(&sub_args, event_sender).await,
Commands::LibfuzzerMerge => libfuzzer_merge::run(&sub_args, event_sender).await,
Commands::LibfuzzerTestInput => {
libfuzzer_test_input::run(&sub_args, event_sender).await
}
Commands::LibfuzzerRegression => {
libfuzzer_regression::run(&sub_args, event_sender).await
}
Commands::Libfuzzer => libfuzzer::run(&sub_args, event_sender).await,
Commands::CrashReport => generic_crash_report::run(&sub_args, event_sender).await,
Commands::Generator => generic_generator::run(&sub_args, event_sender).await,
Commands::Analysis => generic_analysis::run(&sub_args, event_sender).await,
Commands::TestInput => test_input::run(&sub_args, event_sender).await,
Commands::Template => {
let config = sub_args
.get_one::<PathBuf>("config")
@@ -110,7 +140,17 @@ pub fn args(name: &'static str) -> Command {
let app = match subcommand {
#[cfg(any(target_os = "linux", target_os = "windows"))]
Commands::Coverage => coverage::args(subcommand.into()),
Commands::Radamsa => radamsa::args(subcommand.into()),
Commands::LibfuzzerCrashReport => libfuzzer_crash_report::args(subcommand.into()),
Commands::LibfuzzerFuzz => libfuzzer_fuzz::args(subcommand.into()),
Commands::LibfuzzerMerge => libfuzzer_merge::args(subcommand.into()),
Commands::LibfuzzerTestInput => libfuzzer_test_input::args(subcommand.into()),
Commands::LibfuzzerRegression => libfuzzer_regression::args(subcommand.into()),
Commands::Libfuzzer => libfuzzer::args(subcommand.into()),
Commands::CrashReport => generic_crash_report::args(subcommand.into()),
Commands::Generator => generic_generator::args(subcommand.into()),
Commands::Analysis => generic_analysis::args(subcommand.into()),
Commands::TestInput => test_input::args(subcommand.into()),
Commands::Template => Command::new("template")
.about("uses the template to generate a run")
.args(vec![Arg::new("config")

View File

@@ -26,10 +26,20 @@ pub const INPUTS_DIR: &str = "inputs_dir";
pub const CRASHES_DIR: &str = "crashes_dir";
pub const CRASHDUMPS_DIR: &str = "crashdumps_dir";
pub const TARGET_WORKERS: &str = "target_workers";
pub const REPORTS_DIR: &str = "reports_dir";
pub const NO_REPRO_DIR: &str = "no_repro_dir";
pub const TARGET_TIMEOUT: &str = "target_timeout";
pub const CHECK_RETRY_COUNT: &str = "check_retry_count";
pub const DISABLE_CHECK_QUEUE: &str = "disable_check_queue";
pub const UNIQUE_REPORTS_DIR: &str = "unique_reports_dir";
pub const COVERAGE_DIR: &str = "coverage_dir";
pub const READONLY_INPUTS: &str = "readonly_inputs_dir";
pub const CHECK_ASAN_LOG: &str = "check_asan_log";
pub const TOOLS_DIR: &str = "tools_dir";
pub const RENAME_OUTPUT: &str = "rename_output";
pub const CHECK_FUZZER_HELP: &str = "check_fuzzer_help";
pub const DISABLE_CHECK_DEBUGGER: &str = "disable_check_debugger";
pub const REGRESSION_REPORTS_DIR: &str = "regression_reports_dir";
pub const TARGET_EXE: &str = "target_exe";
pub const TARGET_ENV: &str = "target_env";
@@ -37,6 +47,17 @@ pub const TARGET_OPTIONS: &str = "target_options";
// pub const SUPERVISOR_EXE: &str = "supervisor_exe";
// pub const SUPERVISOR_ENV: &str = "supervisor_env";
// pub const SUPERVISOR_OPTIONS: &str = "supervisor_options";
pub const GENERATOR_EXE: &str = "generator_exe";
pub const GENERATOR_ENV: &str = "generator_env";
pub const GENERATOR_OPTIONS: &str = "generator_options";
pub const ANALYZER_EXE: &str = "analyzer_exe";
pub const ANALYZER_OPTIONS: &str = "analyzer_options";
pub const ANALYZER_ENV: &str = "analyzer_env";
pub const ANALYSIS_DIR: &str = "analysis_dir";
pub const ANALYSIS_INPUTS: &str = "analysis_inputs";
pub const ANALYSIS_UNIQUE_INPUTS: &str = "analysis_unique_inputs";
pub const PRESERVE_EXISTING_OUTPUTS: &str = "preserve_existing_outputs";
pub const CREATE_JOB_DIR: &str = "create_job_dir";
@@ -45,6 +66,7 @@ const WAIT_FOR_DIR_DELAY: Duration = Duration::from_secs(1);
pub enum CmdType {
Target,
Generator,
// Supervisor,
}
@@ -68,6 +90,7 @@ pub fn get_cmd_exe(cmd_type: CmdType, args: &clap::ArgMatches) -> Result<String>
let name = match cmd_type {
CmdType::Target => TARGET_EXE,
// CmdType::Supervisor => SUPERVISOR_EXE,
CmdType::Generator => GENERATOR_EXE,
};
args.get_one::<String>(name)
@@ -79,6 +102,7 @@ pub fn get_cmd_arg(cmd_type: CmdType, args: &clap::ArgMatches) -> Vec<String> {
let name = match cmd_type {
CmdType::Target => TARGET_OPTIONS,
// CmdType::Supervisor => SUPERVISOR_OPTIONS,
CmdType::Generator => GENERATOR_OPTIONS,
};
args.get_many::<String>(name)
@@ -91,6 +115,7 @@ pub fn get_cmd_env(cmd_type: CmdType, args: &clap::ArgMatches) -> Result<HashMap
let env_name = match cmd_type {
CmdType::Target => TARGET_ENV,
// CmdType::Supervisor => SUPERVISOR_ENV,
CmdType::Generator => GENERATOR_ENV,
};
get_hash_map(args, env_name)
}
@@ -240,7 +265,6 @@ pub async fn build_local_context(
},
instance_telemetry_key: None,
heartbeat_queue: None,
job_result_queue: None,
microsoft_telemetry_key: None,
logs: None,
min_available_memory_mb: 0,

View File

@@ -5,31 +5,28 @@
# 2. Install llvm and export LLVM_SYMBOLIZER_PATH like we do in setup.sh
required_args: &required_args
target_exe: "REPLACE_ME" # The path to your target
inputs: &inputs "REPLACE_ME" # A folder containining your inputs
crashes: &crashes "REPLACE_ME" # The folder where you want the crashing inputs to be output
crashdumps: "REPLACE_ME" # The folder where you want the crash dumps to be output
coverage: "REPLACE_ME" # The folder where you want the code coverage to be output
regression_reports: "REPLACE_ME" # The folder where you want the regression reports to be output
target_args: &target_args
<<: *required_args
target_env: {}
target_exe: "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\fuzz.exe"
target_options: []
inputs: &inputs "C:\\temp\\onefuzz\\integration\\windows-libfuzzer\\seeds"
tasks:
- type: LibFuzzer
<<: *target_args
inputs: *inputs
crashes: &crash "./crashes"
readonly_inputs: []
check_fuzzer_help: true
- type: LibfuzzerRegression
- type: "Report"
<<: *target_args
- type: "LibfuzzerCrashReport"
<<: *target_args
input_queue: *crashes
input_queue: *crash
crashes: *crash
reports: "./reports"
unique_reports: "./unique_reports"
no_repro: "./no_repro"
check_fuzzer_help: true
- type: "Coverage"
@@ -38,11 +35,4 @@ tasks:
- "{input}"
input_queue: *inputs
readonly_inputs: [*inputs]
# The analysis task is optional in the libfuzzer_basic template
# - type: Analysis
# <<: *target_args
# analysis: "REPLACE_ME" # The folder where you want the analysis results to be output
# analyzer_exe: "REPLACE_ME"
# analyzer_options: []
# analyzer_env: {}
coverage: "./coverage"

View File

@@ -3,13 +3,139 @@
use std::{collections::HashMap, path::PathBuf};
use crate::tasks::config::CommonConfig;
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_exe, get_hash_map, get_synced_dir, CmdType,
SyncCountDirMonitor, UiEvent, ANALYSIS_DIR, ANALYZER_ENV, ANALYZER_EXE, ANALYZER_OPTIONS,
CRASHES_DIR, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TOOLS_DIR,
UNIQUE_REPORTS_DIR,
},
tasks::{
analysis::generic::{run as run_analysis, Config},
config::CommonConfig,
},
};
use anyhow::Result;
use async_trait::async_trait;
use clap::{Arg, Command};
use flume::Sender;
use schemars::JsonSchema;
use storage_queue::QueueClient;
use super::template::{RunContext, Template};
pub fn build_analysis_config(
args: &clap::ArgMatches,
input_queue: Option<QueueClient>,
common: CommonConfig,
event_sender: Option<Sender<UiEvent>>,
) -> Result<Config> {
let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
let target_options = get_cmd_arg(CmdType::Target, args);
let analyzer_exe = args
.get_one::<String>(ANALYZER_EXE)
.cloned()
.ok_or_else(|| format_err!("expected {ANALYZER_EXE}"))?;
let analyzer_options = args
.get_many::<String>(ANALYZER_OPTIONS)
.unwrap_or_default()
.map(|x| x.to_string())
.collect();
let analyzer_env = get_hash_map(args, ANALYZER_ENV)?;
let analysis = get_synced_dir(ANALYSIS_DIR, common.job_id, common.task_id, args)?
.monitor_count(&event_sender)?;
let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args)?;
let crashes = if input_queue.is_none() {
get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?
} else {
None
};
let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let config = Config {
analyzer_exe,
analyzer_options,
analyzer_env,
target_exe,
target_options,
input_queue,
crashes,
analysis,
tools: Some(tools),
reports,
unique_reports,
no_repro,
common,
};
Ok(config)
}
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender.clone()).await?;
let config = build_analysis_config(args, None, context.common_config.clone(), event_sender)?;
run_analysis(config).await
}
pub fn build_shared_args(required_task: bool) -> Vec<Arg> {
vec![
Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
Arg::new(TARGET_ENV)
.long(TARGET_ENV)
.requires(TARGET_EXE)
.num_args(0..),
Arg::new(TARGET_OPTIONS)
.long(TARGET_OPTIONS)
.default_value("{input}")
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(CRASHES_DIR)
.long(CRASHES_DIR)
.value_parser(value_parser!(PathBuf)),
Arg::new(ANALYZER_OPTIONS)
.long(ANALYZER_OPTIONS)
.requires(ANALYZER_EXE)
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(ANALYZER_ENV)
.long(ANALYZER_ENV)
.requires(ANALYZER_EXE)
.num_args(0..),
Arg::new(TOOLS_DIR)
.long(TOOLS_DIR)
.value_parser(value_parser!(PathBuf)),
Arg::new(ANALYZER_EXE)
.long(ANALYZER_EXE)
.requires(ANALYSIS_DIR)
.requires(CRASHES_DIR)
.required(required_task),
Arg::new(ANALYSIS_DIR)
.long(ANALYSIS_DIR)
.requires(ANALYZER_EXE)
.requires(CRASHES_DIR)
.required(required_task),
]
}
pub fn args(name: &'static str) -> Command {
Command::new(name)
.about("execute a local-only generic analysis")
.args(&build_shared_args(true))
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct Analysis {
analyzer_exe: String,
@@ -20,7 +146,7 @@ pub struct Analysis {
input_queue: Option<PathBuf>,
crashes: Option<PathBuf>,
analysis: PathBuf,
tools: Option<PathBuf>,
tools: PathBuf,
reports: Option<PathBuf>,
unique_reports: Option<PathBuf>,
no_repro: Option<PathBuf>,
@@ -49,10 +175,9 @@ impl Template for Analysis {
.and_then(|path| context.to_monitored_sync_dir("crashes", path).ok()),
analysis: context.to_monitored_sync_dir("analysis", self.analysis.clone())?,
tools: self
.tools
.as_ref()
.and_then(|path| context.to_monitored_sync_dir("tools", path).ok()),
tools: context
.to_monitored_sync_dir("tools", self.tools.clone())
.ok(),
reports: self
.reports

View File

@@ -3,14 +3,150 @@
use std::{collections::HashMap, path::PathBuf};
use crate::tasks::{config::CommonConfig, utils::default_bool_true};
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType,
SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT, CRASHES_DIR,
DISABLE_CHECK_DEBUGGER, DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV,
TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR,
},
tasks::{
config::CommonConfig,
report::generic::{Config, ReportTask},
utils::default_bool_true,
},
};
use anyhow::Result;
use async_trait::async_trait;
use clap::{Arg, ArgAction, Command};
use flume::Sender;
use futures::future::OptionFuture;
use schemars::JsonSchema;
use storage_queue::QueueClient;
use super::template::{RunContext, Template};
pub fn build_report_config(
args: &clap::ArgMatches,
input_queue: Option<QueueClient>,
common: CommonConfig,
event_sender: Option<Sender<UiEvent>>,
) -> Result<Config> {
let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
let target_env = get_cmd_env(CmdType::Target, args)?;
let target_options = get_cmd_arg(CmdType::Target, args);
let crashes = Some(get_synced_dir(
CRASHES_DIR,
common.job_id,
common.task_id,
args,
)?)
.monitor_count(&event_sender)?;
let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let unique_reports = Some(get_synced_dir(
UNIQUE_REPORTS_DIR,
common.job_id,
common.task_id,
args,
)?)
.monitor_count(&event_sender)?;
let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
let check_retry_count = args
.get_one::<u64>(CHECK_RETRY_COUNT)
.copied()
.expect("has a default");
let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE);
let check_asan_log = args.get_flag(CHECK_ASAN_LOG);
let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER);
let config = Config {
target_exe,
target_env,
target_options,
target_timeout,
check_asan_log,
check_debugger,
check_retry_count,
check_queue,
crashes,
minimized_stack_depth: None,
input_queue,
no_repro,
reports,
unique_reports,
common,
};
Ok(config)
}
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender.clone()).await?;
let config = build_report_config(args, None, context.common_config.clone(), event_sender)?;
ReportTask::new(config).managed_run().await
}
pub fn build_shared_args() -> Vec<Arg> {
vec![
Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
Arg::new(TARGET_OPTIONS)
.default_value("{input}")
.long(TARGET_OPTIONS)
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(CRASHES_DIR)
.long(CRASHES_DIR)
.required(true)
.value_parser(value_parser!(PathBuf)),
Arg::new(REPORTS_DIR)
.long(REPORTS_DIR)
.required(false)
.value_parser(value_parser!(PathBuf)),
Arg::new(NO_REPRO_DIR)
.long(NO_REPRO_DIR)
.required(false)
.value_parser(value_parser!(PathBuf)),
Arg::new(UNIQUE_REPORTS_DIR)
.long(UNIQUE_REPORTS_DIR)
.value_parser(value_parser!(PathBuf))
.required(true),
Arg::new(TARGET_TIMEOUT)
.long(TARGET_TIMEOUT)
.value_parser(value_parser!(u64))
.default_value("30"),
Arg::new(CHECK_RETRY_COUNT)
.long(CHECK_RETRY_COUNT)
.value_parser(value_parser!(u64))
.default_value("0"),
Arg::new(DISABLE_CHECK_QUEUE)
.action(ArgAction::SetTrue)
.long(DISABLE_CHECK_QUEUE),
Arg::new(CHECK_ASAN_LOG)
.action(ArgAction::SetTrue)
.long(CHECK_ASAN_LOG),
Arg::new(DISABLE_CHECK_DEBUGGER)
.action(ArgAction::SetTrue)
.long(DISABLE_CHECK_DEBUGGER),
]
}
pub fn args(name: &'static str) -> Command {
Command::new(name)
.about("execute a local-only generic crash report")
.args(&build_shared_args())
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct CrashReport {
target_exe: PathBuf,

View File

@@ -3,14 +3,154 @@
use std::{collections::HashMap, path::PathBuf};
use crate::tasks::{config::CommonConfig, utils::default_bool_true};
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir,
get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, CHECK_ASAN_LOG, CHECK_RETRY_COUNT,
CRASHES_DIR, DISABLE_CHECK_DEBUGGER, GENERATOR_ENV, GENERATOR_EXE, GENERATOR_OPTIONS,
READONLY_INPUTS, RENAME_OUTPUT, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT,
TOOLS_DIR,
},
tasks::{
config::CommonConfig,
fuzz::generator::{Config, GeneratorTask},
utils::default_bool_true,
},
};
use anyhow::Result;
use async_trait::async_trait;
use clap::{Arg, ArgAction, Command};
use flume::Sender;
use onefuzz::syncdir::SyncedDir;
use schemars::JsonSchema;
use super::template::{RunContext, Template};
pub fn build_fuzz_config(
args: &clap::ArgMatches,
common: CommonConfig,
event_sender: Option<Sender<UiEvent>>,
) -> Result<Config> {
let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)?
.monitor_count(&event_sender)?;
let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
let target_options = get_cmd_arg(CmdType::Target, args);
let target_env = get_cmd_env(CmdType::Target, args)?;
let generator_exe = get_cmd_exe(CmdType::Generator, args)?;
let generator_options = get_cmd_arg(CmdType::Generator, args);
let generator_env = get_cmd_env(CmdType::Generator, args)?;
let readonly_inputs = get_synced_dirs(READONLY_INPUTS, common.job_id, common.task_id, args)?
.into_iter()
.map(|sd| sd.monitor_count(&event_sender))
.collect::<Result<Vec<_>>>()?;
let rename_output = args.get_flag(RENAME_OUTPUT);
let check_asan_log = args.get_flag(CHECK_ASAN_LOG);
let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER);
let check_retry_count = args
.get_one::<u64>(CHECK_RETRY_COUNT)
.copied()
.expect("has a default");
let target_timeout = Some(
args.get_one::<u64>(TARGET_TIMEOUT)
.copied()
.expect("has a default"),
);
let tools = get_synced_dir(TOOLS_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let ensemble_sync_delay = None;
let config = Config {
generator_exe,
generator_env,
generator_options,
readonly_inputs,
crashes,
tools,
target_exe,
target_env,
target_options,
target_timeout,
check_asan_log,
check_debugger,
check_retry_count,
rename_output,
ensemble_sync_delay,
common,
};
Ok(config)
}
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender.clone()).await?;
let config = build_fuzz_config(args, context.common_config.clone(), event_sender)?;
GeneratorTask::new(config).run().await
}
pub fn build_shared_args() -> Vec<Arg> {
vec![
Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
Arg::new(TARGET_OPTIONS)
.default_value("{input}")
.long(TARGET_OPTIONS)
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(GENERATOR_EXE)
.long(GENERATOR_EXE)
.default_value("radamsa")
.required(true),
Arg::new(GENERATOR_ENV).long(GENERATOR_ENV).num_args(0..),
Arg::new(GENERATOR_OPTIONS)
.long(GENERATOR_OPTIONS)
.value_delimiter(' ')
.default_value("-H sha256 -o {generated_inputs}/input-%h.%s -n 100 -r {input_corpus}")
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(CRASHES_DIR)
.required(true)
.long(CRASHES_DIR)
.value_parser(value_parser!(PathBuf)),
Arg::new(READONLY_INPUTS)
.required(true)
.num_args(1..)
.value_parser(value_parser!(PathBuf))
.long(READONLY_INPUTS),
Arg::new(TOOLS_DIR)
.long(TOOLS_DIR)
.value_parser(value_parser!(PathBuf)),
Arg::new(CHECK_RETRY_COUNT)
.long(CHECK_RETRY_COUNT)
.value_parser(value_parser!(u64))
.default_value("0"),
Arg::new(CHECK_ASAN_LOG)
.action(ArgAction::SetTrue)
.long(CHECK_ASAN_LOG),
Arg::new(RENAME_OUTPUT)
.action(ArgAction::SetTrue)
.long(RENAME_OUTPUT),
Arg::new(TARGET_TIMEOUT)
.long(TARGET_TIMEOUT)
.value_parser(value_parser!(u64))
.default_value("30"),
Arg::new(DISABLE_CHECK_DEBUGGER)
.action(ArgAction::SetTrue)
.long(DISABLE_CHECK_DEBUGGER),
]
}
pub fn args(name: &'static str) -> Command {
Command::new(name)
.about("execute a local-only generator fuzzing task")
.args(&build_shared_args())
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct Generator {
generator_exe: String,

View File

@@ -1,19 +1,168 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use crate::tasks::{
config::CommonConfig,
fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask},
utils::default_bool_true,
#[cfg(any(target_os = "linux", target_os = "windows"))]
use crate::{
local::{common::COVERAGE_DIR, coverage, coverage::build_shared_args as build_coverage_args},
tasks::coverage::generic::CoverageTask,
};
use crate::{
local::{
common::{
build_local_context, wait_for_dir, DirectoryMonitorQueue, UiEvent, ANALYZER_EXE,
REGRESSION_REPORTS_DIR, UNIQUE_REPORTS_DIR,
},
generic_analysis::{build_analysis_config, build_shared_args as build_analysis_args},
libfuzzer_crash_report::{build_report_config, build_shared_args as build_crash_args},
libfuzzer_fuzz::{build_fuzz_config, build_shared_args as build_fuzz_args},
libfuzzer_regression::{
build_regression_config, build_shared_args as build_regression_args,
},
},
tasks::{
analysis::generic::run as run_analysis,
config::CommonConfig,
fuzz::libfuzzer::{common::default_workers, generic::LibFuzzerFuzzTask},
regression::libfuzzer::LibFuzzerRegressionTask,
report::libfuzzer_report::ReportTask,
utils::default_bool_true,
},
};
use anyhow::Result;
use async_trait::async_trait;
use onefuzz::syncdir::SyncedDir;
use clap::Command;
use flume::Sender;
use onefuzz::{syncdir::SyncedDir, utils::try_wait_all_join_handles};
use schemars::JsonSchema;
use std::{collections::HashMap, path::PathBuf};
use std::{
collections::{HashMap, HashSet},
path::PathBuf,
};
use tokio::task::spawn;
use uuid::Uuid;
use super::template::{RunContext, Template};
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender.clone()).await?;
let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?;
let crash_dir = fuzz_config
.crashes
.remote_url()?
.as_file_path()
.expect("invalid crash dir remote location");
let fuzzer = LibFuzzerFuzzTask::new(fuzz_config)?;
let mut task_handles = vec![];
let fuzz_task = spawn(async move { fuzzer.run().await });
wait_for_dir(&crash_dir).await?;
task_handles.push(fuzz_task);
if args.contains_id(UNIQUE_REPORTS_DIR) {
let crash_report_input_monitor =
DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?;
let report_config = build_report_config(
args,
Some(crash_report_input_monitor.queue_client),
CommonConfig {
task_id: Uuid::new_v4(),
..context.common_config.clone()
},
event_sender.clone(),
)?;
let mut report = ReportTask::new(report_config);
let report_task = spawn(async move { report.managed_run().await });
task_handles.push(report_task);
task_handles.push(crash_report_input_monitor.handle);
}
#[cfg(any(target_os = "linux", target_os = "windows"))]
if args.contains_id(COVERAGE_DIR) {
let coverage_input_monitor =
DirectoryMonitorQueue::start_monitoring(crash_dir.clone()).await?;
let coverage_config = coverage::build_coverage_config(
args,
true,
Some(coverage_input_monitor.queue_client),
CommonConfig {
task_id: Uuid::new_v4(),
..context.common_config.clone()
},
event_sender.clone(),
)?;
let mut coverage = CoverageTask::new(coverage_config);
let coverage_task = spawn(async move { coverage.run().await });
task_handles.push(coverage_task);
task_handles.push(coverage_input_monitor.handle);
}
if args.contains_id(ANALYZER_EXE) {
let analysis_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir).await?;
let analysis_config = build_analysis_config(
args,
Some(analysis_input_monitor.queue_client),
CommonConfig {
task_id: Uuid::new_v4(),
..context.common_config.clone()
},
event_sender.clone(),
)?;
let analysis_task = spawn(async move { run_analysis(analysis_config).await });
task_handles.push(analysis_task);
task_handles.push(analysis_input_monitor.handle);
}
if args.contains_id(REGRESSION_REPORTS_DIR) {
let regression_config = build_regression_config(
args,
CommonConfig {
task_id: Uuid::new_v4(),
..context.common_config.clone()
},
event_sender,
)?;
let regression = LibFuzzerRegressionTask::new(regression_config);
let regression_task = spawn(async move { regression.run().await });
task_handles.push(regression_task);
}
try_wait_all_join_handles(task_handles).await?;
Ok(())
}
pub fn args(name: &'static str) -> Command {
let mut app = Command::new(name).about("run a local libfuzzer & crash reporting task");
let mut used = HashSet::new();
for args in &[
build_fuzz_args(),
build_crash_args(),
build_analysis_args(false),
#[cfg(any(target_os = "linux", target_os = "windows"))]
build_coverage_args(true),
build_regression_args(false),
] {
for arg in args {
if used.insert(arg.get_id()) {
app = app.arg(arg);
}
}
}
app
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct LibFuzzer {
inputs: PathBuf,

View File

@@ -3,13 +3,139 @@
use std::{collections::HashMap, path::PathBuf};
use crate::tasks::{config::CommonConfig, utils::default_bool_true};
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType,
SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, CRASHES_DIR,
DISABLE_CHECK_QUEUE, NO_REPRO_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS,
TARGET_TIMEOUT, UNIQUE_REPORTS_DIR,
},
tasks::{
config::CommonConfig,
report::libfuzzer_report::{Config, ReportTask},
utils::default_bool_true,
},
};
use anyhow::Result;
use async_trait::async_trait;
use clap::{Arg, ArgAction, Command};
use flume::Sender;
use futures::future::OptionFuture;
use schemars::JsonSchema;
use storage_queue::QueueClient;
use super::template::{RunContext, Template};
pub fn build_report_config(
args: &clap::ArgMatches,
input_queue: Option<QueueClient>,
common: CommonConfig,
event_sender: Option<Sender<UiEvent>>,
) -> Result<Config> {
let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
let target_env = get_cmd_env(CmdType::Target, args)?;
let target_options = get_cmd_arg(CmdType::Target, args);
let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
let check_retry_count = args
.get_one::<u64>(CHECK_RETRY_COUNT)
.copied()
.expect("has a default");
let check_queue = !args.get_flag(DISABLE_CHECK_QUEUE);
let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP);
let crashes = if input_queue.is_none() { crashes } else { None };
let config = Config {
target_exe,
target_env,
target_options,
target_timeout,
check_retry_count,
check_fuzzer_help,
minimized_stack_depth: None,
input_queue,
check_queue,
crashes,
reports,
no_repro,
unique_reports,
common,
};
Ok(config)
}
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender.clone()).await?;
let config = build_report_config(args, None, context.common_config.clone(), event_sender)?;
ReportTask::new(config).managed_run().await
}
pub fn build_shared_args() -> Vec<Arg> {
vec![
Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
Arg::new(TARGET_OPTIONS)
.long(TARGET_OPTIONS)
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(CRASHES_DIR)
.long(CRASHES_DIR)
.required(true)
.value_parser(value_parser!(PathBuf)),
Arg::new(REPORTS_DIR)
.long(REPORTS_DIR)
.required(false)
.value_parser(value_parser!(PathBuf)),
Arg::new(NO_REPRO_DIR)
.long(NO_REPRO_DIR)
.required(false)
.value_parser(value_parser!(PathBuf)),
Arg::new(UNIQUE_REPORTS_DIR)
.long(UNIQUE_REPORTS_DIR)
.required(true)
.value_parser(value_parser!(PathBuf)),
Arg::new(TARGET_TIMEOUT)
.value_parser(value_parser!(u64))
.long(TARGET_TIMEOUT),
Arg::new(CHECK_RETRY_COUNT)
.long(CHECK_RETRY_COUNT)
.value_parser(value_parser!(u64))
.default_value("0"),
Arg::new(DISABLE_CHECK_QUEUE)
.action(ArgAction::SetTrue)
.long(DISABLE_CHECK_QUEUE),
Arg::new(CHECK_FUZZER_HELP)
.action(ArgAction::SetTrue)
.long(CHECK_FUZZER_HELP),
]
}
pub fn args(name: &'static str) -> Command {
Command::new(name)
.about("execute a local-only libfuzzer crash report task")
.args(&build_shared_args())
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct LibfuzzerCrashReport {
target_exe: PathBuf,

View File

@@ -3,15 +3,97 @@
use std::{collections::HashMap, path::PathBuf};
use crate::tasks::{config::CommonConfig, utils::default_bool_true};
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir,
get_synced_dirs, CmdType, SyncCountDirMonitor, UiEvent, ANALYSIS_INPUTS,
ANALYSIS_UNIQUE_INPUTS, CHECK_FUZZER_HELP, INPUTS_DIR, PRESERVE_EXISTING_OUTPUTS,
TARGET_ENV, TARGET_EXE, TARGET_OPTIONS,
},
tasks::{
config::CommonConfig,
merge::libfuzzer_merge::{spawn, Config},
utils::default_bool_true,
},
};
use anyhow::Result;
use async_trait::async_trait;
use clap::{Arg, ArgAction, Command};
use flume::Sender;
use futures::future::OptionFuture;
use onefuzz::syncdir::SyncedDir;
use schemars::JsonSchema;
use storage_queue::QueueClient;
use super::template::{RunContext, Template};
pub fn build_merge_config(
args: &clap::ArgMatches,
input_queue: Option<QueueClient>,
common: CommonConfig,
event_sender: Option<Sender<UiEvent>>,
) -> Result<Config> {
let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
let target_env = get_cmd_env(CmdType::Target, args)?;
let target_options = get_cmd_arg(CmdType::Target, args);
let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP);
let inputs = get_synced_dirs(ANALYSIS_INPUTS, common.job_id, common.task_id, args)?
.into_iter()
.map(|sd| sd.monitor_count(&event_sender))
.collect::<Result<Vec<_>>>()?;
let unique_inputs =
get_synced_dir(ANALYSIS_UNIQUE_INPUTS, common.job_id, common.task_id, args)?
.monitor_count(&event_sender)?;
let preserve_existing_outputs = args
.get_one::<bool>(PRESERVE_EXISTING_OUTPUTS)
.copied()
.unwrap_or_default();
let config = Config {
target_exe,
target_env,
target_options,
input_queue,
inputs,
unique_inputs,
preserve_existing_outputs,
check_fuzzer_help,
common,
};
Ok(config)
}
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender.clone()).await?;
let config = build_merge_config(args, None, context.common_config.clone(), event_sender)?;
spawn(config).await
}
pub fn build_shared_args() -> Vec<Arg> {
vec![
Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
Arg::new(TARGET_OPTIONS)
.long(TARGET_OPTIONS)
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(CHECK_FUZZER_HELP)
.action(ArgAction::SetTrue)
.long(CHECK_FUZZER_HELP),
Arg::new(INPUTS_DIR)
.long(INPUTS_DIR)
.value_parser(value_parser!(PathBuf))
.num_args(0..),
]
}
pub fn args(name: &'static str) -> Command {
Command::new(name)
.about("execute a local-only libfuzzer crash report task")
.args(&build_shared_args())
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct LibfuzzerMerge {
target_exe: PathBuf,

View File

@@ -3,13 +3,145 @@
use std::{collections::HashMap, path::PathBuf};
use crate::tasks::{config::CommonConfig, utils::default_bool_true};
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_env, get_cmd_exe, get_synced_dir, CmdType,
SyncCountDirMonitor, UiEvent, CHECK_FUZZER_HELP, CHECK_RETRY_COUNT, COVERAGE_DIR,
CRASHES_DIR, NO_REPRO_DIR, REGRESSION_REPORTS_DIR, REPORTS_DIR, TARGET_ENV, TARGET_EXE,
TARGET_OPTIONS, TARGET_TIMEOUT, UNIQUE_REPORTS_DIR,
},
tasks::{
config::CommonConfig,
regression::libfuzzer::{Config, LibFuzzerRegressionTask},
utils::default_bool_true,
},
};
use anyhow::Result;
use async_trait::async_trait;
use clap::{Arg, ArgAction, Command};
use flume::Sender;
use schemars::JsonSchema;
use super::template::{RunContext, Template};
const REPORT_NAMES: &str = "report_names";
pub fn build_regression_config(
args: &clap::ArgMatches,
common: CommonConfig,
event_sender: Option<Sender<UiEvent>>,
) -> Result<Config> {
let target_exe = get_cmd_exe(CmdType::Target, args)?.into();
let target_env = get_cmd_env(CmdType::Target, args)?;
let target_options = get_cmd_arg(CmdType::Target, args);
let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
let crashes = get_synced_dir(CRASHES_DIR, common.job_id, common.task_id, args)?
.monitor_count(&event_sender)?;
let regression_reports =
get_synced_dir(REGRESSION_REPORTS_DIR, common.job_id, common.task_id, args)?
.monitor_count(&event_sender)?;
let check_retry_count = args
.get_one::<u64>(CHECK_RETRY_COUNT)
.copied()
.expect("has a default value");
let reports = get_synced_dir(REPORTS_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let no_repro = get_synced_dir(NO_REPRO_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let unique_reports = get_synced_dir(UNIQUE_REPORTS_DIR, common.job_id, common.task_id, args)
.ok()
.monitor_count(&event_sender)?;
let report_list: Option<Vec<String>> = args
.get_many::<String>(REPORT_NAMES)
.map(|x| x.cloned().collect());
let check_fuzzer_help = args.get_flag(CHECK_FUZZER_HELP);
let config = Config {
target_exe,
target_env,
target_options,
target_timeout,
check_fuzzer_help,
check_retry_count,
crashes,
regression_reports,
reports,
no_repro,
unique_reports,
readonly_inputs: None,
report_list,
minimized_stack_depth: None,
common,
};
Ok(config)
}
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender.clone()).await?;
let config = build_regression_config(args, context.common_config.clone(), event_sender)?;
LibFuzzerRegressionTask::new(config).run().await
}
pub fn build_shared_args(local_job: bool) -> Vec<Arg> {
let mut args = vec![
Arg::new(TARGET_EXE).long(TARGET_EXE).required(true),
Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
Arg::new(TARGET_OPTIONS)
.long(TARGET_OPTIONS)
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(COVERAGE_DIR)
.required(!local_job)
.long(COVERAGE_DIR)
.value_parser(value_parser!(PathBuf)),
Arg::new(CHECK_FUZZER_HELP)
.action(ArgAction::SetTrue)
.long(CHECK_FUZZER_HELP),
Arg::new(TARGET_TIMEOUT)
.long(TARGET_TIMEOUT)
.value_parser(value_parser!(u64)),
Arg::new(CRASHES_DIR)
.long(CRASHES_DIR)
.required(true)
.value_parser(value_parser!(PathBuf)),
Arg::new(REGRESSION_REPORTS_DIR)
.long(REGRESSION_REPORTS_DIR)
.required(local_job)
.value_parser(value_parser!(PathBuf)),
Arg::new(REPORTS_DIR)
.long(REPORTS_DIR)
.required(false)
.value_parser(value_parser!(PathBuf)),
Arg::new(NO_REPRO_DIR)
.long(NO_REPRO_DIR)
.required(false)
.value_parser(value_parser!(PathBuf)),
Arg::new(UNIQUE_REPORTS_DIR)
.long(UNIQUE_REPORTS_DIR)
.value_parser(value_parser!(PathBuf))
.required(true),
Arg::new(CHECK_RETRY_COUNT)
.long(CHECK_RETRY_COUNT)
.value_parser(value_parser!(u64))
.default_value("0"),
];
if local_job {
args.push(Arg::new(REPORT_NAMES).long(REPORT_NAMES).num_args(0..))
}
args
}
pub fn args(name: &'static str) -> Command {
Command::new(name)
.about("execute a local-only libfuzzer regression task")
.args(&build_shared_args(true))
}
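
A minimal usage sketch (not part of this diff) of how the command built above is parsed with clap 4. The literal flag spellings ("target_exe", "crashes_dir", and so on) are assumptions standing in for the TARGET_EXE, CRASHES_DIR, UNIQUE_REPORTS_DIR, and REGRESSION_REPORTS_DIR constants defined elsewhere in the crate:

// Hedged sketch: build the local regression command and read typed values back.
use std::path::PathBuf;

fn main() {
    let matches = args("libfuzzer-regression").get_matches_from([
        "libfuzzer-regression",
        "--target_exe", "fuzz.exe",              // TARGET_EXE (required)
        "--crashes_dir", "./crashes",            // CRASHES_DIR (required)
        "--unique_reports_dir", "./unique",      // UNIQUE_REPORTS_DIR (required)
        "--regression_reports_dir", "./reports", // required because local_job = true
    ]);
    // Typed lookups mirror the value_parser registrations above.
    let crashes = matches.get_one::<PathBuf>("crashes_dir");
    let retries = matches.get_one::<u64>("check_retry_count").copied(); // defaults to 0
    println!("crashes={crashes:?} retries={retries:?}");
}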
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct LibfuzzerRegression {
target_exe: PathBuf,

View File

@ -1,14 +1,97 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_RETRY_COUNT,
TARGET_ENV, TARGET_EXE, TARGET_OPTIONS, TARGET_TIMEOUT,
},
tasks::report::libfuzzer_report::{test_input, TestInputArgs},
};
use anyhow::Result;
use async_trait::async_trait;
use clap::{Arg, Command};
use flume::Sender;
use onefuzz::machine_id::MachineIdentity;
use schemars::JsonSchema;
use std::{collections::HashMap, path::PathBuf};
use super::template::{RunContext, Template};
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender).await?;
let target_exe = args
.get_one::<PathBuf>(TARGET_EXE)
.expect("marked as required");
let target_env = get_cmd_env(CmdType::Target, args)?;
let target_options = get_cmd_arg(CmdType::Target, args);
let input = args
.get_one::<PathBuf>("input")
.expect("marked as required");
let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
let check_retry_count = args
.get_one::<u64>(CHECK_RETRY_COUNT)
.copied()
.expect("has a default value");
let extra_setup_dir = context.common_config.extra_setup_dir.as_deref();
let extra_output_dir = context
.common_config
.extra_output
.as_ref()
.map(|x| x.local_path.as_path());
let config = TestInputArgs {
target_exe: target_exe.as_path(),
target_env: &target_env,
target_options: &target_options,
input_url: None,
input: input.as_path(),
job_id: context.common_config.job_id,
task_id: context.common_config.task_id,
target_timeout,
check_retry_count,
setup_dir: &context.common_config.setup_dir,
extra_setup_dir,
extra_output_dir,
minimized_stack_depth: None,
machine_identity: context.common_config.machine_identity,
};
let result = test_input(config).await?;
println!("{}", serde_json::to_string_pretty(&result)?);
Ok(())
}
pub fn build_shared_args() -> Vec<Arg> {
vec![
Arg::new(TARGET_EXE).required(true),
Arg::new("input")
.required(true)
.value_parser(value_parser!(PathBuf)),
Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
Arg::new(TARGET_OPTIONS)
.default_value("{input}")
.long(TARGET_OPTIONS)
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(TARGET_TIMEOUT)
.long(TARGET_TIMEOUT)
.value_parser(value_parser!(u64)),
Arg::new(CHECK_RETRY_COUNT)
.long(CHECK_RETRY_COUNT)
.value_parser(value_parser!(u64))
.default_value("0"),
]
}
pub fn args(name: &'static str) -> Command {
Command::new(name)
.about("test a libfuzzer application with a specific input")
.args(&build_shared_args())
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct LibfuzzerTestInput {
input: PathBuf,

View File

@ -14,6 +14,7 @@ pub mod libfuzzer_fuzz;
pub mod libfuzzer_merge;
pub mod libfuzzer_regression;
pub mod libfuzzer_test_input;
pub mod radamsa;
pub mod template;
pub mod test_input;
pub mod tui;

View File

@ -0,0 +1,78 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use crate::{
local::{
common::{build_local_context, DirectoryMonitorQueue, UiEvent},
generic_crash_report::{build_report_config, build_shared_args as build_crash_args},
generic_generator::{build_fuzz_config, build_shared_args as build_fuzz_args},
},
tasks::{config::CommonConfig, fuzz::generator::GeneratorTask, report::generic::ReportTask},
};
use anyhow::{Context, Result};
use clap::Command;
use flume::Sender;
use onefuzz::utils::try_wait_all_join_handles;
use std::collections::HashSet;
use tokio::task::spawn;
use uuid::Uuid;
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, true, event_sender.clone()).await?;
let fuzz_config = build_fuzz_config(args, context.common_config.clone(), event_sender.clone())?;
let crash_dir = fuzz_config
.crashes
.remote_url()?
.as_file_path()
.ok_or_else(|| format_err!("invalid crash directory"))?;
tokio::fs::create_dir_all(&crash_dir)
.await
.with_context(|| {
format!(
"unable to create crashes directory: {}",
crash_dir.display()
)
})?;
let fuzzer = GeneratorTask::new(fuzz_config);
let fuzz_task = spawn(async move { fuzzer.run().await });
let crash_report_input_monitor = DirectoryMonitorQueue::start_monitoring(crash_dir)
.await
.context("directory monitor failed")?;
let report_config = build_report_config(
args,
Some(crash_report_input_monitor.queue_client),
CommonConfig {
task_id: Uuid::new_v4(),
..context.common_config.clone()
},
event_sender,
)?;
let report_task = spawn(async move { ReportTask::new(report_config).managed_run().await });
try_wait_all_join_handles(vec![
fuzz_task,
report_task,
crash_report_input_monitor.handle,
])
.await?;
Ok(())
}
pub fn args(name: &'static str) -> Command {
let mut app = Command::new(name).about("run a local generator & crash reporting job");
let mut used = HashSet::new();
for args in &[build_fuzz_args(), build_crash_args()] {
for arg in args {
if used.insert(arg.get_id()) {
app = app.arg(arg);
}
}
}
app
}
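
The run function above fans out the generator, the crash-report task, and the directory monitor as separate tokio tasks, then waits on all of the join handles. A minimal sketch of that spawn-then-join pattern, assuming tokio and anyhow; wait_all is a hypothetical stand-in for onefuzz's try_wait_all_join_handles:

// Hedged sketch: join every handle, surfacing panics and task errors.
use anyhow::Result;
use tokio::task::{spawn, JoinHandle};

async fn wait_all(handles: Vec<JoinHandle<Result<()>>>) -> Result<()> {
    for handle in handles {
        // First `?` converts a JoinError (panic/cancellation); the second
        // propagates the task's own Result.
        handle.await??;
    }
    Ok(())
}

#[tokio::main]
async fn main() -> Result<()> {
    let fuzz_task: JoinHandle<Result<()>> = spawn(async { /* fuzzer.run().await */ Ok(()) });
    let report_task: JoinHandle<Result<()>> = spawn(async { /* report.managed_run().await */ Ok(()) });
    wait_all(vec![fuzz_task, report_task]).await
}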

View File

@ -126,6 +126,7 @@
"analyzer_options",
"target_exe",
"target_options",
"tools",
"type"
],
"properties": {
@ -181,10 +182,7 @@
}
},
"tools": {
"type": [
"string",
"null"
]
"type": "string"
},
"type": {
"type": "string",
@ -895,4 +893,4 @@
]
}
}
}
}
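
The schema hunk above makes tools a required plain string where it was previously nullable. In a serde-based config type that is the difference between Option<String> and String; a small, hypothetical illustration (the struct and field set are not the crate's actual definitions):

// Hedged sketch of the schema tightening as a serde type change.
use serde::Deserialize;

#[derive(Deserialize)]
struct AnalysisConfig {
    // Before: Option<String>  (schema type ["string", "null"])
    // After:  String          (schema type "string", listed under "required")
    tools: String,
    target_exe: String,
}

fn main() {
    let cfg: AnalysisConfig =
        serde_json::from_str(r#"{ "tools": "tools-dir", "target_exe": "fuzz.exe" }"#).unwrap();
    assert_eq!(cfg.tools, "tools-dir");
}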

View File

@ -196,7 +196,6 @@ pub async fn launch(
job_id: Uuid::new_v4(),
instance_id: Uuid::new_v4(),
heartbeat_queue: None,
job_result_queue: None,
instance_telemetry_key: None,
microsoft_telemetry_key: None,
logs: None,
@ -242,10 +241,12 @@ mod test {
.expect("Couldn't find checked-in schema.json")
.replace("\r\n", "\n");
if schema_str.replace('\n', "") != checked_in_schema.replace('\n', "") {
std::fs::write("src/local/new.schema.json", schema_str)
.expect("The schemas did not match but failed to write new schema to file.");
panic!("The checked-in local fuzzing schema did not match the generated schema. The generated schema can be found at src/local/new.schema.json");
}
println!("{}", schema_str);
assert_eq!(
schema_str.replace('\n', ""),
checked_in_schema.replace('\n', ""),
"The checked-in local fuzzing schema did not match the generated schema."
);
}
}

View File

@ -1,8 +1,18 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use crate::{
local::common::{
build_local_context, get_cmd_arg, get_cmd_env, CmdType, UiEvent, CHECK_ASAN_LOG,
CHECK_RETRY_COUNT, DISABLE_CHECK_DEBUGGER, TARGET_ENV, TARGET_EXE, TARGET_OPTIONS,
TARGET_TIMEOUT,
},
tasks::report::generic::{test_input, TestInputArgs},
};
use anyhow::Result;
use async_trait::async_trait;
use clap::{Arg, ArgAction, Command};
use flume::Sender;
use onefuzz::machine_id::MachineIdentity;
use schemars::JsonSchema;
use std::{collections::HashMap, path::PathBuf};
@ -10,6 +20,82 @@ use uuid::Uuid;
use super::template::{RunContext, Template};
pub async fn run(args: &clap::ArgMatches, event_sender: Option<Sender<UiEvent>>) -> Result<()> {
let context = build_local_context(args, false, event_sender).await?;
let target_exe = args
.get_one::<PathBuf>(TARGET_EXE)
.expect("is marked required");
let target_env = get_cmd_env(CmdType::Target, args)?;
let target_options = get_cmd_arg(CmdType::Target, args);
let input = args
.get_one::<PathBuf>("input")
.expect("is marked required");
let target_timeout = args.get_one::<u64>(TARGET_TIMEOUT).copied();
let check_retry_count = args
.get_one::<u64>(CHECK_RETRY_COUNT)
.copied()
.expect("has default value");
let check_asan_log = args.get_flag(CHECK_ASAN_LOG);
let check_debugger = !args.get_flag(DISABLE_CHECK_DEBUGGER);
let config = TestInputArgs {
target_exe: target_exe.as_path(),
target_env: &target_env,
target_options: &target_options,
input_url: None,
input: input.as_path(),
job_id: context.common_config.job_id,
task_id: context.common_config.task_id,
target_timeout,
check_retry_count,
setup_dir: &context.common_config.setup_dir,
extra_setup_dir: context.common_config.extra_setup_dir.as_deref(),
minimized_stack_depth: None,
check_asan_log,
check_debugger,
machine_identity: context.common_config.machine_identity.clone(),
};
let result = test_input(config).await?;
println!("{}", serde_json::to_string_pretty(&result)?);
Ok(())
}
pub fn build_shared_args() -> Vec<Arg> {
vec![
Arg::new(TARGET_EXE).required(true),
Arg::new("input")
.required(true)
.value_parser(value_parser!(PathBuf)),
Arg::new(TARGET_ENV).long(TARGET_ENV).num_args(0..),
Arg::new(TARGET_OPTIONS)
.default_value("{input}")
.long(TARGET_OPTIONS)
.value_delimiter(' ')
.help("Use a quoted string with space separation to denote multiple arguments"),
Arg::new(TARGET_TIMEOUT)
.long(TARGET_TIMEOUT)
.value_parser(value_parser!(u64)),
Arg::new(CHECK_RETRY_COUNT)
.long(CHECK_RETRY_COUNT)
.value_parser(value_parser!(u64))
.default_value("0"),
Arg::new(CHECK_ASAN_LOG)
.action(ArgAction::SetTrue)
.long(CHECK_ASAN_LOG),
Arg::new(DISABLE_CHECK_DEBUGGER)
.action(ArgAction::SetTrue)
.long("disable_check_debugger"),
]
}
pub fn args(name: &'static str) -> Command {
Command::new(name)
.about("test an application with a specific input")
.args(&build_shared_args())
}
#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
pub struct TestInput {
input: PathBuf,

View File

@ -65,8 +65,6 @@ pub async fn run(config: Config) -> Result<()> {
tools.init_pull().await?;
}
let job_result_client = config.common.init_job_result().await?;
// the tempdir is always created, however, the reports_path and
// reports_monitor_future are only created if we have one of the three
// report SyncedDir. The idea is that the option for where to write reports
@ -90,7 +88,6 @@ pub async fn run(config: Config) -> Result<()> {
&config.unique_reports,
&config.reports,
&config.no_repro,
&job_result_client,
);
(
Some(reports_dir.path().to_path_buf()),
@ -174,7 +171,7 @@ async fn poll_inputs(
}
message.delete().await?;
} else {
debug!("no new candidate inputs found, sleeping");
warn!("no new candidate inputs found, sleeping");
delay_with_jitter(EMPTY_QUEUE_DELAY).await;
}
}
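
The comment above (cut off by the hunk boundary) describes the pattern: the temporary reports directory is always created, while the path handed to the task and the monitor future exist only when at least one of the three report containers is configured. A minimal sketch of that gate, assuming the tempfile crate; names are illustrative:

// Hedged sketch: only expose a reports path when a report container exists.
use std::path::PathBuf;
use tempfile::TempDir;

fn reports_path(
    reports_dir: &TempDir,
    unique_reports: Option<&str>,
    reports: Option<&str>,
    no_repro: Option<&str>,
) -> Option<PathBuf> {
    if unique_reports.is_some() || reports.is_some() || no_repro.is_some() {
        Some(reports_dir.path().to_path_buf())
    } else {
        None
    }
}

fn main() -> std::io::Result<()> {
    let dir = tempfile::tempdir()?; // always created
    println!("{:?}", reports_path(&dir, Some("unique-reports"), None, None));
    Ok(())
}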

View File

@ -14,7 +14,6 @@ use onefuzz::{
machine_id::MachineIdentity,
syncdir::{SyncOperation, SyncedDir},
};
use onefuzz_result::job_result::{init_job_result, TaskJobResultClient};
use onefuzz_telemetry::{
self as telemetry, Event::task_start, EventData, InstanceTelemetryKey, MicrosoftTelemetryKey,
Role,
@ -51,8 +50,6 @@ pub struct CommonConfig {
pub heartbeat_queue: Option<Url>,
pub job_result_queue: Option<Url>,
pub instance_telemetry_key: Option<InstanceTelemetryKey>,
pub microsoft_telemetry_key: Option<MicrosoftTelemetryKey>,
@ -106,23 +103,6 @@ impl CommonConfig {
None => Ok(None),
}
}
pub async fn init_job_result(&self) -> Result<Option<TaskJobResultClient>> {
match &self.job_result_queue {
Some(url) => {
let result = init_job_result(
url.clone(),
self.task_id,
self.job_id,
self.machine_identity.machine_id,
self.machine_identity.machine_name.clone(),
)
.await?;
Ok(Some(result))
}
None => Ok(None),
}
}
}
#[derive(Debug, Deserialize)]

View File

@ -26,8 +26,6 @@ use onefuzz_file_format::coverage::{
binary::{v1::BinaryCoverageJson as BinaryCoverageJsonV1, BinaryCoverageJson},
source::{v1::SourceCoverageJson as SourceCoverageJsonV1, SourceCoverageJson},
};
use onefuzz_result::job_result::JobResultData;
use onefuzz_result::job_result::{JobResultSender, TaskJobResultClient};
use onefuzz_telemetry::{event, warn, Event::coverage_data, Event::coverage_failed, EventData};
use storage_queue::{Message, QueueClient};
use tokio::fs;
@ -116,7 +114,7 @@ impl CoverageTask {
let allowlist = self.load_target_allowlist().await?;
let heartbeat = self.config.common.init_heartbeat(None).await?;
let job_result = self.config.common.init_job_result().await?;
let mut seen_inputs = false;
let target_exe_path =
@ -131,7 +129,6 @@ impl CoverageTask {
coverage,
allowlist,
heartbeat,
job_result,
target_exe.to_string(),
)?;
@ -222,7 +219,6 @@ struct TaskContext<'a> {
module_allowlist: AllowList,
source_allowlist: Arc<AllowList>,
heartbeat: Option<TaskHeartbeatClient>,
job_result: Option<TaskJobResultClient>,
cache: Arc<DebugInfoCache>,
}
@ -232,7 +228,6 @@ impl<'a> TaskContext<'a> {
coverage: BinaryCoverage,
allowlist: TargetAllowList,
heartbeat: Option<TaskHeartbeatClient>,
job_result: Option<TaskJobResultClient>,
target_exe: String,
) -> Result<Self> {
let cache = DebugInfoCache::new(allowlist.source_files.clone());
@ -252,7 +247,6 @@ impl<'a> TaskContext<'a> {
module_allowlist: allowlist.modules,
source_allowlist: Arc::new(allowlist.source_files),
heartbeat,
job_result,
cache: Arc::new(cache),
})
}
@ -461,16 +455,7 @@ impl<'a> TaskContext<'a> {
let s = CoverageStats::new(&coverage);
event!(coverage_data; Covered = s.covered, Features = s.features, Rate = s.rate);
metric!(coverage_data; 1.0; Covered = s.covered, Features = s.features, Rate = s.rate);
self.job_result
.send_direct(
JobResultData::CoverageData,
HashMap::from([
("covered".to_string(), s.covered as f64),
("features".to_string(), s.features as f64),
("rate".to_string(), s.rate),
]),
)
.await;
Ok(())
}

View File

@ -73,7 +73,6 @@ impl GeneratorTask {
}
let hb_client = self.config.common.init_heartbeat(None).await?;
let jr_client = self.config.common.init_job_result().await?;
for dir in &self.config.readonly_inputs {
dir.init_pull().await?;
@ -85,10 +84,7 @@ impl GeneratorTask {
self.config.ensemble_sync_delay,
);
let crash_dir_monitor = self
.config
.crashes
.monitor_results(new_result, false, &jr_client);
let crash_dir_monitor = self.config.crashes.monitor_results(new_result, false);
let fuzzer = self.fuzzing_loop(hb_client);
@ -302,7 +298,6 @@ mod tests {
task_id: Default::default(),
instance_id: Default::default(),
heartbeat_queue: Default::default(),
job_result_queue: Default::default(),
instance_telemetry_key: Default::default(),
microsoft_telemetry_key: Default::default(),
logs: Default::default(),

View File

@ -1,11 +1,7 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use crate::tasks::{
config::CommonConfig,
heartbeat::{HeartbeatSender, TaskHeartbeatClient},
utils::default_bool_true,
};
use crate::tasks::{config::CommonConfig, heartbeat::HeartbeatSender, utils::default_bool_true};
use anyhow::{Context, Result};
use arraydeque::{ArrayDeque, Wrapping};
use async_trait::async_trait;
@ -16,7 +12,6 @@ use onefuzz::{
process::ExitStatus,
syncdir::{continuous_sync, SyncOperation::Pull, SyncedDir},
};
use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient};
use onefuzz_telemetry::{
Event::{new_coverage, new_crashdump, new_result, runtime_stats},
EventData,
@ -131,31 +126,21 @@ where
self.verify().await?;
let hb_client = self.config.common.init_heartbeat(None).await?;
let jr_client = self.config.common.init_job_result().await?;
// To be scheduled.
let resync = self.continuous_sync_inputs();
let new_inputs = self
.config
.inputs
.monitor_results(new_coverage, true, &jr_client);
let new_crashes = self
.config
.crashes
.monitor_results(new_result, true, &jr_client);
let new_inputs = self.config.inputs.monitor_results(new_coverage, true);
let new_crashes = self.config.crashes.monitor_results(new_result, true);
let new_crashdumps = async {
if let Some(crashdumps) = &self.config.crashdumps {
crashdumps
.monitor_results(new_crashdump, true, &jr_client)
.await
crashdumps.monitor_results(new_crashdump, true).await
} else {
Ok(())
}
};
let (stats_sender, stats_receiver) = mpsc::unbounded_channel();
let report_stats = report_runtime_stats(stats_receiver, &hb_client, &jr_client);
let report_stats = report_runtime_stats(stats_receiver, hb_client);
let fuzzers = self.run_fuzzers(Some(&stats_sender));
futures::try_join!(
resync,
@ -198,7 +183,7 @@ where
.inputs
.local_path
.parent()
.ok_or_else(|| anyhow!("invalid input path"))?;
.ok_or_else(|| anyhow!("Invalid input path"))?;
let temp_path = task_dir.join(".temp");
tokio::fs::create_dir_all(&temp_path).await?;
let temp_dir = tempdir_in(temp_path)?;
@ -516,7 +501,7 @@ impl TotalStats {
self.execs_sec = self.worker_stats.values().map(|x| x.execs_sec).sum();
}
async fn report(&self, jr_client: &Option<TaskJobResultClient>) {
fn report(&self) {
event!(
runtime_stats;
EventData::Count = self.count,
@ -528,17 +513,6 @@ impl TotalStats {
EventData::Count = self.count,
EventData::ExecsSecond = self.execs_sec
);
if let Some(jr_client) = jr_client {
let _ = jr_client
.send_direct(
JobResultData::RuntimeStats,
HashMap::from([
("total_count".to_string(), self.count as f64),
("execs_sec".to_string(), self.execs_sec),
]),
)
.await;
}
}
}
@ -568,8 +542,7 @@ impl Timer {
// are approximating nearest-neighbor interpolation on the runtime stats time series.
async fn report_runtime_stats(
mut stats_channel: mpsc::UnboundedReceiver<RuntimeStats>,
heartbeat_client: &Option<TaskHeartbeatClient>,
jr_client: &Option<TaskJobResultClient>,
heartbeat_client: impl HeartbeatSender,
) -> Result<()> {
// Cache the last-reported stats for a given worker.
//
@ -578,7 +551,7 @@ async fn report_runtime_stats(
let mut total = TotalStats::default();
// report all zeros to start
total.report(jr_client).await;
total.report();
let timer = Timer::new(RUNTIME_STATS_PERIOD);
@ -587,10 +560,10 @@ async fn report_runtime_stats(
Some(stats) = stats_channel.recv() => {
heartbeat_client.alive();
total.update(stats);
total.report(jr_client).await
total.report()
}
_ = timer.wait() => {
total.report(jr_client).await
total.report()
}
}
}
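
The comments above describe the aggregation: the last-reported stats for each worker are cached, and totals are recomputed on every update, so the reported series approximates nearest-neighbor interpolation over the per-worker samples. A self-contained sketch of that bookkeeping (type names are illustrative, not the crate's):

// Hedged sketch: cache the latest sample per worker, then sum the cache.
use std::collections::HashMap;

struct WorkerStats { count: u64, execs_sec: f64 }

#[derive(Default)]
struct TotalStats {
    worker_stats: HashMap<usize, WorkerStats>,
    count: u64,
    execs_sec: f64,
}

impl TotalStats {
    fn update(&mut self, worker: usize, stats: WorkerStats) {
        // Keep only the most recent sample for this worker...
        self.worker_stats.insert(worker, stats);
        // ...then totals are the sums over the cached per-worker values.
        self.count = self.worker_stats.values().map(|s| s.count).sum();
        self.execs_sec = self.worker_stats.values().map(|s| s.execs_sec).sum();
    }
}

fn main() {
    let mut total = TotalStats::default();
    total.update(0, WorkerStats { count: 100, execs_sec: 50.0 });
    total.update(0, WorkerStats { count: 250, execs_sec: 55.0 }); // replaces worker 0
    total.update(1, WorkerStats { count: 80, execs_sec: 40.0 });
    assert_eq!(total.count, 330);
}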

View File

@ -79,10 +79,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
remote_path: config.crashes.remote_path.clone(),
};
crashes.init().await?;
let jr_client = config.common.init_job_result().await?;
let monitor_crashes = crashes.monitor_results(new_result, false, &jr_client);
let monitor_crashes = crashes.monitor_results(new_result, false);
// setup crashdumps
let (crashdump_dir, monitor_crashdumps) = {
@ -98,12 +95,9 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
};
let monitor_dir = crashdump_dir.clone();
let monitor_jr_client = config.common.init_job_result().await?;
let monitor_crashdumps = async move {
if let Some(crashdumps) = monitor_dir {
crashdumps
.monitor_results(new_crashdump, false, &monitor_jr_client)
.await
crashdumps.monitor_results(new_crashdump, false).await
} else {
Ok(())
}
@ -135,13 +129,11 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
if let Some(no_repro) = &config.no_repro {
no_repro.init().await?;
}
let monitor_reports_future = monitor_reports(
reports_dir.path(),
&config.unique_reports,
&config.reports,
&config.no_repro,
&jr_client,
);
let inputs = SyncedDir {
@ -164,7 +156,7 @@ pub async fn spawn(config: SupervisorConfig) -> Result<(), Error> {
delay_with_jitter(delay).await;
}
}
let monitor_inputs = inputs.monitor_results(new_coverage, false, &jr_client);
let monitor_inputs = inputs.monitor_results(new_coverage, false);
let inputs_sync_cancellation = CancellationToken::new(); // never actually cancelled
let inputs_sync_task =
inputs.continuous_sync(Pull, config.ensemble_sync_delay, &inputs_sync_cancellation);
@ -452,7 +444,6 @@ mod tests {
task_id: Default::default(),
instance_id: Default::default(),
heartbeat_queue: Default::default(),
job_result_queue: Default::default(),
instance_telemetry_key: Default::default(),
microsoft_telemetry_key: Default::default(),
logs: Default::default(),

View File

@ -1,8 +1,8 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
use crate::onefuzz::heartbeat::HeartbeatClient;
use anyhow::Result;
use onefuzz::heartbeat::HeartbeatClient;
use reqwest::Url;
use serde::{self, Deserialize, Serialize};
use std::time::Duration;

View File

@ -83,7 +83,7 @@ pub async fn spawn(config: &Config) -> Result<()> {
}
}
} else {
debug!("no new candidate inputs found, sleeping");
warn!("no new candidate inputs found, sleeping");
delay_with_jitter(EMPTY_QUEUE_DELAY).await;
};
}

View File

@ -120,7 +120,7 @@ async fn process_message(config: &Config, input_queue: QueueClient) -> Result<()
}
Ok(())
} else {
debug!("no new candidate inputs found, sleeping");
warn!("no new candidate inputs found, sleeping");
delay_with_jitter(EMPTY_QUEUE_DELAY).await;
Ok(())
}

View File

@ -2,14 +2,12 @@
// Licensed under the MIT License.
use crate::tasks::{
config::CommonConfig,
heartbeat::{HeartbeatSender, TaskHeartbeatClient},
report::crash_report::{parse_report_file, CrashTestResult, RegressionReport},
};
use anyhow::{Context, Result};
use async_trait::async_trait;
use onefuzz::syncdir::SyncedDir;
use onefuzz_result::job_result::TaskJobResultClient;
use reqwest::Url;
use std::path::PathBuf;
@ -26,7 +24,7 @@ pub trait RegressionHandler {
/// Runs the regression task
pub async fn run(
common_config: &CommonConfig,
heartbeat_client: Option<TaskHeartbeatClient>,
regression_reports: &SyncedDir,
crashes: &SyncedDir,
report_dirs: &[&SyncedDir],
@ -37,9 +35,6 @@ pub async fn run(
info!("starting regression task");
regression_reports.init().await?;
let heartbeat_client = common_config.init_heartbeat(None).await?;
let job_result_client = common_config.init_job_result().await?;
handle_crash_reports(
handler,
crashes,
@ -47,7 +42,6 @@ pub async fn run(
report_list,
regression_reports,
&heartbeat_client,
&job_result_client,
)
.await
.context("handling crash reports")?;
@ -58,7 +52,6 @@ pub async fn run(
readonly_inputs,
regression_reports,
&heartbeat_client,
&job_result_client,
)
.await
.context("handling inputs")?;
@ -78,7 +71,6 @@ pub async fn handle_inputs(
readonly_inputs: &SyncedDir,
regression_reports: &SyncedDir,
heartbeat_client: &Option<TaskHeartbeatClient>,
job_result_client: &Option<TaskJobResultClient>,
) -> Result<()> {
readonly_inputs.init_pull().await?;
let mut input_files = tokio::fs::read_dir(&readonly_inputs.local_path).await?;
@ -103,7 +95,7 @@ pub async fn handle_inputs(
crash_test_result,
original_crash_test_result: None,
}
.save(None, regression_reports, job_result_client)
.save(None, regression_reports)
.await?
}
@ -117,7 +109,6 @@ pub async fn handle_crash_reports(
report_list: &Option<Vec<String>>,
regression_reports: &SyncedDir,
heartbeat_client: &Option<TaskHeartbeatClient>,
job_result_client: &Option<TaskJobResultClient>,
) -> Result<()> {
// without crash report containers, skip this method
if report_dirs.is_empty() {
@ -167,7 +158,7 @@ pub async fn handle_crash_reports(
crash_test_result,
original_crash_test_result: Some(original_crash_test_result),
}
.save(Some(file_name), regression_reports, job_result_client)
.save(Some(file_name), regression_reports)
.await?
}
}

View File

@ -89,6 +89,7 @@ impl GenericRegressionTask {
pub async fn run(&self) -> Result<()> {
info!("Starting generic regression task");
let heartbeat_client = self.config.common.init_heartbeat(None).await?;
let mut report_dirs = vec![];
for dir in vec![
@ -102,7 +103,7 @@ impl GenericRegressionTask {
report_dirs.push(dir);
}
common::run(
&self.config.common,
heartbeat_client,
&self.config.regression_reports,
&self.config.crashes,
&report_dirs,

View File

@ -103,8 +103,9 @@ impl LibFuzzerRegressionTask {
report_dirs.push(dir);
}
let heartbeat_client = self.config.common.init_heartbeat(None).await?;
common::run(
&self.config.common,
heartbeat_client,
&self.config.regression_reports,
&self.config.crashes,
&report_dirs,

View File

@ -3,7 +3,6 @@
use anyhow::{Context, Result};
use onefuzz::{blob::BlobUrl, monitor::DirectoryMonitor, syncdir::SyncedDir};
use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient};
use onefuzz_telemetry::{
Event::{
new_report, new_unable_to_reproduce, new_unique_report, regression_report,
@ -13,7 +12,6 @@ use onefuzz_telemetry::{
};
use serde::{Deserialize, Serialize};
use stacktrace_parser::CrashLog;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use uuid::Uuid;
@ -113,7 +111,6 @@ impl RegressionReport {
self,
report_name: Option<String>,
regression_reports: &SyncedDir,
jr_client: &Option<TaskJobResultClient>,
) -> Result<()> {
let (event, name) = match &self.crash_test_result {
CrashTestResult::CrashReport(report) => {
@ -129,15 +126,6 @@ impl RegressionReport {
if upload_or_save_local(&self, &name, regression_reports).await? {
event!(event; EventData::Path = name.clone());
metric!(event; 1.0; EventData::Path = name.clone());
if let Some(jr_client) = jr_client {
let _ = jr_client
.send_direct(
JobResultData::NewRegressionReport,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
}
Ok(())
}
@ -161,7 +149,6 @@ impl CrashTestResult {
unique_reports: &Option<SyncedDir>,
reports: &Option<SyncedDir>,
no_repro: &Option<SyncedDir>,
jr_client: &Option<TaskJobResultClient>,
) -> Result<()> {
match self {
Self::CrashReport(report) => {
@ -171,15 +158,6 @@ impl CrashTestResult {
if upload_or_save_local(&report, &name, unique_reports).await? {
event!(new_unique_report; EventData::Path = report.unique_blob_name());
metric!(new_unique_report; 1.0; EventData::Path = report.unique_blob_name());
if let Some(jr_client) = jr_client {
let _ = jr_client
.send_direct(
JobResultData::NewUniqueReport,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
}
}
@ -188,15 +166,6 @@ impl CrashTestResult {
if upload_or_save_local(&report, &name, reports).await? {
event!(new_report; EventData::Path = report.blob_name());
metric!(new_report; 1.0; EventData::Path = report.blob_name());
if let Some(jr_client) = jr_client {
let _ = jr_client
.send_direct(
JobResultData::NewReport,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
}
}
}
@ -207,15 +176,6 @@ impl CrashTestResult {
if upload_or_save_local(&report, &name, no_repro).await? {
event!(new_unable_to_reproduce; EventData::Path = report.blob_name());
metric!(new_unable_to_reproduce; 1.0; EventData::Path = report.blob_name());
if let Some(jr_client) = jr_client {
let _ = jr_client
.send_direct(
JobResultData::NoReproCrashingInput,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
}
}
}
@ -364,7 +324,6 @@ pub async fn monitor_reports(
unique_reports: &Option<SyncedDir>,
reports: &Option<SyncedDir>,
no_crash: &Option<SyncedDir>,
jr_client: &Option<TaskJobResultClient>,
) -> Result<()> {
if unique_reports.is_none() && reports.is_none() && no_crash.is_none() {
debug!("no report directories configured");
@ -375,9 +334,7 @@ pub async fn monitor_reports(
while let Some(file) = monitor.next_file().await? {
let result = parse_report_file(file).await?;
result
.save(unique_reports, reports, no_crash, jr_client)
.await?;
result.save(unique_reports, reports, no_crash).await?;
}
Ok(())

View File

@ -8,6 +8,16 @@ use std::{
sync::Arc,
};
use anyhow::{Context, Result};
use async_trait::async_trait;
use onefuzz::expand::Expand;
use onefuzz::fs::set_executable;
use onefuzz::{blob::BlobUrl, sha256, syncdir::SyncedDir};
use reqwest::Url;
use serde::Deserialize;
use storage_queue::{Message, QueueClient};
use tokio::fs;
use crate::tasks::report::crash_report::*;
use crate::tasks::report::dotnet::common::collect_exception_info;
use crate::tasks::{
@ -16,16 +26,6 @@ use crate::tasks::{
heartbeat::{HeartbeatSender, TaskHeartbeatClient},
utils::{default_bool_true, try_resolve_setup_relative_path},
};
use anyhow::{Context, Result};
use async_trait::async_trait;
use onefuzz::expand::Expand;
use onefuzz::fs::set_executable;
use onefuzz::{blob::BlobUrl, sha256, syncdir::SyncedDir};
use onefuzz_result::job_result::TaskJobResultClient;
use reqwest::Url;
use serde::Deserialize;
use storage_queue::{Message, QueueClient};
use tokio::fs;
const DOTNET_DUMP_TOOL_NAME: &str = "dotnet-dump";
@ -114,18 +114,15 @@ impl DotnetCrashReportTask {
pub struct AsanProcessor {
config: Arc<Config>,
heartbeat_client: Option<TaskHeartbeatClient>,
job_result_client: Option<TaskJobResultClient>,
}
impl AsanProcessor {
pub async fn new(config: Arc<Config>) -> Result<Self> {
let heartbeat_client = config.common.init_heartbeat(None).await?;
let job_result_client = config.common.init_job_result().await?;
Ok(Self {
config,
heartbeat_client,
job_result_client,
})
}
@ -263,7 +260,6 @@ impl Processor for AsanProcessor {
&self.config.unique_reports,
&self.config.reports,
&self.config.no_repro,
&self.job_result_client,
)
.await;

View File

@ -13,7 +13,6 @@ use async_trait::async_trait;
use onefuzz::{
blob::BlobUrl, input_tester::Tester, machine_id::MachineIdentity, sha256, syncdir::SyncedDir,
};
use onefuzz_result::job_result::TaskJobResultClient;
use reqwest::Url;
use serde::Deserialize;
use std::{
@ -74,9 +73,7 @@ impl ReportTask {
pub async fn managed_run(&mut self) -> Result<()> {
info!("Starting generic crash report task");
let heartbeat_client = self.config.common.init_heartbeat(None).await?;
let job_result_client = self.config.common.init_job_result().await?;
let mut processor =
GenericReportProcessor::new(&self.config, heartbeat_client, job_result_client);
let mut processor = GenericReportProcessor::new(&self.config, heartbeat_client);
#[allow(clippy::manual_flatten)]
for entry in [
@ -186,19 +183,13 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result<CrashTestResult> {
pub struct GenericReportProcessor<'a> {
config: &'a Config,
heartbeat_client: Option<TaskHeartbeatClient>,
job_result_client: Option<TaskJobResultClient>,
}
impl<'a> GenericReportProcessor<'a> {
pub fn new(
config: &'a Config,
heartbeat_client: Option<TaskHeartbeatClient>,
job_result_client: Option<TaskJobResultClient>,
) -> Self {
pub fn new(config: &'a Config, heartbeat_client: Option<TaskHeartbeatClient>) -> Self {
Self {
config,
heartbeat_client,
job_result_client,
}
}
@ -248,7 +239,6 @@ impl<'a> Processor for GenericReportProcessor<'a> {
&self.config.unique_reports,
&self.config.reports,
&self.config.no_repro,
&self.job_result_client,
)
.await
.context("saving report failed")

View File

@ -13,7 +13,6 @@ use async_trait::async_trait;
use onefuzz::{
blob::BlobUrl, libfuzzer::LibFuzzer, machine_id::MachineIdentity, sha256, syncdir::SyncedDir,
};
use onefuzz_result::job_result::TaskJobResultClient;
use reqwest::Url;
use serde::Deserialize;
use std::{
@ -197,18 +196,15 @@ pub async fn test_input(args: TestInputArgs<'_>) -> Result<CrashTestResult> {
pub struct AsanProcessor {
config: Arc<Config>,
heartbeat_client: Option<TaskHeartbeatClient>,
job_result_client: Option<TaskJobResultClient>,
}
impl AsanProcessor {
pub async fn new(config: Arc<Config>) -> Result<Self> {
let heartbeat_client = config.common.init_heartbeat(None).await?;
let job_result_client = config.common.init_job_result().await?;
Ok(Self {
config,
heartbeat_client,
job_result_client,
})
}
@ -261,7 +257,6 @@ impl Processor for AsanProcessor {
&self.config.unique_reports,
&self.config.reports,
&self.config.no_repro,
&self.job_result_client,
)
.await
}

View File

@ -44,7 +44,6 @@ tempfile = "3.7.0"
process_control = "4.0"
reqwest-retry = { path = "../reqwest-retry" }
onefuzz-telemetry = { path = "../onefuzz-telemetry" }
onefuzz-result = { path = "../onefuzz-result" }
stacktrace-parser = { path = "../stacktrace-parser" }
backoff = { version = "0.4", features = ["tokio"] }

View File

@ -192,15 +192,10 @@ impl BlobContainerUrl {
}
pub fn as_path(&self, prefix: impl AsRef<Path>) -> Result<PathBuf> {
match (self.account(), self.container()) {
(Some(account), Some(container)) => {
let mut path = PathBuf::new();
path.push(account);
path.push(container);
Ok(prefix.as_ref().join(path))
}
_ => bail!("Invalid container Url"),
}
let dir = self
.account()
.ok_or_else(|| anyhow!("Invalid container Url"))?;
Ok(prefix.as_ref().join(dir))
}
}
@ -531,14 +526,4 @@ mod tests {
"id:000000,sig:06,src:000000,op:havoc,rep:128"
);
}
#[test]
fn test_as_path() -> Result<()> {
let root = PathBuf::from(r"/onefuzz");
let url = BlobContainerUrl::parse("https://myaccount.blob.core.windows.net/mycontainer")?;
let path = url.as_path(root)?;
assert_eq!(PathBuf::from(r"/onefuzz/myaccount/mycontainer"), path);
Ok(())
}
}
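
For reference, the two path layouts involved in the as_path hunk above, shown with plain PathBuf arithmetic and the account/container names from the deleted test; no onefuzz types are used:

// Hedged illustration: the old body joined account and container, the new
// body joins only the account.
use std::path::PathBuf;

fn main() {
    let prefix = PathBuf::from("/onefuzz");
    let (account, container) = ("myaccount", "mycontainer");

    let old = prefix.join(account).join(container);
    let new = prefix.join(account);

    assert_eq!(old, PathBuf::from("/onefuzz/myaccount/mycontainer"));
    assert_eq!(new, PathBuf::from("/onefuzz/myaccount"));
}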

View File

@ -11,12 +11,10 @@ use crate::{
};
use anyhow::{Context, Result};
use dunce::canonicalize;
use onefuzz_result::job_result::{JobResultData, JobResultSender, TaskJobResultClient};
use onefuzz_telemetry::{Event, EventData};
use reqwest::{StatusCode, Url};
use reqwest_retry::{RetryCheck, SendRetry, DEFAULT_RETRY_PERIOD, MAX_RETRY_ATTEMPTS};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::{env::current_dir, path::PathBuf, str, time::Duration};
use tokio::{fs, select};
use tokio_util::sync::CancellationToken;
@ -243,7 +241,6 @@ impl SyncedDir {
url: BlobContainerUrl,
event: Event,
ignore_dotfiles: bool,
jr_client: &Option<TaskJobResultClient>,
) -> Result<()> {
debug!("monitoring {}", path.display());
@ -268,39 +265,9 @@ impl SyncedDir {
if ignore_dotfiles && file_name_event_str.starts_with('.') {
continue;
}
event!(event.clone(); EventData::Path = file_name_event_str);
metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str);
if let Some(jr_client) = jr_client {
match event {
Event::new_result => {
jr_client
.send_direct(
JobResultData::NewCrashingInput,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
Event::new_coverage => {
jr_client
.send_direct(
JobResultData::CoverageData,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
Event::new_crashdump => {
jr_client
.send_direct(
JobResultData::NewCrashDump,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
_ => {
warn!("Unhandled job result!");
}
}
}
let destination = path.join(file_name);
if let Err(err) = fs::copy(&item, &destination).await {
let error_message = format!(
@ -338,29 +305,6 @@ impl SyncedDir {
event!(event.clone(); EventData::Path = file_name_event_str);
metric!(event.clone(); 1.0; EventData::Path = file_name_str_metric_str);
if let Some(jr_client) = jr_client {
match event {
Event::new_result => {
jr_client
.send_direct(
JobResultData::NewCrashingInput,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
Event::new_coverage => {
jr_client
.send_direct(
JobResultData::CoverageData,
HashMap::from([("count".to_string(), 1.0)]),
)
.await;
}
_ => {
warn!("Unhandled job result!");
}
}
}
if let Err(err) = uploader.upload(item.clone()).await {
let error_message = format!(
"Couldn't upload file. path:{} dir:{} err:{:?}",
@ -392,12 +336,7 @@ impl SyncedDir {
/// The intent of this is to support use cases where we usually want a directory
/// to be initialized, but a user-supplied binary, (such as AFL) logically owns
/// a directory, and may reset it.
pub async fn monitor_results(
&self,
event: Event,
ignore_dotfiles: bool,
job_result_client: &Option<TaskJobResultClient>,
) -> Result<()> {
pub async fn monitor_results(&self, event: Event, ignore_dotfiles: bool) -> Result<()> {
if let Some(url) = self.remote_path.clone() {
loop {
debug!("waiting to monitor {}", self.local_path.display());
@ -416,7 +355,6 @@ impl SyncedDir {
url.clone(),
event.clone(),
ignore_dotfiles,
job_result_client,
)
.await?;
}
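
The doc comment above explains why monitor_results loops: a user-supplied binary such as AFL logically owns the directory and may delete and recreate it, so monitoring has to restart rather than fail. A minimal, synchronous sketch of that reset-tolerant loop (polling stands in for the real DirectoryMonitor):

// Hedged sketch: wait for the directory, watch it, start over on reset.
use std::path::Path;
use std::thread::sleep;
use std::time::Duration;

fn monitor_results(dir: &Path) {
    loop {
        // The owning tool may not have created the directory yet, or may
        // have just reset it.
        while !dir.exists() {
            sleep(Duration::from_secs(1));
        }
        watch_until_removed(dir);
    }
}

// Placeholder for the real watcher: onefuzz enumerates and syncs new files;
// here we only poll until the directory goes away again.
fn watch_until_removed(dir: &Path) {
    while dir.exists() {
        sleep(Duration::from_secs(1));
    }
}

fn main() {
    // Runs until the process is killed, like the real monitor loop.
    monitor_results(Path::new("./crashes"));
}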

View File

@ -33,7 +33,7 @@ var storageAccountFuncQueuesParams = [
'update-queue'
'webhooks'
'signalr-events'
'job-result'
'custom-metrics'
]
var fileChangesQueueIndex = 0

View File

@ -88,7 +88,6 @@ class Integration(BaseModel):
target_method: Optional[str]
setup_dir: Optional[str]
target_env: Optional[Dict[str, str]]
pool: PoolName
TARGETS: Dict[str, Integration] = {
@ -98,7 +97,6 @@ TARGETS: Dict[str, Integration] = {
target_exe="fuzz.exe",
inputs="seeds",
wait_for_files={ContainerType.unique_reports: 1},
pool="linux",
),
"linux-libfuzzer": Integration(
template=TemplateType.libfuzzer,
@ -126,7 +124,6 @@ TARGETS: Dict[str, Integration] = {
"--only_asan_failures",
"--write_test_file={extra_output_dir}/test.txt",
],
pool="linux",
),
"linux-libfuzzer-with-options": Integration(
template=TemplateType.libfuzzer,
@ -140,7 +137,6 @@ TARGETS: Dict[str, Integration] = {
},
reboot_after_setup=True,
fuzzing_target_options=["-runs=10000000"],
pool="linux",
),
"linux-libfuzzer-dlopen": Integration(
template=TemplateType.libfuzzer,
@ -154,7 +150,6 @@ TARGETS: Dict[str, Integration] = {
},
reboot_after_setup=True,
use_setup=True,
pool="linux",
),
"linux-libfuzzer-linked-library": Integration(
template=TemplateType.libfuzzer,
@ -168,7 +163,6 @@ TARGETS: Dict[str, Integration] = {
},
reboot_after_setup=True,
use_setup=True,
pool="linux",
),
"linux-libfuzzer-dotnet": Integration(
template=TemplateType.libfuzzer_dotnet,
@ -186,7 +180,6 @@ TARGETS: Dict[str, Integration] = {
ContainerType.unique_reports: 1,
},
test_repro=False,
pool="linux",
),
"linux-libfuzzer-aarch64-crosscompile": Integration(
template=TemplateType.libfuzzer_qemu_user,
@ -196,7 +189,6 @@ TARGETS: Dict[str, Integration] = {
use_setup=True,
wait_for_files={ContainerType.inputs: 2, ContainerType.crashes: 1},
test_repro=False,
pool="linux",
),
"linux-libfuzzer-rust": Integration(
template=TemplateType.libfuzzer,
@ -204,7 +196,6 @@ TARGETS: Dict[str, Integration] = {
target_exe="fuzz_target_1",
wait_for_files={ContainerType.unique_reports: 1, ContainerType.coverage: 1},
fuzzing_target_options=["--test:{extra_setup_dir}"],
pool="linux",
),
"linux-trivial-crash": Integration(
template=TemplateType.radamsa,
@ -213,7 +204,6 @@ TARGETS: Dict[str, Integration] = {
inputs="seeds",
wait_for_files={ContainerType.unique_reports: 1},
inject_fake_regression=True,
pool="linux",
),
"linux-trivial-crash-asan": Integration(
template=TemplateType.radamsa,
@ -223,28 +213,6 @@ TARGETS: Dict[str, Integration] = {
wait_for_files={ContainerType.unique_reports: 1},
check_asan_log=True,
disable_check_debugger=True,
pool="linux",
),
# TODO: Don't install OMS extension on linux anymore
# TODO: Figure out why non-mariner work is being scheduled to the mariner pool
"mariner-libfuzzer": Integration(
template=TemplateType.libfuzzer,
os=OS.linux,
target_exe="fuzz.exe",
inputs="seeds",
wait_for_files={
ContainerType.unique_reports: 1,
ContainerType.coverage: 1,
ContainerType.inputs: 2,
ContainerType.extra_output: 1,
},
reboot_after_setup=True,
inject_fake_regression=True,
fuzzing_target_options=[
"--test:{extra_setup_dir}",
"--write_test_file={extra_output_dir}/test.txt",
],
pool=PoolName("mariner")
),
"windows-libfuzzer": Integration(
template=TemplateType.libfuzzer,
@ -266,7 +234,6 @@ TARGETS: Dict[str, Integration] = {
"--only_asan_failures",
"--write_test_file={extra_output_dir}/test.txt",
],
pool="windows",
),
"windows-libfuzzer-linked-library": Integration(
template=TemplateType.libfuzzer,
@ -279,7 +246,6 @@ TARGETS: Dict[str, Integration] = {
ContainerType.coverage: 1,
},
use_setup=True,
pool="windows",
),
"windows-libfuzzer-load-library": Integration(
template=TemplateType.libfuzzer,
@ -292,7 +258,6 @@ TARGETS: Dict[str, Integration] = {
ContainerType.coverage: 1,
},
use_setup=True,
pool="windows",
),
"windows-libfuzzer-dotnet": Integration(
template=TemplateType.libfuzzer_dotnet,
@ -310,7 +275,6 @@ TARGETS: Dict[str, Integration] = {
ContainerType.unique_reports: 1,
},
test_repro=False,
pool="windows",
),
"windows-trivial-crash": Integration(
template=TemplateType.radamsa,
@ -319,7 +283,6 @@ TARGETS: Dict[str, Integration] = {
inputs="seeds",
wait_for_files={ContainerType.unique_reports: 1},
inject_fake_regression=True,
pool="windows",
),
}
@ -388,7 +351,7 @@ class TestOnefuzz:
self.inject_log(self.start_log_marker)
for entry in os_list:
name = self.build_pool_name(entry.name)
name = PoolName(f"testpool-{entry.name}-{self.test_id}")
self.logger.info("creating pool: %s:%s", entry.name, name)
self.of.pools.create(name, entry)
self.logger.info("creating scaleset for pool: %s", name)
@ -396,15 +359,6 @@ class TestOnefuzz:
name, pool_size, region=region, initial_size=pool_size
)
name = self.build_pool_name("mariner")
self.logger.info("creating pool: %s:%s", "mariner", name)
self.of.pools.create(name, OS.linux)
self.logger.info("creating scaleset for pool: %s", name)
self.of.scalesets.create(
name, pool_size, region=region, initial_size=pool_size, image="MicrosoftCBLMariner:cbl-mariner:cbl-mariner-2-gen2:latest"
)
class UnmanagedPool:
def __init__(
self,
@ -606,9 +560,12 @@ class TestOnefuzz:
) -> List[UUID]:
"""Launch all of the fuzzing templates"""
pool = None
pools: Dict[OS, Pool] = {}
if unmanaged_pool is not None:
pool = unmanaged_pool.pool_name
pools[unmanaged_pool.the_os] = self.of.pools.get(unmanaged_pool.pool_name)
else:
for pool in self.of.pools.list():
pools[pool.os] = pool
job_ids = []
@ -619,8 +576,8 @@ class TestOnefuzz:
if config.os not in os_list:
continue
if pool is None:
pool = self.build_pool_name(config.pool)
if config.os not in pools.keys():
raise Exception(f"No pool for target: {target} ,os: {config.os}")
self.logger.info("launching: %s", target)
@ -644,9 +601,8 @@ class TestOnefuzz:
setup = Directory(os.path.join(setup, config.nested_setup_dir))
job: Optional[Job] = None
job = self.build_job(
duration, pool, target, config, setup, target_exe, inputs
duration, pools, target, config, setup, target_exe, inputs
)
if config.inject_fake_regression and job is not None:
@ -662,7 +618,7 @@ class TestOnefuzz:
def build_job(
self,
duration: int,
pool: PoolName,
pools: Dict[OS, Pool],
target: str,
config: Integration,
setup: Optional[Directory],
@ -678,7 +634,7 @@ class TestOnefuzz:
self.project,
target,
BUILD,
pool,
pools[config.os].name,
target_exe=target_exe,
inputs=inputs,
setup_dir=setup,
@ -703,7 +659,7 @@ class TestOnefuzz:
self.project,
target,
BUILD,
pool,
pools[config.os].name,
target_dll=File(config.target_exe),
inputs=inputs,
setup_dir=setup,
@ -719,7 +675,7 @@ class TestOnefuzz:
self.project,
target,
BUILD,
pool,
pools[config.os].name,
inputs=inputs,
target_exe=target_exe,
duration=duration,
@ -732,7 +688,7 @@ class TestOnefuzz:
self.project,
target,
BUILD,
pool_name=pool,
pool_name=pools[config.os].name,
target_exe=target_exe,
inputs=inputs,
setup_dir=setup,
@ -747,7 +703,7 @@ class TestOnefuzz:
self.project,
target,
BUILD,
pool_name=pool,
pool_name=pools[config.os].name,
target_exe=target_exe,
inputs=inputs,
setup_dir=setup,
@ -1277,9 +1233,6 @@ class TestOnefuzz:
if seen_errors:
raise Exception("logs included errors")
def build_pool_name(self, os_type: str) -> PoolName:
return PoolName(f"testpool-{os_type}-{self.test_id}")
class Run(Command):

src/runtime-tools/linux/setup.sh Normal file → Executable file
View File

@ -18,14 +18,6 @@ export DOTNET_CLI_HOME="$DOTNET_ROOT"
export ONEFUZZ_ROOT=/onefuzz
export LLVM_SYMBOLIZER_PATH=/onefuzz/bin/llvm-symbolizer
# `logger` won't work on mariner unless we install this package first
if type yum > /dev/null 2> /dev/null; then
until yum install -y util-linux sudo; do
echo "yum failed. sleep 10s, then retrying"
sleep 10
done
fi
logger "onefuzz: making directories"
sudo mkdir -p /onefuzz/downloaded
sudo chown -R $(whoami) /onefuzz
@ -142,53 +134,31 @@ if type apt > /dev/null 2> /dev/null; then
sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH
fi
# Needed to install dotnet
# Install dotnet
until sudo apt install -y curl libicu-dev; do
logger "apt failed, sleeping 10s then retrying"
sleep 10
done
elif type yum > /dev/null 2> /dev/null; then
until yum install -y gdb gdb-gdbserver libunwind awk ca-certificates tar yum-utils shadow-utils cronie procps; do
echo "yum failed. sleep 10s, then retrying"
sleep 10
logger "downloading dotnet install"
curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install'
chmod +x dotnet-install.sh
for version in "${DOTNET_VERSIONS[@]}"; do
logger "running dotnet install $version"
/bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup'
done
rm dotnet-install.sh
# Install updated Microsoft Open Management Infrastructure - github.com/microsoft/omi
yum-config-manager --add-repo=https://packages.microsoft.com/config/rhel/8/prod.repo 2>&1 | logger -s -i -t 'onefuzz-OMI-add-MS-repo'
yum install -y omi 2>&1 | logger -s -i -t 'onefuzz-OMI-install'
if ! [ -f ${LLVM_SYMBOLIZER_PATH} ]; then
until yum install -y llvm-12.0.1; do
echo "yum failed, sleeping 10s then retrying"
sleep 10
done
# If specifying symbolizer, exe name must be a "known symbolizer".
# Using `llvm-symbolizer` works for clang 8 .. 12.
sudo ln -f -s $(which llvm-symbolizer-12) $LLVM_SYMBOLIZER_PATH
fi
logger "install dotnet tools"
pushd "$DOTNET_ROOT"
ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
"$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
"$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
"$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
popd
fi
# Install dotnet
logger "downloading dotnet install"
curl --retry 10 -sSL https://dot.net/v1/dotnet-install.sh -o dotnet-install.sh 2>&1 | logger -s -i -t 'onefuzz-curl-dotnet-install'
chmod +x dotnet-install.sh
for version in "${DOTNET_VERSIONS[@]}"; do
logger "running dotnet install $version"
/bin/bash ./dotnet-install.sh --channel "$version" --install-dir "$DOTNET_ROOT" 2>&1 | logger -s -i -t 'onefuzz-dotnet-setup'
done
rm dotnet-install.sh
logger "install dotnet tools"
pushd "$DOTNET_ROOT"
ls -lah 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
"$DOTNET_ROOT"/dotnet tool install dotnet-dump --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
"$DOTNET_ROOT"/dotnet tool install dotnet-coverage --version 17.5 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
"$DOTNET_ROOT"/dotnet tool install dotnet-sos --version 6.0.351802 --tool-path /onefuzz/tools 2>&1 | logger -s -i -t 'onefuzz-dotnet-tools'
popd
if [ -v DOCKER_BUILD ]; then
echo "building for docker"
elif [ -d /etc/systemd/system ]; then