Mirror of https://github.com/microsoft/onefuzz.git, synced 2025-06-14 19:08:08 +00:00
Add Roslyn analyzer to check results are used (#2443)
As seen in #2441, it is easy to accidentally drop the return values of updated entities. This PR adds a Roslyn analyzer that detects when a return value is left unused. To explicitly ignore a value, discard it with `_ = …;`. Closes #2442.
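For illustration, here is a minimal sketch of the pattern the analyzer enforces. The `Node` record and `StopAsync` method are hypothetical stand-ins, not code from this repo, while `IgnoreResult` mirrors the extension method added in this PR:

```csharp
using System;
using System.Threading.Tasks;

public record Node(Guid MachineId, string State);

public static class Example {
    // Stand-in for an ORM update method that returns the updated entity.
    public static Task<Node> StopAsync(Node node) =>
        Task.FromResult(node with { State = "stopped" });

    // Explicit discard helper, mirroring ObjectExtention.IgnoreResult in this PR.
    public static Task IgnoreResult<T>(this Task<T> task) => task;

    public static async Task Run(Node node) {
        await StopAsync(node);                  // flagged: updated entity silently dropped
        _ = await StopAsync(node);              // ok: result explicitly discarded
        await StopAsync(node).IgnoreResult();   // ok: discarded via the helper
        Node stopped = await StopAsync(node);   // ok: result used
        Console.WriteLine(stopped.State);
    }
}
```

The bulk of the diff below is mechanical: call-sites either consume the returned entity (e.g. `task = await ...SetState(...)`), discard it with `_ =`, or route it through `IgnoreResult()`.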
@ -162,3 +162,6 @@ dotnet_diagnostic.CA1848.severity = none
|
|||||||
# allow throwing base "Exception" class, since it's done a lot
|
# allow throwing base "Exception" class, since it's done a lot
|
||||||
# TODO: improve this
|
# TODO: improve this
|
||||||
dotnet_diagnostic.CA2201.severity = suggestion
|
dotnet_diagnostic.CA2201.severity = suggestion
|
||||||
|
|
||||||
|
# ignore this custom diagnostic about "complex" code
|
||||||
|
dotnet_diagnostic.CSE006.severity = none
|
||||||
|
@ -44,6 +44,7 @@
|
|||||||
<PackageReference Include="Scriban" Version="5.5.0" />
|
<PackageReference Include="Scriban" Version="5.5.0" />
|
||||||
<PackageReference Include="Octokit" Version="2.0.1" />
|
<PackageReference Include="Octokit" Version="2.0.1" />
|
||||||
<PackageReference Include="Microsoft.TeamFoundationServer.Client" Version="19.209.0-preview" />
|
<PackageReference Include="Microsoft.TeamFoundationServer.Client" Version="19.209.0-preview" />
|
||||||
|
<PackageReference Include="SmartAnalyzers.CSharpExtensions.Annotations" Version="4.2.7" />
|
||||||
</ItemGroup>
|
</ItemGroup>
|
||||||
<ItemGroup>
|
<ItemGroup>
|
||||||
<None Update="host.json">
|
<None Update="host.json">
|
||||||
@ -60,4 +61,8 @@
|
|||||||
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
|
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
|
||||||
</EmbeddedResource>
|
</EmbeddedResource>
|
||||||
</ItemGroup>
|
</ItemGroup>
|
||||||
|
|
||||||
|
<ItemGroup>
|
||||||
|
<AdditionalFiles Include="../CSharpExtensions.json" />
|
||||||
|
</ItemGroup>
|
||||||
</Project>
|
</Project>
|
||||||
|
@ -52,7 +52,7 @@ public class AgentCommands {
|
|||||||
|
|
||||||
var message = await _context.NodeMessageOperations.GetEntityAsync(nodeCommand.MachineId.ToString(), nodeCommand.MessageId);
|
var message = await _context.NodeMessageOperations.GetEntityAsync(nodeCommand.MachineId.ToString(), nodeCommand.MessageId);
|
||||||
if (message != null) {
|
if (message != null) {
|
||||||
await _context.NodeMessageOperations.Delete(message);
|
await _context.NodeMessageOperations.Delete(message).IgnoreResult();
|
||||||
} else {
|
} else {
|
||||||
_log.WithTag("Command", "DELETE").Verbose($"failed to find machine id {nodeCommand.MachineId} for message {nodeCommand.MessageId}");
|
_log.WithTag("Command", "DELETE").Verbose($"failed to find machine id {nodeCommand.MachineId} for message {nodeCommand.MessageId}");
|
||||||
}
|
}
|
||||||
|
@ -73,12 +73,14 @@ public class AgentEvents {
|
|||||||
if (ev.State == NodeState.Free) {
|
if (ev.State == NodeState.Free) {
|
||||||
if (node.ReimageRequested || node.DeleteRequested) {
|
if (node.ReimageRequested || node.DeleteRequested) {
|
||||||
_log.Info($"stopping free node with reset flags: {machineId}");
|
_log.Info($"stopping free node with reset flags: {machineId}");
|
||||||
|
// discard result: node not used after this point
|
||||||
_ = await _context.NodeOperations.Stop(node);
|
_ = await _context.NodeOperations.Stop(node);
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (await _context.NodeOperations.CouldShrinkScaleset(node)) {
|
if (await _context.NodeOperations.CouldShrinkScaleset(node)) {
|
||||||
_log.Info($"stopping free node to resize scaleset: {machineId}");
|
_log.Info($"stopping free node to resize scaleset: {machineId}");
|
||||||
|
// discard result: node not used after this point
|
||||||
_ = await _context.NodeOperations.SetHalt(node);
|
_ = await _context.NodeOperations.SetHalt(node);
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
@ -87,6 +89,7 @@ public class AgentEvents {
|
|||||||
if (ev.State == NodeState.Init) {
|
if (ev.State == NodeState.Init) {
|
||||||
if (node.DeleteRequested) {
|
if (node.DeleteRequested) {
|
||||||
_log.Info($"stopping node (init and delete_requested): {machineId}");
|
_log.Info($"stopping node (init and delete_requested): {machineId}");
|
||||||
|
// discard result: node not used after this point
|
||||||
_ = await _context.NodeOperations.Stop(node);
|
_ = await _context.NodeOperations.Stop(node);
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
@ -95,6 +98,7 @@ public class AgentEvents {
|
|||||||
// they send 'init' with reimage_requested, it's because the node was reimaged
|
// they send 'init' with reimage_requested, it's because the node was reimaged
|
||||||
// successfully.
|
// successfully.
|
||||||
node = node with { ReimageRequested = false, InitializedAt = DateTimeOffset.UtcNow };
|
node = node with { ReimageRequested = false, InitializedAt = DateTimeOffset.UtcNow };
|
||||||
|
// discard result: node not used after this point
|
||||||
_ = await _context.NodeOperations.SetState(node, ev.State);
|
_ = await _context.NodeOperations.SetState(node, ev.State);
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
@ -129,7 +133,7 @@ public class AgentEvents {
|
|||||||
// Other states we would want to preserve are excluded by the
|
// Other states we would want to preserve are excluded by the
|
||||||
// outermost conditional check.
|
// outermost conditional check.
|
||||||
if (task.State != TaskState.Running && task.State != TaskState.SettingUp) {
|
if (task.State != TaskState.Running && task.State != TaskState.SettingUp) {
|
||||||
await _context.TaskOperations.SetState(task, TaskState.SettingUp);
|
task = await _context.TaskOperations.SetState(task, TaskState.SettingUp);
|
||||||
}
|
}
|
||||||
|
|
||||||
var nodeTask = new NodeTasks(
|
var nodeTask = new NodeTasks(
|
||||||
@ -155,7 +159,8 @@ public class AgentEvents {
|
|||||||
// if tasks are running on the node when it reports as Done
|
// if tasks are running on the node when it reports as Done
|
||||||
// those are stopped early
|
// those are stopped early
|
||||||
await _context.NodeOperations.MarkTasksStoppedEarly(node, error);
|
await _context.NodeOperations.MarkTasksStoppedEarly(node, error);
|
||||||
await _context.NodeOperations.ToReimage(node, done: true);
|
// discard result: node not used after this point
|
||||||
|
_ = await _context.NodeOperations.ToReimage(node, done: true);
|
||||||
}
|
}
|
||||||
|
|
||||||
return null;
|
return null;
|
||||||
@ -193,8 +198,8 @@ public class AgentEvents {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (!node.State.ReadyForReset()) {
|
if (!node.State.ReadyForReset()) {
|
||||||
|
// discard result: node not used after this point
|
||||||
_ = await _context.NodeOperations.SetState(node, NodeState.Busy);
|
_ = await _context.NodeOperations.SetState(node, NodeState.Busy);
|
||||||
// node unused after this point
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var nodeTask = new NodeTasks(
|
var nodeTask = new NodeTasks(
|
||||||
@ -212,7 +217,7 @@ public class AgentEvents {
|
|||||||
}
|
}
|
||||||
|
|
||||||
_log.Info($"task started on node. machine_id:{machineId} job_id:{task.JobId} task_id:{task.TaskId}");
|
_log.Info($"task started on node. machine_id:{machineId} job_id:{task.JobId} task_id:{task.TaskId}");
|
||||||
await _context.TaskOperations.SetState(task, TaskState.Running);
|
task = await _context.TaskOperations.SetState(task, TaskState.Running);
|
||||||
|
|
||||||
var taskEvent = new TaskEvent(
|
var taskEvent = new TaskEvent(
|
||||||
TaskId: task.TaskId,
|
TaskId: task.TaskId,
|
||||||
|
@ -41,7 +41,8 @@ public class Pool {
|
|||||||
return await _context.RequestHandling.NotOk(r, poolResult.ErrorV, "pool stop");
|
return await _context.RequestHandling.NotOk(r, poolResult.ErrorV, "pool stop");
|
||||||
}
|
}
|
||||||
|
|
||||||
await _context.PoolOperations.SetShutdown(poolResult.OkV, Now: request.OkV.Now);
|
// discard result: not used after this point
|
||||||
|
_ = await _context.PoolOperations.SetShutdown(poolResult.OkV, Now: request.OkV.Now);
|
||||||
return await RequestHandling.Ok(r, true);
|
return await RequestHandling.Ok(r, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -134,14 +134,18 @@ public class Proxy {
|
|||||||
"ProxyReset");
|
"ProxyReset");
|
||||||
}
|
}
|
||||||
|
|
||||||
var proxyList = await _context.ProxyOperations.SearchByPartitionKeys(new[] { $"{request.OkV.Region}" }).ToListAsync();
|
bool any = false;
|
||||||
|
{
|
||||||
foreach (var proxy in proxyList) {
|
var proxyList = _context.ProxyOperations.SearchByPartitionKeys(new[] { $"{request.OkV.Region}" });
|
||||||
await _context.ProxyOperations.SetState(proxy, VmState.Stopping);
|
await foreach (var proxy in proxyList) {
|
||||||
|
any = true;
|
||||||
|
// ignoring result, proxyList not used outside this block
|
||||||
|
_ = await _context.ProxyOperations.SetState(proxy, VmState.Stopping);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var response = req.CreateResponse(HttpStatusCode.OK);
|
var response = req.CreateResponse(HttpStatusCode.OK);
|
||||||
await response.WriteAsJsonAsync(new BoolResult(proxyList.Any()));
|
await response.WriteAsJsonAsync(new BoolResult(any));
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -43,6 +43,7 @@ public class Scaleset {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var scaleset = scalesetResult.OkV;
|
var scaleset = scalesetResult.OkV;
|
||||||
|
// result ignored: not used after this point
|
||||||
_ = await _context.ScalesetOperations.SetShutdown(scaleset, request.OkV.Now);
|
_ = await _context.ScalesetOperations.SetShutdown(scaleset, request.OkV.Now);
|
||||||
return await RequestHandling.Ok(req, true);
|
return await RequestHandling.Ok(req, true);
|
||||||
}
|
}
|
||||||
|
@ -16,7 +16,7 @@ public class TimerProxy {
|
|||||||
|
|
||||||
var proxyOperations = _context.ProxyOperations;
|
var proxyOperations = _context.ProxyOperations;
|
||||||
var scalesetOperations = _context.ScalesetOperations;
|
var scalesetOperations = _context.ScalesetOperations;
|
||||||
var nsgOpertions = _context.NsgOperations;
|
var nsgOperations = _context.NsgOperations;
|
||||||
|
|
||||||
var proxies = await proxyOperations.QueryAsync().ToListAsync();
|
var proxies = await proxyOperations.QueryAsync().ToListAsync();
|
||||||
|
|
||||||
@ -69,14 +69,14 @@ public class TimerProxy {
|
|||||||
// since we do not support bring your own NSG
|
// since we do not support bring your own NSG
|
||||||
var nsgName = Nsg.NameFromRegion(region);
|
var nsgName = Nsg.NameFromRegion(region);
|
||||||
|
|
||||||
if (await nsgOpertions.GetNsg(nsgName) != null) {
|
if (await nsgOperations.GetNsg(nsgName) != null) {
|
||||||
var network = await Network.Init(region, _context);
|
var network = await Network.Init(region, _context);
|
||||||
|
|
||||||
var subnet = await network.GetSubnet();
|
var subnet = await network.GetSubnet();
|
||||||
if (subnet != null) {
|
if (subnet != null) {
|
||||||
var vnet = await network.GetVnet();
|
var vnet = await network.GetVnet();
|
||||||
if (vnet != null) {
|
if (vnet != null) {
|
||||||
var result = await nsgOpertions.AssociateSubnet(nsgName, vnet, subnet);
|
var result = await nsgOperations.AssociateSubnet(nsgName, vnet, subnet);
|
||||||
if (!result.OkV) {
|
if (!result.OkV) {
|
||||||
_logger.Error($"Failed to associate NSG and subnet due to {result.ErrorV} in region {region}");
|
_logger.Error($"Failed to associate NSG and subnet due to {result.ErrorV} in region {region}");
|
||||||
}
|
}
|
||||||
@ -86,10 +86,10 @@ public class TimerProxy {
|
|||||||
}
|
}
|
||||||
// if there are NSGs with name same as the region that they are allocated
|
// if there are NSGs with name same as the region that they are allocated
|
||||||
// and have no NIC associated with it then delete the NSG
|
// and have no NIC associated with it then delete the NSG
|
||||||
await foreach (var nsg in nsgOpertions.ListNsgs()) {
|
await foreach (var nsg in nsgOperations.ListNsgs()) {
|
||||||
if (nsgOpertions.OkToDelete(regions, nsg.Data.Location!, nsg.Data.Name)) {
|
if (nsgOperations.OkToDelete(regions, nsg.Data.Location!, nsg.Data.Name)) {
|
||||||
if (nsg.Data.NetworkInterfaces.Count == 0 && nsg.Data.Subnets.Count == 0) {
|
if (nsg.Data.NetworkInterfaces.Count == 0 && nsg.Data.Subnets.Count == 0) {
|
||||||
if (!await nsgOpertions.StartDeleteNsg(nsg.Data.Name)) {
|
if (!await nsgOperations.StartDeleteNsg(nsg.Data.Name)) {
|
||||||
_logger.Warning($"failed to start deleting NSG {nsg.Data.Name}");
|
_logger.Warning($"failed to start deleting NSG {nsg.Data.Name}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -14,20 +14,25 @@ public class TimerRepro {
|
|||||||
|
|
||||||
[Function("TimerRepro")]
|
[Function("TimerRepro")]
|
||||||
public async Async.Task Run([TimerTrigger("00:00:30")] TimerInfo myTimer) {
|
public async Async.Task Run([TimerTrigger("00:00:30")] TimerInfo myTimer) {
|
||||||
var expired = _onefuzzContext.ReproOperations.SearchExpired();
|
var expiredVmIds = new HashSet<Guid>();
|
||||||
await foreach (var repro in expired) {
|
{
|
||||||
_log.Info($"stopping repro: {repro.VmId}");
|
var expired = _onefuzzContext.ReproOperations.SearchExpired();
|
||||||
_ = await _onefuzzContext.ReproOperations.Stopping(repro);
|
await foreach (var repro in expired) {
|
||||||
|
_log.Info($"stopping repro: {repro.VmId}");
|
||||||
|
_ = expiredVmIds.Add(repro.VmId);
|
||||||
|
// ignoring result: value not used later
|
||||||
|
_ = await _onefuzzContext.ReproOperations.Stopping(repro);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var expiredVmIds = expired.Select(repro => repro?.VmId);
|
|
||||||
|
|
||||||
await foreach (var repro in _onefuzzContext.ReproOperations.SearchStates(VmStateHelper.NeedsWork)) {
|
await foreach (var repro in _onefuzzContext.ReproOperations.SearchStates(VmStateHelper.NeedsWork)) {
|
||||||
if (await expiredVmIds.ContainsAsync(repro.VmId)) {
|
if (expiredVmIds.Contains(repro.VmId)) {
|
||||||
// this VM already got processed during the expired phase
|
// this VM already got processed during the expired phase
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
_log.Info($"update repro: {repro.VmId}");
|
_log.Info($"update repro: {repro.VmId}");
|
||||||
|
// ignoring result: value not used later
|
||||||
_ = await _onefuzzContext.ReproOperations.ProcessStateUpdates(repro);
|
_ = await _onefuzzContext.ReproOperations.ProcessStateUpdates(repro);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -54,7 +54,7 @@ public class TimerRetention {
|
|||||||
select container.Name;
|
select container.Name;
|
||||||
|
|
||||||
foreach (var c in containerNames) {
|
foreach (var c in containerNames) {
|
||||||
usedContainers.Add(c);
|
_ = usedContainers.Add(c);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -18,19 +18,21 @@ public class TimerWorkers {
|
|||||||
private async Async.Task ProcessScalesets(Service.Scaleset scaleset) {
|
private async Async.Task ProcessScalesets(Service.Scaleset scaleset) {
|
||||||
_log.Verbose($"checking scaleset for updates: {scaleset.ScalesetId}");
|
_log.Verbose($"checking scaleset for updates: {scaleset.ScalesetId}");
|
||||||
|
|
||||||
await _scaleSetOps.UpdateConfigs(scaleset);
|
scaleset = await _scaleSetOps.UpdateConfigs(scaleset);
|
||||||
var r = await _scaleSetOps.SyncAutoscaleSettings(scaleset);
|
var r = await _scaleSetOps.SyncAutoscaleSettings(scaleset);
|
||||||
if (!r.IsOk) {
|
if (!r.IsOk) {
|
||||||
_log.Error($"failed to sync auto scale settings due to {r.ErrorV}");
|
_log.Error($"failed to sync auto scale settings due to {r.ErrorV}");
|
||||||
}
|
}
|
||||||
|
|
||||||
// if the scaleset is touched during cleanup, don't continue to process it
|
// if the scaleset is touched during cleanup, don't continue to process it
|
||||||
if (await _scaleSetOps.CleanupNodes(scaleset)) {
|
var (touched, ss) = await _scaleSetOps.CleanupNodes(scaleset);
|
||||||
|
if (touched) {
|
||||||
_log.Verbose($"scaleset needed cleanup: {scaleset.ScalesetId}");
|
_log.Verbose($"scaleset needed cleanup: {scaleset.ScalesetId}");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
await _scaleSetOps.SyncScalesetSize(scaleset);
|
scaleset = ss;
|
||||||
|
scaleset = await _scaleSetOps.SyncScalesetSize(scaleset);
|
||||||
_ = await _scaleSetOps.ProcessStateUpdate(scaleset);
|
_ = await _scaleSetOps.ProcessStateUpdate(scaleset);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -17,7 +17,7 @@ public class Request {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private async Task<HttpResponseMessage> Send(HttpMethod method, Uri url, HttpContent? content = null, IDictionary<string, string>? headers = null) {
|
private async Task<HttpResponseMessage> Send(HttpMethod method, Uri url, HttpContent? content = null, IDictionary<string, string>? headers = null) {
|
||||||
var request = new HttpRequestMessage(method: method, requestUri: url);
|
using var request = new HttpRequestMessage(method: method, requestUri: url);
|
||||||
|
|
||||||
if (_auth is not null) {
|
if (_auth is not null) {
|
||||||
var (tokenType, accessToken) = await _auth();
|
var (tokenType, accessToken) = await _auth();
|
||||||
|
@ -166,7 +166,10 @@ public class Program {
|
|||||||
var storageAccount = serviceConfig.OneFuzzFuncStorage;
|
var storageAccount = serviceConfig.OneFuzzFuncStorage;
|
||||||
if (storageAccount is not null) {
|
if (storageAccount is not null) {
|
||||||
var tableClient = await storage.GetTableServiceClientForAccount(storageAccount);
|
var tableClient = await storage.GetTableServiceClientForAccount(storageAccount);
|
||||||
await Async.Task.WhenAll(toCreate.Select(t => tableClient.CreateTableIfNotExistsAsync(serviceConfig.OneFuzzStoragePrefix + t.Name)));
|
await Async.Task.WhenAll(toCreate.Select(async t => {
|
||||||
|
// don't care if it was created or not
|
||||||
|
_ = await tableClient.CreateTableIfNotExistsAsync(serviceConfig.OneFuzzStoragePrefix + t.Name);
|
||||||
|
}));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -462,7 +462,7 @@ public class Config : IConfig {
|
|||||||
var containers = new Dictionary<ContainerType, List<Container>>();
|
var containers = new Dictionary<ContainerType, List<Container>>();
|
||||||
|
|
||||||
foreach (var container in config.Containers) {
|
foreach (var container in config.Containers) {
|
||||||
if (exist.Contains(container.Name)) {
|
if (!exist.Add(container.Name)) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -470,8 +470,6 @@ public class Config : IConfig {
|
|||||||
return ResultVoid<TaskConfigError>.Error(new TaskConfigError($"missing container: {container.Name}"));
|
return ResultVoid<TaskConfigError>.Error(new TaskConfigError($"missing container: {container.Name}"));
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = exist.Add(container.Name);
|
|
||||||
|
|
||||||
if (!containers.ContainsKey(container.Type)) {
|
if (!containers.ContainsKey(container.Type)) {
|
||||||
containers.Add(container.Type, new List<Container>());
|
containers.Add(container.Type, new List<Container>());
|
||||||
}
|
}
|
||||||
|
@ -25,7 +25,7 @@ public class ConfigOperations : Orm<InstanceConfig>, IConfigOperations {
|
|||||||
private static readonly InstanceConfigCacheKey _key = new(); // singleton key
|
private static readonly InstanceConfigCacheKey _key = new(); // singleton key
|
||||||
public Task<InstanceConfig> Fetch()
|
public Task<InstanceConfig> Fetch()
|
||||||
=> _cache.GetOrCreateAsync(_key, async entry => {
|
=> _cache.GetOrCreateAsync(_key, async entry => {
|
||||||
entry.SetAbsoluteExpiration(TimeSpan.FromMinutes(10)); // cached for 10 minutes
|
entry = entry.SetAbsoluteExpiration(TimeSpan.FromMinutes(10)); // cached for 10 minutes
|
||||||
var key = _context.ServiceConfiguration.OneFuzzInstanceName ?? throw new Exception("Environment variable ONEFUZZ_INSTANCE_NAME is not set");
|
var key = _context.ServiceConfiguration.OneFuzzInstanceName ?? throw new Exception("Environment variable ONEFUZZ_INSTANCE_NAME is not set");
|
||||||
return await GetEntityAsync(key, key);
|
return await GetEntityAsync(key, key);
|
||||||
});
|
});
|
||||||
@ -49,9 +49,11 @@ public class ConfigOperations : Orm<InstanceConfig>, IConfigOperations {
|
|||||||
_log.WithHttpStatus(r.ErrorV).Error($"Failed to replace instance config record");
|
_log.WithHttpStatus(r.ErrorV).Error($"Failed to replace instance config record");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (r.IsOk) {
|
if (r.IsOk) {
|
||||||
_cache.Set(_key, newConfig);
|
_ = _cache.Set(_key, newConfig);
|
||||||
}
|
}
|
||||||
|
|
||||||
await _context.Events.SendEvent(new EventInstanceConfigUpdated(newConfig));
|
await _context.Events.SendEvent(new EventInstanceConfigUpdated(newConfig));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -166,7 +166,7 @@ public class Containers : IContainers {
|
|||||||
|
|
||||||
public async Async.Task SaveBlob(Container container, string name, string data, StorageType storageType) {
|
public async Async.Task SaveBlob(Container container, string name, string data, StorageType storageType) {
|
||||||
var client = await FindContainer(container, storageType) ?? throw new Exception($"unable to find container: {container} - {storageType}");
|
var client = await FindContainer(container, storageType) ?? throw new Exception($"unable to find container: {container} - {storageType}");
|
||||||
await client.GetBlobClient(name).UploadAsync(new BinaryData(data), overwrite: true);
|
_ = await client.GetBlobClient(name).UploadAsync(new BinaryData(data), overwrite: true);
|
||||||
}
|
}
|
||||||
|
|
||||||
public virtual Async.Task<Guid> GetInstanceId() => _getInstanceId.Value;
|
public virtual Async.Task<Guid> GetInstanceId() => _getInstanceId.Value;
|
||||||
|
@ -25,7 +25,7 @@ public class DiskOperations : IDiskOperations {
|
|||||||
_logTracer.Info($"deleting disks {resourceGroup} : {name}");
|
_logTracer.Info($"deleting disks {resourceGroup} : {name}");
|
||||||
var disk = await _creds.GetResourceGroupResource().GetDiskAsync(name);
|
var disk = await _creds.GetResourceGroupResource().GetDiskAsync(name);
|
||||||
if (disk != null) {
|
if (disk != null) {
|
||||||
await disk.Value.DeleteAsync(WaitUntil.Started);
|
_ = await disk.Value.DeleteAsync(WaitUntil.Started);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
|
@ -371,8 +371,8 @@ public class Extensions : IExtensions {
|
|||||||
public async Task<Dictionary<string, VirtualMachineExtensionData>> ReproExtensions(AzureLocation region, Os reproOs, Guid reproId, ReproConfig reproConfig, Container? setupContainer) {
|
public async Task<Dictionary<string, VirtualMachineExtensionData>> ReproExtensions(AzureLocation region, Os reproOs, Guid reproId, ReproConfig reproConfig, Container? setupContainer) {
|
||||||
// TODO: what about contents of repro.ps1 / repro.sh?
|
// TODO: what about contents of repro.ps1 / repro.sh?
|
||||||
var report = await _context.Reports.GetReport(reproConfig.Container, reproConfig.Path);
|
var report = await _context.Reports.GetReport(reproConfig.Container, reproConfig.Path);
|
||||||
report.EnsureNotNull($"invalid report: {reproConfig}");
|
var checkedReport = report.EnsureNotNull($"invalid report: {reproConfig}");
|
||||||
report?.InputBlob.EnsureNotNull("unable to perform reproduction without an input blob");
|
var inputBlob = checkedReport.InputBlob.EnsureNotNull("unable to perform reproduction without an input blob");
|
||||||
|
|
||||||
var commands = new List<string>();
|
var commands = new List<string>();
|
||||||
if (setupContainer != null) {
|
if (setupContainer != null) {
|
||||||
@ -395,8 +395,8 @@ public class Extensions : IExtensions {
|
|||||||
BlobSasPermissions.Read
|
BlobSasPermissions.Read
|
||||||
),
|
),
|
||||||
await _context.Containers.GetFileSasUrl(
|
await _context.Containers.GetFileSasUrl(
|
||||||
report?.InputBlob?.Container!,
|
inputBlob.Container,
|
||||||
report?.InputBlob?.Name!,
|
inputBlob.Name,
|
||||||
StorageType.Corpus,
|
StorageType.Corpus,
|
||||||
BlobSasPermissions.Read
|
BlobSasPermissions.Read
|
||||||
)
|
)
|
||||||
|
@ -78,13 +78,13 @@ public class JobOperations : StatefulOrm<Job, JobState, JobOperations>, IJobOper
|
|||||||
Query.EqualEnum("state", JobState.Enabled)
|
Query.EqualEnum("state", JobState.Enabled)
|
||||||
});
|
});
|
||||||
|
|
||||||
var jobs = this.QueryAsync(filter);
|
await foreach (var job in QueryAsync(filter)) {
|
||||||
|
|
||||||
await foreach (var job in jobs) {
|
|
||||||
await foreach (var task in _context.TaskOperations.QueryAsync(Query.PartitionKey(job.JobId.ToString()))) {
|
await foreach (var task in _context.TaskOperations.QueryAsync(Query.PartitionKey(job.JobId.ToString()))) {
|
||||||
await _context.TaskOperations.MarkFailed(task, new Error(ErrorCode.TASK_FAILED, new[] { "job never not start" }));
|
await _context.TaskOperations.MarkFailed(task, new Error(ErrorCode.TASK_FAILED, new[] { "job never not start" }));
|
||||||
}
|
}
|
||||||
_logTracer.Info($"stopping job that never started: {job.JobId}");
|
_logTracer.Info($"stopping job that never started: {job.JobId}");
|
||||||
|
|
||||||
|
// updated result ignored: not used after this loop
|
||||||
_ = await _context.JobOperations.Stopping(job);
|
_ = await _context.JobOperations.Stopping(job);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -40,7 +40,7 @@ public class NodeMessageOperations : Orm<NodeMessage>, INodeMessageOperations {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public async Async.Task SendMessage(Guid machineId, NodeCommand message, string? messageId = null) {
|
public async Async.Task SendMessage(Guid machineId, NodeCommand message, string? messageId = null) {
|
||||||
messageId = messageId ?? EntityBase.NewSortedKey;
|
messageId ??= EntityBase.NewSortedKey;
|
||||||
var r = await Insert(new NodeMessage(machineId, messageId, message));
|
var r = await Insert(new NodeMessage(machineId, messageId, message));
|
||||||
if (!r.IsOk) {
|
if (!r.IsOk) {
|
||||||
_logTracer.WithHttpStatus(r.ErrorV).Error($"failed to insert message with id: {messageId} for machine id: {machineId} message: {message}");
|
_logTracer.WithHttpStatus(r.ErrorV).Error($"failed to insert message with id: {messageId} for machine id: {machineId} message: {message}");
|
||||||
|
@ -54,8 +54,8 @@ public class NotificationOperations : Orm<Notification>, INotificationOperations
|
|||||||
await foreach (var (task, containers) in GetQueueTasks()) {
|
await foreach (var (task, containers) in GetQueueTasks()) {
|
||||||
if (containers.Contains(container)) {
|
if (containers.Contains(container)) {
|
||||||
_logTracer.Info($"queuing input {container} {filename} {task.TaskId}");
|
_logTracer.Info($"queuing input {container} {filename} {task.TaskId}");
|
||||||
var url = _context.Containers.GetFileSasUrl(container, filename, StorageType.Corpus, BlobSasPermissions.Read | BlobSasPermissions.Delete);
|
var url = await _context.Containers.GetFileSasUrl(container, filename, StorageType.Corpus, BlobSasPermissions.Read | BlobSasPermissions.Delete);
|
||||||
await _context.Queue.SendMessage(task.TaskId.ToString(), url?.ToString() ?? "", StorageType.Corpus);
|
await _context.Queue.SendMessage(task.TaskId.ToString(), url.ToString(), StorageType.Corpus);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -84,7 +84,7 @@ namespace Microsoft.OneFuzz.Service {
|
|||||||
_logTracer.Info($"dissociating nic {nic.Data.Name} with nsg: {_context.Creds.GetBaseResourceGroup()} {nsg.Name}");
|
_logTracer.Info($"dissociating nic {nic.Data.Name} with nsg: {_context.Creds.GetBaseResourceGroup()} {nsg.Name}");
|
||||||
nic.Data.NetworkSecurityGroup = null;
|
nic.Data.NetworkSecurityGroup = null;
|
||||||
try {
|
try {
|
||||||
await _context.Creds.GetResourceGroupResource()
|
_ = await _context.Creds.GetResourceGroupResource()
|
||||||
.GetNetworkInterfaces()
|
.GetNetworkInterfaces()
|
||||||
.CreateOrUpdateAsync(WaitUntil.Started, nic.Data.Name, nic.Data);
|
.CreateOrUpdateAsync(WaitUntil.Started, nic.Data.Name, nic.Data);
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
@ -183,11 +183,10 @@ namespace Microsoft.OneFuzz.Service {
|
|||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
await _context.Creds.GetResourceGroupResource().GetNetworkSecurityGroups().CreateOrUpdateAsync(
|
_ = await _context.Creds.GetResourceGroupResource().GetNetworkSecurityGroups().CreateOrUpdateAsync(
|
||||||
WaitUntil.Started,
|
WaitUntil.Started,
|
||||||
name,
|
name,
|
||||||
nsgParams
|
nsgParams);
|
||||||
);
|
|
||||||
} catch (RequestFailedException ex) {
|
} catch (RequestFailedException ex) {
|
||||||
if (IsConcurrentRequestError(ex.Message)) {
|
if (IsConcurrentRequestError(ex.Message)) {
|
||||||
// _logTracer.Debug($"create NSG had conflicts with concurrent request, ignoring {ex}");
|
// _logTracer.Debug($"create NSG had conflicts with concurrent request, ignoring {ex}");
|
||||||
@ -291,9 +290,8 @@ namespace Microsoft.OneFuzz.Service {
|
|||||||
_logTracer.Info($"associating nic {nic.Data.Name} with nsg: {_context.Creds.GetBaseResourceGroup()} {nsg.Data.Name}");
|
_logTracer.Info($"associating nic {nic.Data.Name} with nsg: {_context.Creds.GetBaseResourceGroup()} {nsg.Data.Name}");
|
||||||
|
|
||||||
try {
|
try {
|
||||||
await _context.Creds.GetResourceGroupResource().GetNetworkInterfaces().CreateOrUpdateAsync(
|
_ = await _context.Creds.GetResourceGroupResource().GetNetworkInterfaces().CreateOrUpdateAsync(
|
||||||
WaitUntil.Started, nic.Data.Name, nic.Data
|
WaitUntil.Started, nic.Data.Name, nic.Data);
|
||||||
);
|
|
||||||
} catch (RequestFailedException ex) {
|
} catch (RequestFailedException ex) {
|
||||||
if (IsConcurrentRequestError(ex.Message)) {
|
if (IsConcurrentRequestError(ex.Message)) {
|
||||||
// _logTracer.Debug($"associate NSG with NIC had conflicts with concurrent request, ignoring {ex}");
|
// _logTracer.Debug($"associate NSG with NIC had conflicts with concurrent request, ignoring {ex}");
|
||||||
@ -312,11 +310,10 @@ namespace Microsoft.OneFuzz.Service {
|
|||||||
_logTracer.Info($"updating nsg {_context.Creds.GetBaseResourceGroup()}:{nsg.Location}:{nsg.Name}");
|
_logTracer.Info($"updating nsg {_context.Creds.GetBaseResourceGroup()}:{nsg.Location}:{nsg.Name}");
|
||||||
|
|
||||||
try {
|
try {
|
||||||
await _context.Creds.GetResourceGroupResource().GetNetworkSecurityGroups().CreateOrUpdateAsync(
|
_ = await _context.Creds.GetResourceGroupResource().GetNetworkSecurityGroups().CreateOrUpdateAsync(
|
||||||
WaitUntil.Started,
|
WaitUntil.Started,
|
||||||
nsg.Name,
|
nsg.Name,
|
||||||
nsg
|
nsg);
|
||||||
);
|
|
||||||
} catch (RequestFailedException ex) {
|
} catch (RequestFailedException ex) {
|
||||||
if (IsConcurrentRequestError(ex.Message)) {
|
if (IsConcurrentRequestError(ex.Message)) {
|
||||||
//_logTracer.Debug($"create NSG had conflicts with concurrent request, ignoring {ex}");
|
//_logTracer.Debug($"create NSG had conflicts with concurrent request, ignoring {ex}");
|
||||||
|
@ -198,7 +198,8 @@ public class PoolOperations : StatefulOrm<Pool, PoolState, PoolOperations>, IPoo
|
|||||||
|
|
||||||
if (nodes is not null) {
|
if (nodes is not null) {
|
||||||
await foreach (var node in nodes) {
|
await foreach (var node in nodes) {
|
||||||
await _context.NodeOperations.SetShutdown(node);
|
// ignoring updated result - nodes not returned
|
||||||
|
_ = await _context.NodeOperations.SetShutdown(node);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -231,7 +232,8 @@ public class PoolOperations : StatefulOrm<Pool, PoolState, PoolOperations>, IPoo
|
|||||||
|
|
||||||
if (nodes is not null) {
|
if (nodes is not null) {
|
||||||
await foreach (var node in nodes) {
|
await foreach (var node in nodes) {
|
||||||
await _context.NodeOperations.SetHalt(node);
|
// updated value ignored: 'nodes' is not returned
|
||||||
|
_ = await _context.NodeOperations.SetHalt(node);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -54,7 +54,7 @@ public class ProxyForwardOperations : Orm<ProxyForward>, IProxyForwardOperations
|
|||||||
var firstEntry = entries.FirstOrDefault();
|
var firstEntry = entries.FirstOrDefault();
|
||||||
if (firstEntry != null) {
|
if (firstEntry != null) {
|
||||||
var updated = firstEntry with { EndTime = DateTimeOffset.UtcNow + TimeSpan.FromHours(duration) };
|
var updated = firstEntry with { EndTime = DateTimeOffset.UtcNow + TimeSpan.FromHours(duration) };
|
||||||
await this.Update(updated);
|
await Update(updated).IgnoreResult();
|
||||||
return OneFuzzResult.Ok(updated);
|
return OneFuzzResult.Ok(updated);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -94,8 +94,8 @@ public class ProxyForwardOperations : Orm<ProxyForward>, IProxyForwardOperations
|
|||||||
|
|
||||||
var regions = new HashSet<Region>();
|
var regions = new HashSet<Region>();
|
||||||
foreach (var entry in entries) {
|
foreach (var entry in entries) {
|
||||||
regions.Add(entry.Region);
|
_ = regions.Add(entry.Region);
|
||||||
await Delete(entry);
|
await Delete(entry).IgnoreResult();
|
||||||
}
|
}
|
||||||
|
|
||||||
return regions;
|
return regions;
|
||||||
|
@ -41,21 +41,23 @@ public class ProxyOperations : StatefulOrm<Proxy, VmState, ProxyOperations>, IPr
|
|||||||
}
|
}
|
||||||
|
|
||||||
public async Async.Task<Proxy> GetOrCreate(Region region) {
|
public async Async.Task<Proxy> GetOrCreate(Region region) {
|
||||||
var proxyList = QueryAsync(filter: TableClient.CreateQueryFilter($"region eq {region.String} and outdated eq false"));
|
{
|
||||||
|
var proxyList = QueryAsync(filter: TableClient.CreateQueryFilter($"region eq {region.String} and outdated eq false"));
|
||||||
await foreach (var proxy in proxyList) {
|
await foreach (var proxy in proxyList) {
|
||||||
if (IsOutdated(proxy)) {
|
if (IsOutdated(proxy)) {
|
||||||
var r1 = await Replace(proxy with { Outdated = true });
|
var r1 = await Replace(proxy with { Outdated = true });
|
||||||
if (!r1.IsOk) {
|
if (!r1.IsOk) {
|
||||||
_logTracer.WithHttpStatus(r1.ErrorV).Error($"failed to replace record to mark proxy {proxy.ProxyId} as outdated");
|
_logTracer.WithHttpStatus(r1.ErrorV).Error($"failed to replace record to mark proxy {proxy.ProxyId} as outdated");
|
||||||
|
}
|
||||||
|
continue;
|
||||||
}
|
}
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!VmStateHelper.Available.Contains(proxy.State)) {
|
if (!VmStateHelper.Available.Contains(proxy.State)) {
|
||||||
continue;
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
return proxy;
|
||||||
}
|
}
|
||||||
return proxy;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_logTracer.Info($"creating proxy: region:{region}");
|
_logTracer.Info($"creating proxy: region:{region}");
|
||||||
@ -305,7 +307,7 @@ public class ProxyOperations : StatefulOrm<Proxy, VmState, ProxyOperations>, IPr
|
|||||||
var stoppedVm = await SetState(proxy, VmState.Stopped);
|
var stoppedVm = await SetState(proxy, VmState.Stopped);
|
||||||
_logTracer.Info($"removing proxy: {stoppedVm.Region}");
|
_logTracer.Info($"removing proxy: {stoppedVm.Region}");
|
||||||
await _context.Events.SendEvent(new EventProxyDeleted(stoppedVm.Region, stoppedVm.ProxyId));
|
await _context.Events.SendEvent(new EventProxyDeleted(stoppedVm.Region, stoppedVm.ProxyId));
|
||||||
await Delete(stoppedVm);
|
await Delete(stoppedVm).IgnoreResult();
|
||||||
return stoppedVm;
|
return stoppedVm;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -44,7 +44,7 @@ public class Queue : IQueue {
|
|||||||
public async Async.Task SendMessage(string name, string message, StorageType storageType, TimeSpan? visibilityTimeout = null, TimeSpan? timeToLive = null) {
|
public async Async.Task SendMessage(string name, string message, StorageType storageType, TimeSpan? visibilityTimeout = null, TimeSpan? timeToLive = null) {
|
||||||
var queue = await GetQueueClient(name, storageType);
|
var queue = await GetQueueClient(name, storageType);
|
||||||
try {
|
try {
|
||||||
await queue.SendMessageAsync(message, visibilityTimeout: visibilityTimeout, timeToLive: timeToLive);
|
_ = await queue.SendMessageAsync(message, visibilityTimeout: visibilityTimeout, timeToLive: timeToLive);
|
||||||
} catch (Exception ex) {
|
} catch (Exception ex) {
|
||||||
_log.Exception(ex, $"Failed to send message {message}");
|
_log.Exception(ex, $"Failed to send message {message}");
|
||||||
throw;
|
throw;
|
||||||
|
@ -108,6 +108,7 @@ public class ReproOperations : StatefulOrm<Repro, VmState, ReproOperations>, IRe
|
|||||||
|
|
||||||
public async Async.Task<Repro> Stopped(Repro repro) {
|
public async Async.Task<Repro> Stopped(Repro repro) {
|
||||||
_logTracer.Info($"vm stopped: {repro.VmId}");
|
_logTracer.Info($"vm stopped: {repro.VmId}");
|
||||||
|
// BUG?: why are we updating repro and then deleting it and returning a new value
|
||||||
repro = repro with { State = VmState.Stopped };
|
repro = repro with { State = VmState.Stopped };
|
||||||
var r = await Delete(repro);
|
var r = await Delete(repro);
|
||||||
if (!r.IsOk) {
|
if (!r.IsOk) {
|
||||||
@ -204,7 +205,7 @@ public class ReproOperations : StatefulOrm<Repro, VmState, ReproOperations>, IRe
|
|||||||
repro = repro with { State = VmState.Running };
|
repro = repro with { State = VmState.Running };
|
||||||
}
|
}
|
||||||
|
|
||||||
await Replace(repro);
|
await Replace(repro).IgnoreResult();
|
||||||
return repro;
|
return repro;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -67,7 +67,7 @@ public class RequestAccess {
|
|||||||
var segments = path.Split("/", StringSplitOptions.RemoveEmptyEntries);
|
var segments = path.Split("/", StringSplitOptions.RemoveEmptyEntries);
|
||||||
|
|
||||||
var currentNode = _root;
|
var currentNode = _root;
|
||||||
currentNode.Rules.TryGetValue(method, out var currentRule);
|
_ = currentNode.Rules.TryGetValue(method, out var currentRule);
|
||||||
|
|
||||||
foreach (var currentSegment in segments) {
|
foreach (var currentSegment in segments) {
|
||||||
if (currentNode.Children.TryGetValue(currentSegment, out var node)) {
|
if (currentNode.Children.TryGetValue(currentSegment, out var node)) {
|
||||||
|
@ -10,14 +10,14 @@ public interface IScalesetOperations : IStatefulOrm<Scaleset, ScalesetState> {
|
|||||||
|
|
||||||
IAsyncEnumerable<Scaleset> SearchByPool(PoolName poolName);
|
IAsyncEnumerable<Scaleset> SearchByPool(PoolName poolName);
|
||||||
|
|
||||||
Async.Task UpdateConfigs(Scaleset scaleSet);
|
Async.Task<Scaleset> UpdateConfigs(Scaleset scaleSet);
|
||||||
|
|
||||||
Async.Task<OneFuzzResult<Scaleset>> GetById(Guid scalesetId);
|
Async.Task<OneFuzzResult<Scaleset>> GetById(Guid scalesetId);
|
||||||
IAsyncEnumerable<Scaleset> GetByObjectId(Guid objectId);
|
IAsyncEnumerable<Scaleset> GetByObjectId(Guid objectId);
|
||||||
|
|
||||||
Async.Task<bool> CleanupNodes(Scaleset scaleSet);
|
Async.Task<(bool, Scaleset)> CleanupNodes(Scaleset scaleSet);
|
||||||
|
|
||||||
Async.Task SyncScalesetSize(Scaleset scaleset);
|
Async.Task<Scaleset> SyncScalesetSize(Scaleset scaleset);
|
||||||
|
|
||||||
Async.Task<Scaleset> SetState(Scaleset scaleset, ScalesetState state);
|
Async.Task<Scaleset> SetState(Scaleset scaleset, ScalesetState state);
|
||||||
public Async.Task<List<ScalesetNodeState>> GetNodes(Scaleset scaleset);
|
public Async.Task<List<ScalesetNodeState>> GetNodes(Scaleset scaleset);
|
||||||
@ -56,11 +56,11 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
return QueryAsync(Query.PartitionKey(poolName.String));
|
return QueryAsync(Query.PartitionKey(poolName.String));
|
||||||
}
|
}
|
||||||
|
|
||||||
public async Async.Task SyncScalesetSize(Scaleset scaleset) {
|
public async Async.Task<Scaleset> SyncScalesetSize(Scaleset scaleset) {
|
||||||
// # If our understanding of size is out of sync with Azure, resize the
|
// # If our understanding of size is out of sync with Azure, resize the
|
||||||
// # scaleset to match our understanding.
|
// # scaleset to match our understanding.
|
||||||
if (scaleset.State != ScalesetState.Running) {
|
if (scaleset.State != ScalesetState.Running) {
|
||||||
return;
|
return scaleset;
|
||||||
}
|
}
|
||||||
|
|
||||||
var size = await _context.VmssOperations.GetVmssSize(scaleset.ScalesetId);
|
var size = await _context.VmssOperations.GetVmssSize(scaleset.ScalesetId);
|
||||||
@ -69,9 +69,10 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
//#if the scaleset is missing, this is an indication the scaleset
|
//#if the scaleset is missing, this is an indication the scaleset
|
||||||
//# was manually deleted, rather than having OneFuzz delete it. As
|
//# was manually deleted, rather than having OneFuzz delete it. As
|
||||||
//# such, we should go thruogh the process of deleting it.
|
//# such, we should go thruogh the process of deleting it.
|
||||||
await SetShutdown(scaleset, now: true);
|
scaleset = await SetShutdown(scaleset, now: true);
|
||||||
return;
|
return scaleset;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (size != scaleset.Size) {
|
if (size != scaleset.Size) {
|
||||||
//# Azure auto-scaled us or nodes were manually added/removed
|
//# Azure auto-scaled us or nodes were manually added/removed
|
||||||
//# New node state will be synced in cleanup_nodes
|
//# New node state will be synced in cleanup_nodes
|
||||||
@ -83,6 +84,8 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
_log.WithHttpStatus(replaceResult.ErrorV).Error($"Failed to update scaleset size for scaleset {scaleset.ScalesetId}");
|
_log.WithHttpStatus(replaceResult.ErrorV).Error($"Failed to update scaleset size for scaleset {scaleset.ScalesetId}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return scaleset;
|
||||||
}
|
}
|
||||||
|
|
||||||
public async Async.Task<OneFuzzResultVoid> SyncAutoscaleSettings(Scaleset scaleset) {
|
public async Async.Task<OneFuzzResultVoid> SyncAutoscaleSettings(Scaleset scaleset) {
|
||||||
@ -221,37 +224,34 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
return updatedScaleset;
|
return updatedScaleset;
|
||||||
}
|
}
|
||||||
|
|
||||||
public async Async.Task UpdateConfigs(Scaleset scaleSet) {
|
public async Async.Task<Scaleset> UpdateConfigs(Scaleset scaleSet) {
|
||||||
if (scaleSet == null) {
|
|
||||||
_log.Warning("skipping update configs on scaleset, since scaleset is null");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (scaleSet.State == ScalesetState.Halt) {
|
if (scaleSet.State == ScalesetState.Halt) {
|
||||||
_log.Info($"{SCALESET_LOG_PREFIX} not updating configs, scalest is set to be deleted. scaleset_id: {scaleSet.ScalesetId}");
|
_log.Info($"{SCALESET_LOG_PREFIX} not updating configs, scalest is set to be deleted. scaleset_id: {scaleSet.ScalesetId}");
|
||||||
return;
|
return scaleSet;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!scaleSet.NeedsConfigUpdate) {
|
if (!scaleSet.NeedsConfigUpdate) {
|
||||||
_log.Verbose($"{SCALESET_LOG_PREFIX} config update no needed. scaleset_id: {scaleSet.ScalesetId}");
|
_log.Verbose($"{SCALESET_LOG_PREFIX} config update no needed. scaleset_id: {scaleSet.ScalesetId}");
|
||||||
return;
|
return scaleSet;
|
||||||
}
|
}
|
||||||
|
|
||||||
_log.Info($"{SCALESET_LOG_PREFIX} updating scalset configs. scalset_id: {scaleSet.ScalesetId}");
|
_log.Info($"{SCALESET_LOG_PREFIX} updating scalset configs. scalset_id: {scaleSet.ScalesetId}");
|
||||||
|
|
||||||
var pool = await _context.PoolOperations.GetByName(scaleSet.PoolName);
|
var pool = await _context.PoolOperations.GetByName(scaleSet.PoolName);
|
||||||
|
|
||||||
if (!pool.IsOk) {
|
if (!pool.IsOk) {
|
||||||
_log.Error($"{SCALESET_LOG_PREFIX} unable to find pool during config update. pool:{scaleSet.PoolName}, scaleset_id:{scaleSet.ScalesetId}");
|
_log.Error($"{SCALESET_LOG_PREFIX} unable to find pool during config update. pool:{scaleSet.PoolName}, scaleset_id:{scaleSet.ScalesetId}");
|
||||||
await SetFailed(scaleSet, pool.ErrorV);
|
scaleSet = await SetFailed(scaleSet, pool.ErrorV);
|
||||||
return;
|
return scaleSet;
|
||||||
}
|
}
|
||||||
|
|
||||||
var extensions = await _context.Extensions.FuzzExtensions(pool.OkV, scaleSet);
|
var extensions = await _context.Extensions.FuzzExtensions(pool.OkV, scaleSet);
|
||||||
|
|
||||||
var res = await _context.VmssOperations.UpdateExtensions(scaleSet.ScalesetId, extensions);
|
var res = await _context.VmssOperations.UpdateExtensions(scaleSet.ScalesetId, extensions);
|
||||||
|
|
||||||
if (!res.IsOk) {
|
if (!res.IsOk) {
|
||||||
_log.Info($"{SCALESET_LOG_PREFIX} unable to update configs {string.Join(',', res.ErrorV.Errors!)}");
|
_log.Info($"{SCALESET_LOG_PREFIX} unable to update configs {string.Join(',', res.ErrorV.Errors!)}");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return scaleSet;
|
||||||
}
|
}
|
||||||
|
|
||||||
public Async.Task<Scaleset> SetShutdown(Scaleset scaleset, bool now)
|
public Async.Task<Scaleset> SetShutdown(Scaleset scaleset, bool now)
|
||||||
@ -488,21 +488,22 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
/// </summary>
|
/// </summary>
|
||||||
/// <param name="scaleSet"></param>
|
/// <param name="scaleSet"></param>
|
||||||
/// <returns>true if scaleset got modified</returns>
|
/// <returns>true if scaleset got modified</returns>
|
||||||
public async Async.Task<bool> CleanupNodes(Scaleset scaleSet) {
|
public async Async.Task<(bool, Scaleset)> CleanupNodes(Scaleset scaleSet) {
|
||||||
_log.Info($"{SCALESET_LOG_PREFIX} cleaning up nodes. scaleset_id {scaleSet.ScalesetId}");
|
_log.Info($"{SCALESET_LOG_PREFIX} cleaning up nodes. scaleset_id {scaleSet.ScalesetId}");
|
||||||
|
|
||||||
if (scaleSet.State == ScalesetState.Halt) {
|
if (scaleSet.State == ScalesetState.Halt) {
|
||||||
_log.Info($"{SCALESET_LOG_PREFIX} halting scaleset scaleset_id {scaleSet.ScalesetId}");
|
_log.Info($"{SCALESET_LOG_PREFIX} halting scaleset scaleset_id {scaleSet.ScalesetId}");
|
||||||
await Halt(scaleSet);
|
scaleSet = await Halt(scaleSet);
|
||||||
return true;
|
return (true, scaleSet);
|
||||||
}
|
}
|
||||||
|
|
||||||
var pool = await _context.PoolOperations.GetByName(scaleSet.PoolName);
|
var pool = await _context.PoolOperations.GetByName(scaleSet.PoolName);
|
||||||
if (!pool.IsOk) {
|
if (!pool.IsOk) {
|
||||||
_log.Error($"unable to find pool during cleanup {scaleSet.ScalesetId} - {scaleSet.PoolName}");
|
_log.Error($"unable to find pool during cleanup {scaleSet.ScalesetId} - {scaleSet.PoolName}");
|
||||||
await SetFailed(scaleSet, pool.ErrorV!);
|
scaleSet = await SetFailed(scaleSet, pool.ErrorV!);
|
||||||
return true;
|
return (true, scaleSet);
|
||||||
}
|
}
|
||||||
|
|
||||||
await _context.NodeOperations.ReimageLongLivedNodes(scaleSet.ScalesetId);
|
await _context.NodeOperations.ReimageLongLivedNodes(scaleSet.ScalesetId);
|
||||||
|
|
||||||
//ground truth of existing nodes
|
//ground truth of existing nodes
|
||||||
@ -596,7 +597,7 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
await ReimageNodes(scaleSet, toReimage.Values, strategy);
|
await ReimageNodes(scaleSet, toReimage.Values, strategy);
|
||||||
await DeleteNodes(scaleSet, toDelete.Values, strategy);
|
await DeleteNodes(scaleSet, toDelete.Values, strategy);
|
||||||
|
|
||||||
return toReimage.Count > 0 || toDelete.Count > 0;
|
return (toReimage.Count > 0 || toDelete.Count > 0, scaleSet);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -627,7 +628,7 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
if (node.DebugKeepNode) {
|
if (node.DebugKeepNode) {
|
||||||
_log.Warning($"{SCALESET_LOG_PREFIX} not reimaging manually overriden node. scaleset_id:{scaleset.ScalesetId} machine_id:{node.MachineId}");
|
_log.Warning($"{SCALESET_LOG_PREFIX} not reimaging manually overriden node. scaleset_id:{scaleset.ScalesetId} machine_id:{node.MachineId}");
|
||||||
} else {
|
} else {
|
||||||
machineIds.Add(node.MachineId);
|
_ = machineIds.Add(node.MachineId);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -641,7 +642,9 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
_log.Info($"{SCALESET_LOG_PREFIX} decommissioning nodes");
|
_log.Info($"{SCALESET_LOG_PREFIX} decommissioning nodes");
|
||||||
await Async.Task.WhenAll(nodes
|
await Async.Task.WhenAll(nodes
|
||||||
.Where(node => machineIds.Contains(node.MachineId))
|
.Where(node => machineIds.Contains(node.MachineId))
|
||||||
.Select(node => _context.NodeOperations.ReleaseScaleInProtection(node)));
|
.Select(async node => {
|
||||||
|
await _context.NodeOperations.ReleaseScaleInProtection(node).IgnoreResult();
|
||||||
|
}));
|
||||||
return;
|
return;
|
||||||
|
|
||||||
case NodeDisposalStrategy.ScaleIn:
|
case NodeDisposalStrategy.ScaleIn:
|
||||||
@ -651,7 +654,7 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
.Where(node => machineIds.Contains(node.MachineId))
|
.Where(node => machineIds.Contains(node.MachineId))
|
||||||
.Select(async node => {
|
.Select(async node => {
|
||||||
await _context.NodeOperations.Delete(node);
|
await _context.NodeOperations.Delete(node);
|
||||||
await _context.NodeOperations.ReleaseScaleInProtection(node);
|
await _context.NodeOperations.ReleaseScaleInProtection(node).IgnoreResult();
|
||||||
}));
|
}));
|
||||||
} else {
|
} else {
|
||||||
_log.Info($"failed to reimage nodes due to {r.ErrorV}");
|
_log.Info($"failed to reimage nodes due to {r.ErrorV}");
|
||||||
@ -680,7 +683,7 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
if (node.DebugKeepNode) {
|
if (node.DebugKeepNode) {
|
||||||
_log.Warning($"{SCALESET_LOG_PREFIX} not deleting manually overriden node. scaleset_id:{scaleset.ScalesetId} machine_id:{node.MachineId}");
|
_log.Warning($"{SCALESET_LOG_PREFIX} not deleting manually overriden node. scaleset_id:{scaleset.ScalesetId} machine_id:{node.MachineId}");
|
||||||
} else {
|
} else {
|
||||||
machineIds.Add(node.MachineId);
|
_ = machineIds.Add(node.MachineId);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -689,7 +692,9 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
_log.Info($"{SCALESET_LOG_PREFIX} decommissioning nodes");
|
_log.Info($"{SCALESET_LOG_PREFIX} decommissioning nodes");
|
||||||
await Async.Task.WhenAll(nodes
|
await Async.Task.WhenAll(nodes
|
||||||
.Where(node => machineIds.Contains(node.MachineId))
|
.Where(node => machineIds.Contains(node.MachineId))
|
||||||
.Select(node => _context.NodeOperations.ReleaseScaleInProtection(node)));
|
.Select(async node => {
|
||||||
|
await _context.NodeOperations.ReleaseScaleInProtection(node).IgnoreResult();
|
||||||
|
}));
|
||||||
return;
|
return;
|
||||||
|
|
||||||
case NodeDisposalStrategy.ScaleIn:
|
case NodeDisposalStrategy.ScaleIn:
|
||||||
@ -699,7 +704,7 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
.Where(node => machineIds.Contains(node.MachineId))
|
.Where(node => machineIds.Contains(node.MachineId))
|
||||||
.Select(async node => {
|
.Select(async node => {
|
||||||
await _context.NodeOperations.Delete(node);
|
await _context.NodeOperations.Delete(node);
|
||||||
await _context.NodeOperations.ReleaseScaleInProtection(node);
|
await _context.NodeOperations.ReleaseScaleInProtection(node).IgnoreResult();
|
||||||
}));
|
}));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -812,10 +817,10 @@ public class ScalesetOperations : StatefulOrm<Scaleset, ScalesetState, ScalesetO
|
|||||||
}
|
}
|
||||||
|
|
||||||
_logTracer.Info($"{SCALESET_LOG_PREFIX} scaleset shutdown: scaleset_id:{scaleset.ScalesetId} size:{size}");
|
_logTracer.Info($"{SCALESET_LOG_PREFIX} scaleset shutdown: scaleset_id:{scaleset.ScalesetId} size:{size}");
|
||||||
var nodes = _context.NodeOperations.SearchStates(scalesetId: scaleset.ScalesetId);
|
{
|
||||||
// TODO: Parallelization opportunity
|
var nodes = _context.NodeOperations.SearchStates(scalesetId: scaleset.ScalesetId);
|
||||||
await foreach (var node in nodes) {
|
// TODO: Parallelization opportunity
|
||||||
await _context.NodeOperations.SetShutdown(node);
|
await nodes.ForEachAwaitAsync(_context.NodeOperations.SetShutdown);
|
||||||
}
|
}
|
||||||
|
|
||||||
_logTracer.Info($"{SCALESET_LOG_PREFIX} checking for existing auto scale settings {scaleset.ScalesetId}");
|
_logTracer.Info($"{SCALESET_LOG_PREFIX} checking for existing auto scale settings {scaleset.ScalesetId}");
|
||||||
|
@ -44,7 +44,7 @@ public class Scheduler : IScheduler {
|
|||||||
foreach (var workUnit in workSet.WorkUnits) {
|
foreach (var workUnit in workSet.WorkUnits) {
|
||||||
var task1 = tasks[workUnit.TaskId];
|
var task1 = tasks[workUnit.TaskId];
|
||||||
Task task = await _taskOperations.SetState(task1, TaskState.Scheduled);
|
Task task = await _taskOperations.SetState(task1, TaskState.Scheduled);
|
||||||
seen.Add(task.TaskId);
|
_ = seen.Add(task.TaskId);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -172,16 +172,17 @@ public class Scheduler : IScheduler {
|
|||||||
}
|
}
|
||||||
var setupContainer = task.Config.Containers?.FirstOrDefault(c => c.Type == ContainerType.Setup) ?? throw new Exception($"task missing setup container: task_type = {task.Config.Task.Type}");
|
var setupContainer = task.Config.Containers?.FirstOrDefault(c => c.Type == ContainerType.Setup) ?? throw new Exception($"task missing setup container: task_type = {task.Config.Task.Type}");
|
||||||
|
|
||||||
var setupPs1Exist = _containers.BlobExists(setupContainer.Name, "setup.ps1", StorageType.Corpus);
|
|
||||||
var setupShExist = _containers.BlobExists(setupContainer.Name, "setup.sh", StorageType.Corpus);
|
|
||||||
|
|
||||||
string? setupScript = null;
|
string? setupScript = null;
|
||||||
if (task.Os == Os.Windows && await setupPs1Exist) {
|
if (task.Os == Os.Windows) {
|
||||||
setupScript = "setup.ps1";
|
if (await _containers.BlobExists(setupContainer.Name, "setup.ps1", StorageType.Corpus)) {
|
||||||
|
setupScript = "setup.ps1";
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (task.Os == Os.Linux && await setupShExist) {
|
if (task.Os == Os.Linux) {
|
||||||
setupScript = "setup.sh";
|
if (await _containers.BlobExists(setupContainer.Name, "setup.sh", StorageType.Corpus)) {
|
||||||
|
setupScript = "setup.sh";
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var reboot = false;
|
var reboot = false;
|
||||||
|
@ -51,11 +51,10 @@ public class Subnet : ISubnet {
|
|||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
await _creds.GetResourceGroupResource().GetVirtualNetworks().CreateOrUpdateAsync(
|
_ = await _creds.GetResourceGroupResource().GetVirtualNetworks().CreateOrUpdateAsync(
|
||||||
WaitUntil.Started,
|
WaitUntil.Started,
|
||||||
name,
|
name,
|
||||||
virtualNetParam
|
virtualNetParam);
|
||||||
);
|
|
||||||
} catch (RequestFailedException ex) {
|
} catch (RequestFailedException ex) {
|
||||||
_logTracer.Error($"network creation failed: {name}:{region} {{error}}");
|
_logTracer.Error($"network creation failed: {name}:{region} {{error}}");
|
||||||
return OneFuzzResultVoid.Error(
|
return OneFuzzResultVoid.Error(
|
||||||
|
@ -3,8 +3,15 @@
|
|||||||
public static class ObjectExtention {
|
public static class ObjectExtention {
|
||||||
public static T EnsureNotNull<T>(this T? thisObject, string message) {
|
public static T EnsureNotNull<T>(this T? thisObject, string message) {
|
||||||
if (thisObject == null) {
|
if (thisObject == null) {
|
||||||
throw new ArgumentNullException(message);
|
throw new ArgumentException(message);
|
||||||
}
|
}
|
||||||
|
|
||||||
return thisObject;
|
return thisObject;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Explicitly discards the result value.
|
||||||
|
// In general we should not do this; eventually all call-sites should
|
||||||
|
// be updated.
|
||||||
|
public static Async.Task IgnoreResult<T>(this Async.Task<T> task)
|
||||||
|
=> task;
|
||||||
}
|
}
|
||||||
|
@@ -19,49 +19,32 @@ namespace Microsoft.OneFuzz.Service {
         if (Location == null) { // EnsureNotNull does not satisfy the nullability checker
             throw new ArgumentNullException("Location required for VirtualMachineExtension");
         }
-        TypePropertiesType.EnsureNotNull("TypePropertiesType required for VirtualMachineExtension");
-        Publisher.EnsureNotNull("Publisher required for VirtualMachineExtension");
-        TypeHandlerVersion.EnsureNotNull("TypeHandlerVersion required for VirtualMachineExtension");
-        AutoUpgradeMinorVersion.EnsureNotNull("AutoUpgradeMinorVersion required for VirtualMachineExtension");
-
-        var settings = Settings ?? new BinaryData(new Dictionary<string, string>());
-        var protectedSettings = ProtectedSettings ?? new BinaryData(new Dictionary<string, string>());
-
         return (Name!, new VirtualMachineExtensionData(Location.Value) {
-            TypePropertiesType = TypePropertiesType,
-            Publisher = Publisher,
-            TypeHandlerVersion = TypeHandlerVersion,
-            AutoUpgradeMinorVersion = AutoUpgradeMinorVersion,
+            TypePropertiesType = TypePropertiesType.EnsureNotNull("TypePropertiesType required for VirtualMachineExtension"),
+            Publisher = Publisher.EnsureNotNull("Publisher required for VirtualMachineExtension"),
+            TypeHandlerVersion = TypeHandlerVersion.EnsureNotNull("TypeHandlerVersion required for VirtualMachineExtension"),
+            AutoUpgradeMinorVersion = AutoUpgradeMinorVersion.EnsureNotNull("AutoUpgradeMinorVersion required for VirtualMachineExtension"),
             EnableAutomaticUpgrade = EnableAutomaticUpgrade,
             ForceUpdateTag = ForceUpdateTag,
-            Settings = settings,
-            ProtectedSettings = protectedSettings
+            Settings = Settings ?? new BinaryData(new Dictionary<string, string>()),
+            ProtectedSettings = ProtectedSettings ?? new BinaryData(new Dictionary<string, string>()),
         });
     }

     public VirtualMachineScaleSetExtensionData GetAsVirtualMachineScaleSetExtension() {
-        Name.EnsureNotNull("Name required for VirtualMachineScaleSetExtension");
-        TypePropertiesType.EnsureNotNull("TypePropertiesType required for VirtualMachineScaleSetExtension");
-        Publisher.EnsureNotNull("Publisher required for VirtualMachineScaleSetExtension");
-        TypeHandlerVersion.EnsureNotNull("TypeHandlerVersion required for VirtualMachineScaleSetExtension");
-        AutoUpgradeMinorVersion.EnsureNotNull("AutoUpgradeMinorVersion required for VirtualMachineScaleSetExtension");
-
-        var settings = Settings ?? new BinaryData(new Dictionary<string, string>());
-        var protectedSettings = ProtectedSettings ?? new BinaryData(new Dictionary<string, string>());
-
         return new VirtualMachineScaleSetExtensionData() {
-            Name = Name,
-            TypePropertiesType = TypePropertiesType,
-            Publisher = Publisher,
-            TypeHandlerVersion = TypeHandlerVersion,
-            AutoUpgradeMinorVersion = AutoUpgradeMinorVersion,
+            Name = Name.EnsureNotNull("Name required for VirtualMachineScaleSetExtension"),
+            TypePropertiesType = TypePropertiesType.EnsureNotNull("TypePropertiesType required for VirtualMachineScaleSetExtension"),
+            Publisher = Publisher.EnsureNotNull("Publisher required for VirtualMachineScaleSetExtension"),
+            TypeHandlerVersion = TypeHandlerVersion.EnsureNotNull("TypeHandlerVersion required for VirtualMachineScaleSetExtension"),
+            AutoUpgradeMinorVersion = AutoUpgradeMinorVersion.EnsureNotNull("AutoUpgradeMinorVersion required for VirtualMachineScaleSetExtension"),
             EnableAutomaticUpgrade = EnableAutomaticUpgrade,
             ForceUpdateTag = ForceUpdateTag,
-            Settings = settings,
-            ProtectedSettings = protectedSettings
+            Settings = Settings ?? new BinaryData(new Dictionary<string, string>()),
+            ProtectedSettings = ProtectedSettings ?? new BinaryData(new Dictionary<string, string>()),
         };
     }
 }

 }

@@ -100,7 +100,7 @@ public class VmOperations : IVmOperations {
         if (nic != null) {
             _logTracer.Info($"deleting nic {resourceGroup}:{name}");
             if (nic.Data.NetworkSecurityGroup != null && nsg != null) {
-                await _context.NsgOperations.DissociateNic(nsg, nic);
+                _ = await _context.NsgOperations.DissociateNic(nsg, nic);
                 return false;
             }
             await _context.IpOperations.DeleteNic(resourceGroup, name);

@@ -217,7 +217,7 @@ public class VmOperations : IVmOperations {
         var vm = await _context.Creds.GetResourceGroupResource().GetVirtualMachineAsync(vmName);

         try {
-            await vm.Value.GetVirtualMachineExtensions().CreateOrUpdateAsync(
+            _ = await vm.Value.GetVirtualMachineExtensions().CreateOrUpdateAsync(
                 WaitUntil.Started,
                 extensionName,
                 extension

@@ -318,11 +318,10 @@ public class VmOperations : IVmOperations {
         }

         try {
-            await _context.Creds.GetResourceGroupResource().GetVirtualMachines().CreateOrUpdateAsync(
+            _ = await _context.Creds.GetResourceGroupResource().GetVirtualMachines().CreateOrUpdateAsync(
                 WaitUntil.Started,
                 name,
-                vmParams
-            );
+                vmParams);
         } catch (RequestFailedException ex) {
             if (ex.ErrorCode == "ResourceNotFound" && ex.Message.Contains("The request failed due to conflict with a concurrent request")) {
                 // _logTracer.Debug($"create VM had conflicts with concurrent request, ignoring {ex.ToString()}");

@@ -87,7 +87,7 @@ public class VmssOperations : IVmssOperations {
             var scalesetResource = GetVmssResource(name);
             var patch = new VirtualMachineScaleSetPatch();
             patch.Sku.Capacity = capacity;
-            await scalesetResource.UpdateAsync(WaitUntil.Started, patch);
+            _ = await scalesetResource.UpdateAsync(WaitUntil.Started, patch);
             return OneFuzzResultVoid.Ok;
         } else {
             return OneFuzzResultVoid.Error(canUpdate.ErrorV);

@@ -222,7 +222,7 @@ public class VmssOperations : IVmssOperations {
         instanceVm.Data.ProtectionPolicy.ProtectFromScaleIn = protectFromScaleIn;
         var vmCollection = GetVmssResource(name).GetVirtualMachineScaleSetVms();
         try {
-            await vmCollection.CreateOrUpdateAsync(WaitUntil.Started, instanceVm.Data.InstanceId, instanceVm.Data);
+            _ = await vmCollection.CreateOrUpdateAsync(WaitUntil.Started, instanceVm.Data.InstanceId, instanceVm.Data);
             return OneFuzzResultVoid.Ok;
         } catch {
             var msg = $"unable to set protection policy on: {vmId}:{instanceVm.Id}";

@@ -396,7 +396,7 @@ public class VmssOperations : IVmssOperations {

     public Async.Task<IReadOnlyList<string>> ListAvailableSkus(Region region)
         => _cache.GetOrCreateAsync<IReadOnlyList<string>>($"compute-skus-{region}", async entry => {
-            entry.SetAbsoluteExpiration(TimeSpan.FromMinutes(10));
+            entry = entry.SetAbsoluteExpiration(TimeSpan.FromMinutes(10));

             var sub = _creds.GetSubscriptionResource();
             var skus = sub.GetResourceSkusAsync(filter: TableClient.CreateQueryFilter($"location eq {region.String}"));

@@ -65,13 +65,14 @@ public class WebhookOperations : Orm<Webhook>, IWebhookOperations {
             headers["X-Onefuzz-Digest"] = digest;
         }

-        var client = new Request(_httpFactory.CreateClient());
+        using var httpClient = _httpFactory.CreateClient();
+        var client = new Request(httpClient);
         _logTracer.Info(data);
-        var response = client.Post(url: webhook.Url, json: data, headers: headers);
-        var result = response.Result;
-        if (result.StatusCode == HttpStatusCode.Accepted) {
+        using var response = await client.Post(url: webhook.Url, json: data, headers: headers);
+        if (response.StatusCode == HttpStatusCode.Accepted) {
             return true;
         }

         return false;
     }

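Beyond the discard fixes, the webhook hunk above corrects a real bug: `Post` was never awaited, the code blocked on `.Result`, and neither the client nor the response was disposed. A minimal sketch of the corrected shape against a plain `HttpClient` (illustrative only; the real code goes through the project's `Request` wrapper):

```csharp
using System;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;

public static class WebhookSendSketch {
    // Await the request and dispose the response; return whether the
    // receiver acknowledged it with 202 Accepted.
    public static async Task<bool> SendAsync(HttpClient client, Uri url, HttpContent body) {
        using var response = await client.PostAsync(url, body);
        return response.StatusCode == HttpStatusCode.Accepted;
    }
}
```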
@@ -170,13 +170,12 @@ container:{container} filename:{filename}";
     public async Async.Task UpdateExisting(WorkItem item, string notificationInfo) {
         if (_config.OnDuplicate.Comment != null) {
             var comment = await Render(_config.OnDuplicate.Comment);
-            await _client.AddCommentAsync(
+            _ = await _client.AddCommentAsync(
                 new CommentCreate() {
                     Text = comment
                 },
                 _project,
-                (int)(item.Id!)
-            );
+                (int)(item.Id!));
         }

         var document = new JsonPatchDocument();

@@ -209,7 +208,7 @@ container:{container} filename:{filename}";
         }

         if (document.Any()) {
-            await _client.UpdateWorkItemAsync(document, _project, (int)(item.Id!));
+            _ = await _client.UpdateWorkItemAsync(document, _project, (int)(item.Id!));
             _logTracer.Info($"notify ado: updated work item {item.Id} - {notificationInfo}");
         } else {
             _logTracer.Info($"notify ado: no update for work item {item.Id} - {notificationInfo}");

@@ -228,13 +227,12 @@ container:{container} filename:{filename}";

         if (_config.Comment != null) {
             var comment = await Render(_config.Comment);
-            await _client.AddCommentAsync(
+            _ = await _client.AddCommentAsync(
                 new CommentCreate() {
                     Text = comment,
                 },
                 _project,
-                (int)(entry.Id!)
-            );
+                (int)(entry.Id!));
         }
         return entry;
     }

@@ -114,17 +114,17 @@ public class GithubIssues : NotificationsBase, IGithubIssues {
     private async Async.Task Update(Issue issue) {
         _logTracer.Info($"updating issue: {issue}");
         if (_config.OnDuplicate.Comment != null) {
-            await _gh.Issue.Comment.Create(issue.Repository.Id, issue.Number, await Render(_config.OnDuplicate.Comment));
+            _ = await _gh.Issue.Comment.Create(issue.Repository.Id, issue.Number, await Render(_config.OnDuplicate.Comment));
         }
         if (_config.OnDuplicate.Labels.Any()) {
             var labels = await _config.OnDuplicate.Labels.ToAsyncEnumerable()
                 .SelectAwait(async label => await Render(label))
                 .ToArrayAsync();

-            await _gh.Issue.Labels.ReplaceAllForIssue(issue.Repository.Id, issue.Number, labels);
+            _ = await _gh.Issue.Labels.ReplaceAllForIssue(issue.Repository.Id, issue.Number, labels);
         }
         if (_config.OnDuplicate.Reopen && issue.State != ItemState.Open) {
-            await _gh.Issue.Update(issue.Repository.Id, issue.Number, new IssueUpdate() {
+            _ = await _gh.Issue.Update(issue.Repository.Id, issue.Number, new IssueUpdate() {
                 State = ItemState.Open
             });
         }

@@ -140,7 +140,7 @@ public class GithubIssues : NotificationsBase, IGithubIssues {
             .SelectAwait(async label => await Render(label))
             .ToHashSetAsync();

-        labels.Add("OneFuzz");
+        _ = labels.Add("OneFuzz");

         var newIssue = new NewIssue(await Render(_config.Title)) {
             Body = await Render(_config.Body),

@@ -149,12 +149,10 @@ public class GithubIssues : NotificationsBase, IGithubIssues {
         labels.ToList().ForEach(label => newIssue.Labels.Add(label));
         assignees.ForEach(assignee => newIssue.Assignees.Add(assignee));

-        await _gh.Issue.Create(
+        _ = await _gh.Issue.Create(
             await Render(_config.Organization),
             await Render(_config.Repository),
-            newIssue
-        );
+            newIssue);
     }
  }
 }

@@ -42,15 +42,25 @@ public abstract class NotificationsBase {
     private readonly Uri _inputUrl;
     private readonly Uri _reportUrl;

-    public static async Async.Task<Renderer> ConstructRenderer(IOnefuzzContext context, Container container, string filename, Report report, Task? task = null, Job? job = null, Uri? targetUrl = null, Uri? inputUrl = null, Uri? reportUrl = null) {
+    public static async Async.Task<Renderer> ConstructRenderer(
+        IOnefuzzContext context,
+        Container container,
+        string filename,
+        Report report,
+        Task? task = null,
+        Job? job = null,
+        Uri? targetUrl = null,
+        Uri? inputUrl = null,
+        Uri? reportUrl = null) {

         task ??= await context.TaskOperations.GetByJobIdAndTaskId(report.JobId, report.TaskId);
-        task.EnsureNotNull($"invalid task {report.TaskId}");
+        var checkedTask = task.EnsureNotNull($"invalid task {report.TaskId}");

         job ??= await context.JobOperations.Get(report.JobId);
-        job.EnsureNotNull($"invalid job {report.JobId}");
+        var checkedJob = job.EnsureNotNull($"invalid job {report.JobId}");

         if (targetUrl == null) {
-            var setupContainer = Scheduler.GetSetupContainer(task?.Config!);
+            var setupContainer = Scheduler.GetSetupContainer(checkedTask.Config);
             targetUrl = new Uri(context.Containers.AuthDownloadUrl(setupContainer, ReplaceFirstSetup(report.Executable)));
         }

@@ -62,9 +72,25 @@ public abstract class NotificationsBase {
             inputUrl = new Uri(context.Containers.AuthDownloadUrl(report.InputBlob.Container, report.InputBlob.Name));
         }

-        return new Renderer(container, filename, report, task!, job!, targetUrl, inputUrl!, reportUrl);
+        return new Renderer(
+            container,
+            filename,
+            report,
+            checkedTask,
+            checkedJob,
+            targetUrl,
+            inputUrl!, // TODO: incorrect
+            reportUrl);
     }

-    public Renderer(Container container, string filename, Report report, Task task, Job job, Uri targetUrl, Uri inputUrl, Uri reportUrl) {
+    public Renderer(
+        Container container,
+        string filename,
+        Report report,
+        Task task,
+        Job job,
+        Uri targetUrl,
+        Uri inputUrl,
+        Uri reportUrl) {
         _report = report;
         _container = container;
         _filename = filename;

@@ -70,7 +70,7 @@ public sealed class CustomEnumConverter<T> : JsonConverter<T> where T : Enum {
                 return false;
             }

-            FormatAndAddToCaches(value, options.Encoder, _skipFormat);
+            _ = FormatAndAddToCaches(value, options.Encoder, _skipFormat);
             return true;
         }
     }

@@ -248,7 +248,7 @@ public sealed class PolymorphicConverter<T> : JsonConverter<T> {
             var newOptions = new JsonSerializerOptions(k);
             var thisConverter = newOptions.Converters.FirstOrDefault(c => c.GetType() == typeof(PolymorphicConverterFactory));
             if (thisConverter != null) {
-                newOptions.Converters.Remove(thisConverter);
+                _ = newOptions.Converters.Remove(thisConverter);
             }

             return newOptions;

@@ -138,7 +138,7 @@ namespace ApiService.OneFuzzLib.Orm {


     public interface IStatefulOrm<T, TState> : IOrm<T> where T : StatefulEntityBase<TState> where TState : Enum {
-        Async.Task<T?> ProcessStateUpdate(T entity);
+        Async.Task<T> ProcessStateUpdate(T entity);

         Async.Task<T?> ProcessStateUpdates(T entity, int MaxUpdates = 5);
     }

@@ -201,7 +201,7 @@ namespace ApiService.OneFuzzLib.Orm {
         /// </summary>
         /// <param name="entity"></param>
         /// <returns></returns>
-        public async Async.Task<T?> ProcessStateUpdate(T entity) {
+        public async Async.Task<T> ProcessStateUpdate(T entity) {
             TState state = entity.State;
             var func = GetType().GetMethod(state.ToString()) switch {
                 null => null,

@@ -214,9 +214,8 @@ namespace ApiService.OneFuzzLib.Orm {
                 _logTracer.Info($"processing state update: {typeof(T)} - PartitionKey: {partitionKey} RowKey: {rowKey} - {state}");
                 return await func(entity);
             } else {
-                _logTracer.Info($"State function for state: '{state}' not found on type {typeof(T)}");
+                throw new ArgumentException($"State function for state: '{state}' not found on type {typeof(T)}");
             }
-            return null;
         }

         /// <summary>
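The `ProcessStateUpdate` hunks above tighten the contract: a missing state function used to be logged and swallowed, with the method returning `null`; it now throws, and the return type drops its nullability. A hedged sketch of what that means for a caller (hypothetical helper; `Async.Task` is the project's task alias):

```csharp
// Hypothetical caller of IStatefulOrm<T, TState>, for illustration only.
public static class StateTransitionSketch {
    public static async Async.Task<T> RunTransition<T, TState>(IStatefulOrm<T, TState> orm, T entity)
        where T : StatefulEntityBase<TState>
        where TState : Enum {
        // Previously this returned T? and a missing state function yielded null;
        // now the updated entity always comes back or an ArgumentException is
        // thrown, so the result can be used (or explicitly discarded with `_ =`).
        var updated = await orm.ProcessStateUpdate(entity);
        return updated;
    }
}
```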
@@ -333,6 +333,12 @@
             "resolved": "2.1.0",
             "contentHash": "1jUT0PwgKO9d9F/X2n762qLp7v/30OpMtJPFRtmjPXUX2/J0lnqiGiSJNNsW3yYTj5StF0Z1yE36TrvtGpcbrg=="
         },
+        "SmartAnalyzers.CSharpExtensions.Annotations": {
+            "type": "Direct",
+            "requested": "[4.2.7, )",
+            "resolved": "4.2.7",
+            "contentHash": "9fRFxTUwPmH7lukckwEvvKawMcP8ObwnOngN8kx5Bx773WHSku1EGa5BIteV07th5553il76fPX7U1xz2bFmuQ=="
+        },
         "System.IdentityModel.Tokens.Jwt": {
             "type": "Direct",
             "requested": "[6.22.1, )",

src/ApiService/CSharpExtensions.json (new file, 12 lines)
@@ -0,0 +1,12 @@
+{
+    "$comment1": "See https://github.com/cezarypiatek/CSharpExtensions ",
+    "$comment2": "These types are all 'builders' and okay to ignore the result of",
+    "CSE005": {
+        "IgnoredReturnTypes": [
+            "Microsoft.Azure.Functions.Worker.IFunctionsWorkerApplicationBuilder",
+            "Microsoft.Extensions.DependencyInjection.IServiceCollection",
+            "Moq.Language.Flow.IReturnsResult",
+            "FluentAssertions.AndConstraint"
+        ]
+    }
+}
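The two `$comment` entries explain the exemption list: CSE005 flags unused return values, but these "builder" types return the receiver purely so calls can be chained. A short sketch of the kind of call the `IServiceCollection` entry keeps quiet (standard Microsoft.Extensions.DependencyInjection API; the `IClock`/`SystemClock` types are invented for the example):

```csharp
using System;
using Microsoft.Extensions.DependencyInjection;

public interface IClock { DateTime UtcNow { get; } }
public sealed class SystemClock : IClock { public DateTime UtcNow => DateTime.UtcNow; }

public static class StartupSketch {
    public static void ConfigureServices(IServiceCollection services) {
        // AddSingleton returns the same IServiceCollection to support chaining;
        // dropping that return value is conventional, so CSE005 ignores it here.
        services.AddSingleton<IClock, SystemClock>();
    }
}
```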
@@ -50,7 +50,7 @@ public abstract class ContainersTestBase : FunctionTestBase {
     public async Async.Task CanDelete() {
         var containerName = Container.Parse("test");
         var client = GetContainerClient(containerName);
-        await client.CreateIfNotExistsAsync();
+        _ = await client.CreateIfNotExistsAsync();

         var msg = TestHttpRequestData.FromJson("DELETE", new ContainerDelete(containerName));

@@ -89,7 +89,7 @@ public abstract class ContainersTestBase : FunctionTestBase {
     public async Async.Task CanPost_Existing() {
         var containerName = Container.Parse("test");
         var client = GetContainerClient(containerName);
-        await client.CreateIfNotExistsAsync();
+        _ = await client.CreateIfNotExistsAsync();

         var metadata = new Dictionary<string, string> { { "some", "value" } };
         var msg = TestHttpRequestData.FromJson("POST", new ContainerCreate(containerName, metadata));

@@ -113,7 +113,7 @@ public abstract class ContainersTestBase : FunctionTestBase {
         var containerName = Container.Parse("test");
         {
             var client = GetContainerClient(containerName);
-            await client.CreateIfNotExistsAsync();
+            _ = await client.CreateIfNotExistsAsync();
         }

         var msg = TestHttpRequestData.FromJson("GET", new ContainerGet(containerName));

@@ -143,8 +143,8 @@ public abstract class ContainersTestBase : FunctionTestBase {
     public async Async.Task List_Existing() {
         var meta1 = new Dictionary<string, string> { { "key1", "value1" } };
         var meta2 = new Dictionary<string, string> { { "key2", "value2" } };
-        await GetContainerClient(Container.Parse("one")).CreateIfNotExistsAsync(metadata: meta1);
-        await GetContainerClient(Container.Parse("two")).CreateIfNotExistsAsync(metadata: meta2);
+        _ = await GetContainerClient(Container.Parse("one")).CreateIfNotExistsAsync(metadata: meta1);
+        _ = await GetContainerClient(Container.Parse("two")).CreateIfNotExistsAsync(metadata: meta2);

         var msg = TestHttpRequestData.Empty("GET"); // this means list all

@@ -166,12 +166,12 @@ public abstract class ContainersTestBase : FunctionTestBase {

     private static async Async.Task AssertCanCRUD(Uri sasUrl) {
         var client = new BlobContainerClient(sasUrl);
-        await client.UploadBlobAsync("blob", new BinaryData("content")); // create
+        _ = await client.UploadBlobAsync("blob", new BinaryData("content")); // create
         var b = Assert.Single(await client.GetBlobsAsync().ToListAsync()); // list
         using (var s = await client.GetBlobClient(b.Name).OpenReadAsync())
         using (var sr = new StreamReader(s)) {
             Assert.Equal("content", await sr.ReadToEndAsync()); // read
         }
-        await client.DeleteBlobAsync("blob"); // delete
+        using var r = await client.DeleteBlobAsync("blob"); // delete
     }
 }

@@ -74,8 +74,8 @@ public abstract class DownloadTestBase : FunctionTestBase {
         // set up a file to download
         var containerName = Container.Parse("xxx");
         var container = GetContainerClient(containerName);
-        await container.CreateAsync();
-        await container.UploadBlobAsync("yyy", new BinaryData("content"));
+        _ = await container.CreateAsync();
+        _ = await container.UploadBlobAsync("yyy", new BinaryData("content"));

         var req = TestHttpRequestData.Empty("GET");
         var url = new UriBuilder(req.Url) { Query = "container=xxx&filename=yyy" }.Uri;

@@ -50,8 +50,8 @@ public abstract class InfoTestBase : FunctionTestBase {
         var instanceId = Guid.NewGuid().ToString();
         var baseConfigContainer = WellKnownContainers.BaseConfig;
         var containerClient = GetContainerClient(baseConfigContainer);
-        await containerClient.CreateAsync();
-        await containerClient.GetBlobClient("instance_id").UploadAsync(new BinaryData(instanceId));
+        _ = await containerClient.CreateAsync();
+        _ = await containerClient.GetBlobClient("instance_id").UploadAsync(new BinaryData(instanceId));

         var auth = new TestEndpointAuthorization(RequestType.User, Logger, Context);
         var func = new Info(auth, Context);

@@ -23,4 +23,7 @@
     <ProjectReference Include="..\ApiService\ApiService.csproj" />
   </ItemGroup>

+  <ItemGroup>
+    <AdditionalFiles Include="../CSharpExtensions.json" />
+  </ItemGroup>
 </Project>

@@ -70,7 +70,7 @@ public abstract class FunctionTestBase : IAsyncLifetime {
     }

     protected static string BodyAsString(HttpResponseData data) {
-        data.Body.Seek(0, SeekOrigin.Begin);
+        _ = data.Body.Seek(0, SeekOrigin.Begin);
         using var sr = new StreamReader(data.Body);
         return sr.ReadToEnd();
     }

@@ -85,7 +85,7 @@ public abstract class FunctionTestBase : IAsyncLifetime {
             .Where(c => c.IsDeleted != true)
             .Select(async container => {
                 try {
-                    await blobClient.DeleteBlobContainerAsync(container.Name);
+                    using var _ = await blobClient.DeleteBlobContainerAsync(container.Name);
                     Logger.Info($"cleaned up container {container.Name}");
                 } catch (Exception ex) {
                     // swallow any exceptions: this is a best-effort attempt to cleanup

@@ -100,7 +100,7 @@ public abstract class FunctionTestBase : IAsyncLifetime {
             .QueryAsync(filter: Query.StartsWith("TableName", _storagePrefix))
             .Select(async table => {
                 try {
-                    await tableClient.DeleteTableAsync(table.Name);
+                    using var _ = await tableClient.DeleteTableAsync(table.Name);
                     Logger.Info($"cleaned up table {table.Name}");
                 } catch (Exception ex) {
                     // swallow any exceptions: this is a best-effort attempt to cleanup

@@ -1185,6 +1185,11 @@
             "resolved": "2.1.0",
             "contentHash": "1jUT0PwgKO9d9F/X2n762qLp7v/30OpMtJPFRtmjPXUX2/J0lnqiGiSJNNsW3yYTj5StF0Z1yE36TrvtGpcbrg=="
         },
+        "SmartAnalyzers.CSharpExtensions.Annotations": {
+            "type": "Transitive",
+            "resolved": "4.2.7",
+            "contentHash": "9fRFxTUwPmH7lukckwEvvKawMcP8ObwnOngN8kx5Bx773WHSku1EGa5BIteV07th5553il76fPX7U1xz2bFmuQ=="
+        },
         "System.AppContext": {
             "type": "Transitive",
             "resolved": "4.3.0",

@@ -2309,6 +2314,7 @@
             "Octokit": "2.0.1",
             "Scriban": "5.5.0",
             "Semver": "2.1.0",
+            "SmartAnalyzers.CSharpExtensions.Annotations": "4.2.7",
             "System.IdentityModel.Tokens.Jwt": "6.22.1",
             "System.Linq.Async": "6.0.1",
             "TaskTupleAwaiter": "2.0.0"

@@ -691,7 +691,7 @@ namespace Tests {
         ITestOutputHelper _output;

         public OrmModelsTest(ITestOutputHelper output) {
-            Arb.Register<OrmArb>();
+            _ = Arb.Register<OrmArb>();
             _output = output;
         }

@@ -773,7 +773,7 @@ namespace Tests {
         ITestOutputHelper _output;

         public OrmJsonSerialization(ITestOutputHelper output) {
-            Arb.Register<OrmArb>();
+            _ = Arb.Register<OrmArb>();
             _output = output;
         }

@@ -141,10 +141,10 @@ namespace Tests {

         var json = JsonNode.Parse(tableEntity.GetString("the_object"))?.AsObject() ?? throw new InvalidOperationException("Could not parse objec");

-        json.TryGetPropertyValue("the_name", out var theName);
-        json.TryGetPropertyValue("the_enum", out var theEnum);
-        json.TryGetPropertyValue("the_flag", out var theFlag);
-        json.TryGetPropertyValue("the_enum_value", out var theEnumValue);
+        Assert.True(json.TryGetPropertyValue("the_name", out var theName));
+        Assert.True(json.TryGetPropertyValue("the_enum", out var theEnum));
+        Assert.True(json.TryGetPropertyValue("the_flag", out var theFlag));
+        Assert.True(json.TryGetPropertyValue("the_enum_value", out var theEnumValue));

         Assert.Equal(entity1.TheObject.TheName, theName?.GetValue<string>());
         Assert.Equal("the_two", theEnum?.GetValue<string>());

@@ -59,9 +59,9 @@ public class RequestAccessTests {

     [Fact]
     public void TestAddingRuleOnSamePath() {
-        Assert.Throws<RuleConflictException>(() => {
+        _ = Assert.Throws<RuleConflictException>(() => {
             var guid1 = Guid.NewGuid();
-            RequestAccess.Build(
+            _ = RequestAccess.Build(
                 new Dictionary<string, ApiAccessRule>{
                     { "a/b/c", new ApiAccessRule(
                         Methods: new[]{"get"},

@@ -129,7 +129,7 @@ public class SchedulerTests {
         Assert.Equal(buckets.Count, bucketCount);

         foreach (var task in tasks) {
-            Assert.Single(buckets, b => b.Contains(task));
+            _ = Assert.Single(buckets, b => b.Contains(task));
         }
     }
 }

@@ -25,4 +25,7 @@
     <ProjectReference Include="..\ApiService\ApiService.csproj" />
   </ItemGroup>

+  <ItemGroup>
+    <AdditionalFiles Include="../CSharpExtensions.json" />
+  </ItemGroup>
 </Project>

@@ -1234,6 +1234,11 @@
             "resolved": "2.1.0",
             "contentHash": "1jUT0PwgKO9d9F/X2n762qLp7v/30OpMtJPFRtmjPXUX2/J0lnqiGiSJNNsW3yYTj5StF0Z1yE36TrvtGpcbrg=="
         },
+        "SmartAnalyzers.CSharpExtensions.Annotations": {
+            "type": "Transitive",
+            "resolved": "4.2.7",
+            "contentHash": "9fRFxTUwPmH7lukckwEvvKawMcP8ObwnOngN8kx5Bx773WHSku1EGa5BIteV07th5553il76fPX7U1xz2bFmuQ=="
+        },
         "System.AppContext": {
             "type": "Transitive",
             "resolved": "4.3.0",

@@ -2436,6 +2441,7 @@
             "Octokit": "2.0.1",
             "Scriban": "5.5.0",
             "Semver": "2.1.0",
+            "SmartAnalyzers.CSharpExtensions.Annotations": "4.2.7",
             "System.IdentityModel.Tokens.Jwt": "6.22.1",
             "System.Linq.Async": "6.0.1",
             "TaskTupleAwaiter": "2.0.0"