Enable agent-specific .NET functions in check-pr (#2119)

Enable the .NET functions for the agent by sending the agent the URI for the `-net` service.
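
On the C# side this comes down to the instance URL the service hands out. Below is a condensed sketch of the `Creds.GetInstanceUrl` change from the diff further down; the class name and instance name here are illustrative, not the real ones:

```csharp
using System;

public class InstanceUrlExample {
    // Stand-in for the real instance-name lookup in Creds.
    private static string GetInstanceName() => "my-onefuzz-instance";

    // Agents are pointed at the parallel "-net" Function App.
    // TODO (from the diff): remove "-net" once the .NET service is promoted.
    public static Uri GetInstanceUrl()
        => new($"https://{GetInstanceName()}-net.azurewebsites.net");
}
```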

Also fix several issues that caused failures when using the .NET functions (e.g. `CouldShrinkScaleset` was not implemented).
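
The new `CouldShrinkScaleset` checks the shrink queues for both the node's scaleset and its pool (the full version is in the `NodeOperations` diff below). A minimal, self-contained sketch, with `Node` and the queue lookup as simplified stand-ins for the real service types (`ShrinkQueue`, `IQueue`):

```csharp
using System;
using System.Threading.Tasks;

public record Node(Guid MachineId, Guid? ScalesetId, Guid? PoolId);

public class ShrinkChecker {
    // Stand-in for ShrinkQueue.ShouldShrink(): true if the shrink queue for
    // the given scaleset/pool id has a pending entry.
    private readonly Func<Guid, Task<bool>> _shouldShrink;

    public ShrinkChecker(Func<Guid, Task<bool>> shouldShrink)
        => _shouldShrink = shouldShrink;

    // A node could be shrunk away if either its scaleset's or its pool's
    // shrink queue says so.
    public async Task<bool> CouldShrinkScaleset(Node node) {
        if (node.ScalesetId is Guid scalesetId && await _shouldShrink(scalesetId)) {
            return true;
        }
        if (node.PoolId is Guid poolId && await _shouldShrink(poolId)) {
            return true;
        }
        return false;
    }
}
```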

Improve error handling around table serialization/deserialization, and fix an int64/long mismatch between the Python and C# code.
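
Concretely, numeric model fields written by the Python service are now declared as `long` on the C# side, and `EntityConverter` wraps field reads so failures name the offending property. A small sketch of that pattern using `Azure.Data.Tables` (the helper class here is hypothetical; the real change lives in `EntityConverter.GetFieldValue`/`ToRecord` in the diff below):

```csharp
using System;
using Azure.Data.Tables;

public static class TableFieldReader {
    // Read a numeric field as Int64: the Python service stores these values
    // as 64-bit integers, which is where the int64/long mismatch came from.
    public static long? ReadInt64Field(TableEntity entity, string fieldName) {
        try {
            return entity.GetInt64(fieldName);
        } catch (Exception ex) {
            // Wrap so the failing entity field is named, as EntityConverter now does.
            throw new InvalidOperationException(
                $"Unable to get value for entity field '{fieldName}'", ex);
        }
    }
}
```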

----

For `check-pr` testing:

1. There's a new parameter `enable_dotnet` which maps directly to the `--enable_dotnet` switch on `deploy.py`.
2. If you put `agent` there, all the `agent_*` functions will be enabled for .NET and disabled for Python.
3. If `agent_can_schedule` is disabled on the Python side, the Python service automatically points the agent at the .NET functions.

So to test the .NET agent functions, do a `check-pr` run with `enable_dotnet` set to `agent`, and it should all work.
George Pollard
2022-07-21 08:40:30 +12:00
committed by GitHub
parent b1a3e7530a
commit 4fa6e74241
19 changed files with 184 additions and 129 deletions

View File

@ -36,26 +36,27 @@ public class AgentCanSchedule {
ErrorCode.UNABLE_TO_FIND,
new string[] {
"unable to find node"
}
),
canScheduleRequest.MachineId.ToString()
);
}),
canScheduleRequest.MachineId.ToString());
}
var allowed = true;
var workStopped = false;
if (!await _context.NodeOperations.CanProcessNewWork(node)) {
allowed = false;
}
var task = await _context.TaskOperations.GetByTaskId(canScheduleRequest.TaskId);
workStopped = task == null || task.State.ShuttingDown();
var workStopped = task == null || task.State.ShuttingDown();
if (workStopped) {
allowed = false;
}
if (allowed) {
allowed = (await _context.NodeOperations.AcquireScaleInProtection(node)).IsOk;
}
return await RequestHandling.Ok(req, new CanSchedule(allowed, workStopped));
return await RequestHandling.Ok(req, new CanSchedule(Allowed: allowed, WorkStopped: workStopped));
}
}

View File

@ -79,7 +79,7 @@ public class AgentEvents {
return null;
}
if (_context.NodeOperations.CouldShrinkScaleset(node)) {
if (await _context.NodeOperations.CouldShrinkScaleset(node)) {
_log.Info($"stopping free node to resize scaleset: {machineId}");
await _context.NodeOperations.SetHalt(node);
return null;

View File

@ -52,9 +52,16 @@ public record NodeCommandAddSshKey(string PublicKey);
public record NodeCommand
(
[property: JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
StopNodeCommand? Stop = default,
[property: JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
StopTaskNodeCommand? StopTask = default,
[property: JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
NodeCommandAddSshKey? AddSshKey = default,
[property: JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
NodeCommandStopIfFree? StopIfFree = default
);
@ -158,15 +165,15 @@ public record UserInfo(Guid? ApplicationId, Guid? ObjectId, String? Upn);
public record TaskDetails(
TaskType Type,
int Duration,
long Duration,
string? TargetExe = null,
Dictionary<string, string>? TargetEnv = null,
List<string>? TargetOptions = null,
int? TargetWorkers = null,
long? TargetWorkers = null,
bool? TargetOptionsMerge = null,
bool? CheckAsanLog = null,
bool? CheckDebugger = null,
int? CheckRetryCount = null,
long? CheckRetryCount = null,
bool? CheckFuzzerHelp = null,
bool? ExpectCrashOnFailure = null,
bool? RenameOutput = null,
@ -184,11 +191,11 @@ public record TaskDetails(
string? StatsFile = null,
StatsFormat? StatsFormat = null,
bool? RebootAfterSetup = null,
int? TargetTimeout = null,
int? EnsembleSyncDelay = null,
long? TargetTimeout = null,
long? EnsembleSyncDelay = null,
bool? PreserveExistingOutputs = null,
List<string>? ReportList = null,
int? MinimizedStackDepth = null,
long? MinimizedStackDepth = null,
string? CoverageFilter = null
);
@ -197,12 +204,12 @@ public record TaskVm(
string Sku,
string Image,
bool? RebootAfterSetup,
int Count = 1,
long Count = 1,
bool SpotInstance = false
);
public record TaskPool(
int Count,
long Count,
PoolName PoolName
);
@ -390,7 +397,6 @@ public record Scaleset(
Guid? ClientId,
Guid? ClientObjectId,
Dictionary<string, string> Tags
) : StatefulEntityBase<ScalesetState>(State);
[JsonConverter(typeof(ContainerConverter))]
@ -433,7 +439,7 @@ public record Report(
string? AsanLog,
Guid TaskId,
Guid JobId,
int? ScarinessScore,
long? ScarinessScore,
string? ScarinessDescription,
List<string>? MinimizedStack,
string? MinimizedStackSha256,
@ -492,7 +498,7 @@ public record Repro(
public record ReproConfig(
Container Container,
string Path,
int Duration
long Duration
);
// Skipping AutoScaleConfig because it's not used anymore
@ -572,7 +578,7 @@ public record JobConfig(
string Project,
string Name,
string Build,
int Duration,
long Duration,
string? Logs
);
@ -624,7 +630,7 @@ public class TaskUnitConfigConverter : JsonConverter<TaskUnitConfig> {
public record VmDefinition(
Compare Compare,
int Value
long Value
);
public record TaskDefinition(
@ -648,7 +654,7 @@ public record WorkSet(
public record ContainerDefinition(
ContainerType Type,
Compare Compare,
int Value,
long Value,
ContainerPermission Permissions);
@ -702,12 +708,12 @@ public record TaskUnitConfig(
public string? TargetExe { get; set; }
public Dictionary<string, string>? TargetEnv { get; set; }
public List<string>? TargetOptions { get; set; }
public int? TargetTimeout { get; set; }
public long? TargetTimeout { get; set; }
public bool? TargetOptionsMerge { get; set; }
public int? TargetWorkers { get; set; }
public long? TargetWorkers { get; set; }
public bool? CheckAsanLog { get; set; }
public bool? CheckDebugger { get; set; }
public int? CheckRetryCount { get; set; }
public long? CheckRetryCount { get; set; }
public bool? CheckFuzzerHelp { get; set; }
public bool? ExpectCrashOnFailure { get; set; }
public bool? RenameOutput { get; set; }
@ -720,9 +726,9 @@ public record TaskUnitConfig(
public List<string>? AnalyzerOptions { get; set; }
public string? StatsFile { get; set; }
public StatsFormat? StatsFormat { get; set; }
public int? EnsembleSyncDelay { get; set; }
public long? EnsembleSyncDelay { get; set; }
public List<string>? ReportList { get; set; }
public int? MinimizedStackDepth { get; set; }
public long? MinimizedStackDepth { get; set; }
public string? CoverageFilter { get; set; }
// from here forwards are Container definitions. These need to be inline

View File

@ -81,7 +81,10 @@ public record NodeSettingUpEventData(
) : NodeStateData;
public record NodeDoneEventData(
[property: JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
string? Error,
[property: JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
ProcessOutput? ScriptOutput
) : NodeStateData;

View File

@ -39,6 +39,8 @@ public interface IServiceConfig {
public string OneFuzzVersion { get; }
public string? OneFuzzAllowOutdatedAgent { get; }
// Prefix to add to the name of any tables & containers created. This allows
// multiple instances to run against the same storage account, which
// is useful for things like integration testing.
@ -85,6 +87,7 @@ public class ServiceConfiguration : IServiceConfig {
public string? OneFuzzResourceGroup { get => Environment.GetEnvironmentVariable("ONEFUZZ_RESOURCE_GROUP"); }
public string? OneFuzzTelemetry { get => Environment.GetEnvironmentVariable("ONEFUZZ_TELEMETRY"); }
public string OneFuzzVersion { get => Environment.GetEnvironmentVariable("ONEFUZZ_VERSION") ?? "0.0.0"; }
public string? OneFuzzAllowOutdatedAgent => Environment.GetEnvironmentVariable("ONEFUZZ_ALLOW_OUTDATED_AGENT");
public string OneFuzzNodeDisposalStrategy { get => Environment.GetEnvironmentVariable("ONEFUZZ_NODE_DISPOSAL_STRATEGY") ?? "scale_in"; }
public string OneFuzzStoragePrefix => ""; // in production we never prefix the tables

View File

@ -86,7 +86,7 @@ namespace ApiService.TestHooks {
var s = await req.ReadAsStringAsync();
var node = JsonSerializer.Deserialize<Node>(s!, EntityConverter.GetJsonSerializerOptions());
var r = _nodeOps.CouldShrinkScaleset(node!);
var r = await _nodeOps.CouldShrinkScaleset(node!);
var resp = req.CreateResponse(HttpStatusCode.OK);
await resp.WriteAsJsonAsync(r);
return resp;

View File

@ -93,9 +93,9 @@ public class Creds : ICreds {
return rg.Value.Data.Location.Name;
}
public Uri GetInstanceUrl() {
return new Uri($"https://{GetInstanceName()}.azurewebsites.net");
}
public Uri GetInstanceUrl()
// TODO: remove -net when promoted to main version
=> new($"https://{GetInstanceName()}-net.azurewebsites.net");
public record ScaleSetIdentity(string principalId);

View File

@ -14,7 +14,7 @@ public interface INodeOperations : IStatefulOrm<Node, NodeState> {
bool IsOutdated(Node node);
Async.Task Stop(Node node, bool done = false);
bool IsTooOld(Node node);
bool CouldShrinkScaleset(Node node);
Task<bool> CouldShrinkScaleset(Node node);
Async.Task SetHalt(Node node);
Async.Task SetState(Node node, NodeState state);
Async.Task ToReimage(Node node, bool done = false);
@ -65,10 +65,11 @@ public class NodeOperations : StatefulOrm<Node, NodeState, NodeOperations>, INod
}
public async Task<OneFuzzResultVoid> AcquireScaleInProtection(Node node) {
if (await ScalesetNodeExists(node) && node.ScalesetId != null) {
if (await ScalesetNodeExists(node) && node.ScalesetId is Guid scalesetId) {
_logTracer.Info($"Setting scale-in protection on node {node.MachineId}");
return await _context.VmssOperations.UpdateScaleInProtection((Guid)node.ScalesetId, node.MachineId, protectFromScaleIn: true);
return await _context.VmssOperations.UpdateScaleInProtection(scalesetId, node.MachineId, protectFromScaleIn: true);
}
return OneFuzzResultVoid.Ok;
}
@ -88,7 +89,7 @@ public class NodeOperations : StatefulOrm<Node, NodeState, NodeOperations>, INod
}
public async Task<bool> CanProcessNewWork(Node node) {
if (IsOutdated(node)) {
if (IsOutdated(node) && _context.ServiceConfiguration.OneFuzzAllowOutdatedAgent != "true") {
_logTracer.Info($"can_process_new_work agent and service versions differ, stopping node. machine_id:{node.MachineId} agent_version:{node.Version} service_version:{_context.ServiceConfiguration.OneFuzzVersion}");
await Stop(node, done: true);
return false;
@ -122,7 +123,7 @@ public class NodeOperations : StatefulOrm<Node, NodeState, NodeOperations>, INod
return false;
}
if (CouldShrinkScaleset(node)) {
if (await CouldShrinkScaleset(node)) {
_logTracer.Info($"can_process_new_work node scheduled to shrink. machine_id:{node.MachineId}");
await SetHalt(node);
return false;
@ -286,8 +287,22 @@ public class NodeOperations : StatefulOrm<Node, NodeState, NodeOperations>, INod
&& node.InitializedAt < DateTime.UtcNow - INodeOperations.NODE_REIMAGE_TIME;
}
public bool CouldShrinkScaleset(Node node) {
throw new NotImplementedException();
public async Task<bool> CouldShrinkScaleset(Node node) {
if (node.ScalesetId is Guid scalesetId) {
var queue = new ShrinkQueue(scalesetId, _context.Queue, _logTracer);
if (await queue.ShouldShrink()) {
return true;
}
}
if (node.PoolId is Guid poolId) {
var queue = new ShrinkQueue(poolId, _context.Queue, _logTracer);
if (await queue.ShouldShrink()) {
return true;
}
}
return false;
}
public async Async.Task SetState(Node node, NodeState state) {

View File

@ -57,13 +57,13 @@ public class Scheduler : IScheduler {
}
}
private async Async.Task<bool> ScheduleWorkset(WorkSet workSet, Pool pool, int count) {
private async Async.Task<bool> ScheduleWorkset(WorkSet workSet, Pool pool, long count) {
if (!PoolStateHelper.Available.Contains(pool.State)) {
_logTracer.Info($"pool not available for work: {pool.Name} state: {pool.State}");
return false;
}
for (var i = 0; i < count; i++) {
for (var i = 0L; i < count; i++) {
if (!await _poolOperations.ScheduleWorkset(pool, workSet)) {
_logTracer.Error($"unable to schedule workset. pool:{pool.Name} workset: {workSet}");
return false;
@ -118,7 +118,7 @@ public class Scheduler : IScheduler {
}
record BucketConfig(int count, bool reboot, Container setupContainer, string? setupScript, Pool pool);
record BucketConfig(long count, bool reboot, Container setupContainer, string? setupScript, Pool pool);
private async Async.Task<(BucketConfig, WorkUnit)?> BuildWorkunit(Task task) {
Pool? pool = await _taskOperations.GetPool(task);
@ -151,7 +151,7 @@ public class Scheduler : IScheduler {
}
var reboot = false;
var count = 1;
var count = 1L;
if (task.Config.Pool != null) {
count = task.Config.Pool.Count;
reboot = task.Config.Task.RebootAfterSetup ?? false;

View File

@ -15,8 +15,9 @@ public class ShrinkQueue {
}
public override string ToString() {
return $"to-shrink-{_baseId.ToString("N")}";
return $"to-shrink-{_baseId:N}";
}
public string QueueName => this.ToString();
public async Async.Task Clear() {

View File

@ -19,10 +19,8 @@ public interface IVmssOperations {
public class VmssOperations : IVmssOperations {
string INSTANCE_NOT_FOUND = " is not an active Virtual Machine Scale Set VM instanceId.";
ILogTracer _log;
ICreds _creds;
readonly ILogTracer _log;
readonly ICreds _creds;
public VmssOperations(ILogTracer log, ICreds creds) {
_log = log;
@ -144,46 +142,27 @@ public class VmssOperations : IVmssOperations {
}
}
public async Async.Task<OneFuzzResultVoid> UpdateScaleInProtection(Guid name, Guid vmId, bool protectFromScaleIn) {
var res = await GetInstanceVm(name, vmId);
if (!res.IsOk) {
return OneFuzzResultVoid.Error(res.ErrorV);
} else {
VirtualMachineScaleSetVmProtectionPolicy newProtectionPolicy;
var instanceVm = res.OkV!;
if (instanceVm.Data.ProtectionPolicy is not null) {
newProtectionPolicy = instanceVm.Data.ProtectionPolicy;
newProtectionPolicy.ProtectFromScaleIn = protectFromScaleIn;
} else {
newProtectionPolicy = new VirtualMachineScaleSetVmProtectionPolicy() { ProtectFromScaleIn = protectFromScaleIn };
}
instanceVm.Data.ProtectionPolicy = newProtectionPolicy;
var scaleSet = GetVmssResource(name);
var vmCollection = scaleSet.GetVirtualMachineScaleSetVms();
try {
var r = await vmCollection.CreateOrUpdateAsync(WaitUntil.Started, instanceVm.Data.InstanceId, instanceVm.Data);
if (r.GetRawResponse().IsError) {
var msg = $"failed to update scale in protection on vm {vmId} for scaleset {name}";
_log.WithHttpStatus((r.GetRawResponse().Status, r.GetRawResponse().ReasonPhrase)).Error(msg);
var instanceVm = res.OkV;
instanceVm.Data.ProtectionPolicy ??= new();
if (instanceVm.Data.ProtectionPolicy.ProtectFromScaleIn != protectFromScaleIn) {
instanceVm.Data.ProtectionPolicy.ProtectFromScaleIn = protectFromScaleIn;
var vmCollection = GetVmssResource(name).GetVirtualMachineScaleSetVms();
try {
await vmCollection.CreateOrUpdateAsync(WaitUntil.Started, instanceVm.Data.InstanceId, instanceVm.Data);
return OneFuzzResultVoid.Ok;
} catch {
var msg = $"unable to set protection policy on: {vmId}:{instanceVm.Id}";
return OneFuzzResultVoid.Error(ErrorCode.UNABLE_TO_UPDATE, msg);
} else {
return OneFuzzResultVoid.Ok;
}
} catch (Exception ex) when (ex is RequestFailedException || ex is CloudException) {
if (ex.Message.Contains(INSTANCE_NOT_FOUND) && protectFromScaleIn == false) {
_log.Info($"Tried to remove scale in protection on node {name} {vmId} but instance no longer exists");
return OneFuzzResultVoid.Ok;
} else {
var msg = $"failed to update scale in protection on vm {vmId} for scaleset {name}";
_log.Exception(ex, msg);
return OneFuzzResultVoid.Error(ErrorCode.UNABLE_TO_UPDATE, ex.Message);
}
} else {
_log.Info($"scale in protection was already set to {protectFromScaleIn} on vm {vmId} for scaleset {name}");
return OneFuzzResultVoid.Ok;
}
}
}
}

View File

@ -199,8 +199,8 @@ public class EntityConverter {
|| prop.type == typeof(DateTimeOffset?)
|| prop.type == typeof(int)
|| prop.type == typeof(int?)
|| prop.type == typeof(Int64)
|| prop.type == typeof(Int64?)
|| prop.type == typeof(long)
|| prop.type == typeof(long?)
|| prop.type == typeof(double)
|| prop.type == typeof(double?)
@ -253,51 +253,61 @@ public class EntityConverter {
_ => null,
};
}
var objType = obj.GetType();
if (ef.type == typeof(string)) {
return entity.GetString(fieldName);
} else if (ef.type == typeof(bool) || ef.type == typeof(bool?)) {
return entity.GetBoolean(fieldName);
} else if (ef.type == typeof(DateTimeOffset) || ef.type == typeof(DateTimeOffset?)) {
return entity.GetDateTimeOffset(fieldName);
} else if (ef.type == typeof(DateTime) || ef.type == typeof(DateTime?)) {
return entity.GetDateTime(fieldName);
} else if (ef.type == typeof(double) || ef.type == typeof(double?)) {
return entity.GetDouble(fieldName);
} else if (ef.type == typeof(Guid) || ef.type == typeof(Guid?)) {
return (object?)Guid.Parse(entity.GetString(fieldName));
} else if (ef.type == typeof(int) || ef.type == typeof(short) || ef.type == typeof(int?) || ef.type == typeof(short?)) {
return entity.GetInt32(fieldName);
} else if (ef.type == typeof(long) || ef.type == typeof(long?)) {
return entity.GetInt64(fieldName);
} else {
var outputType = ef.type;
if (ef.discriminator != null) {
var (attr, typeProvider) = ef.discriminator.Value;
var v = GetFieldValue(info, attr.FieldName, entity) ?? throw new Exception($"No value for {attr.FieldName}");
outputType = typeProvider.GetTypeInfo(v);
}
if (objType == typeof(string)) {
var value = entity.GetString(fieldName);
if (value.StartsWith('[') || value.StartsWith('{') || value == "null") {
return JsonSerializer.Deserialize(value, outputType, options: _options);
} else {
return JsonSerializer.Deserialize($"\"{value}\"", outputType, options: _options);
}
try {
if (ef.type == typeof(string)) {
return entity.GetString(fieldName);
} else if (ef.type == typeof(bool) || ef.type == typeof(bool?)) {
return entity.GetBoolean(fieldName);
} else if (ef.type == typeof(DateTimeOffset) || ef.type == typeof(DateTimeOffset?)) {
return entity.GetDateTimeOffset(fieldName);
} else if (ef.type == typeof(DateTime) || ef.type == typeof(DateTime?)) {
return entity.GetDateTime(fieldName);
} else if (ef.type == typeof(double) || ef.type == typeof(double?)) {
return entity.GetDouble(fieldName);
} else if (ef.type == typeof(Guid) || ef.type == typeof(Guid?)) {
return (object?)Guid.Parse(entity.GetString(fieldName));
} else if (ef.type == typeof(int) || ef.type == typeof(short) || ef.type == typeof(int?) || ef.type == typeof(short?)) {
return entity.GetInt32(fieldName);
} else if (ef.type == typeof(long) || ef.type == typeof(long?)) {
return entity.GetInt64(fieldName);
} else {
var value = entity.GetString(fieldName);
return JsonSerializer.Deserialize(value, outputType, options: _options);
var outputType = ef.type;
if (ef.discriminator != null) {
var (attr, typeProvider) = ef.discriminator.Value;
var v = GetFieldValue(info, attr.FieldName, entity) ?? throw new Exception($"No value for {attr.FieldName}");
outputType = typeProvider.GetTypeInfo(v);
}
var objType = obj.GetType();
if (objType == typeof(string)) {
var value = entity.GetString(fieldName);
if (value.StartsWith('[') || value.StartsWith('{') || value == "null") {
return JsonSerializer.Deserialize(value, outputType, options: _options);
} else {
return JsonSerializer.Deserialize($"\"{value}\"", outputType, options: _options);
}
} else {
var value = entity.GetString(fieldName);
return JsonSerializer.Deserialize(value, outputType, options: _options);
}
}
} catch (Exception ex) {
throw new InvalidOperationException($"Unable to get value for property '{name}' (entity field '{fieldName}')", ex);
}
}
public T ToRecord<T>(TableEntity entity) where T : EntityBase {
var entityInfo = GetEntityInfo<T>();
var parameters =
entityInfo.properties.Select(grouping => GetFieldValue(entityInfo, grouping.Key, entity)).ToArray();
object?[] parameters;
try {
parameters = entityInfo.properties.Select(grouping => GetFieldValue(entityInfo, grouping.Key, entity)).ToArray();
} catch (Exception ex) {
throw new InvalidOperationException($"Unable to extract properties from TableEntity for {typeof(T)}", ex);
}
try {
var entityRecord = (T)entityInfo.constructor.Invoke(parameters);
if (entity.ETag != default) {
@ -308,9 +318,7 @@ public class EntityConverter {
} catch (Exception ex) {
var stringParam = string.Join(", ", parameters);
throw new Exception($"Could not initialize object of type {typeof(T)} with the following parameters: {stringParam} constructor {entityInfo.constructor} : {ex}");
throw new InvalidOperationException($"Could not initialize object of type {typeof(T)} with the following parameters: {stringParam} constructor {entityInfo.constructor}", ex);
}
}
}

View File

@ -57,4 +57,6 @@ public sealed class TestServiceConfiguration : IServiceConfig {
public string? OneFuzzDataStorage => throw new NotImplementedException();
public string? OneFuzzResourceGroup => throw new NotImplementedException();
public string? OneFuzzAllowOutdatedAgent => throw new NotImplementedException();
}

View File

@ -183,8 +183,11 @@ public class RequestsTests {
""state_update"": {
""state"": ""done"",
""data"": {
""error"": null,
""script_output"": null
""script_output"": {
""exit_status"": null,
""stderr"": ""err"",
""stdout"": ""out""
}
}
}
},

View File

@ -13,7 +13,7 @@ from onefuzztypes.models import Error
from onefuzztypes.requests import AgentRegistrationGet, AgentRegistrationPost
from onefuzztypes.responses import AgentRegistration
from ..onefuzzlib.azure.creds import get_instance_url
from ..onefuzzlib.azure.creds import get_agent_instance_url
from ..onefuzzlib.azure.queue import get_queue_sas
from ..onefuzzlib.azure.storage import StorageType
from ..onefuzzlib.endpoint_authorization import call_if_agent
@ -23,7 +23,7 @@ from ..onefuzzlib.workers.pools import Pool
def create_registration_response(machine_id: UUID, pool: Pool) -> func.HttpResponse:
base_address = get_instance_url()
base_address = get_agent_instance_url()
events_url = "%s/api/agents/events" % base_address
commands_url = "%s/api/agents/commands" % base_address
work_queue = get_queue_sas(

View File

@ -81,6 +81,22 @@ def get_instance_url() -> str:
return "https://%s.azurewebsites.net" % get_instance_name()
@cached
def python_agent_functions_are_disabled() -> bool:
# note that we only check one function here;
# these should be enabled or disabled as a group
return os.environ["AzureWebJobs_agent_can_schedule_Disabled"] == "1"
# periods become underscores here
@cached
def get_agent_instance_url() -> str:
if python_agent_functions_are_disabled():
return "https://%s-net.azurewebsites.net" % get_instance_name()
else:
return get_instance_url()
@cached
def get_instance_id() -> UUID:
from .containers import get_blob

View File

@ -24,7 +24,7 @@ from .azure.containers import (
get_file_url,
save_blob,
)
from .azure.creds import get_instance_id, get_instance_url
from .azure.creds import get_agent_instance_url, get_instance_id
from .azure.log_analytics import get_monitor_settings
from .azure.queue import get_queue_sas
from .azure.storage import StorageType
@ -246,7 +246,7 @@ def build_scaleset_script(pool: Pool, scaleset: Scaleset) -> str:
def build_pool_config(pool: Pool) -> str:
config = AgentConfig(
pool_name=pool.name,
onefuzz_url=get_instance_url(),
onefuzz_url=get_agent_instance_url(),
heartbeat_queue=get_queue_sas(
"node-heartbeat",
StorageType.config,

View File

@ -13,9 +13,9 @@ from onefuzztypes.requests import PoolCreate, PoolSearch, PoolStop
from onefuzztypes.responses import BoolResult
from ..onefuzzlib.azure.creds import (
get_agent_instance_url,
get_base_region,
get_instance_id,
get_instance_url,
get_regions,
)
from ..onefuzzlib.azure.queue import get_queue_sas
@ -29,7 +29,7 @@ from ..onefuzzlib.workers.pools import Pool
def set_config(pool: Pool) -> Pool:
pool.config = AgentConfig(
pool_name=pool.name,
onefuzz_url=get_instance_url(),
onefuzz_url=get_agent_instance_url(),
instance_telemetry_key=os.environ.get("APPINSIGHTS_INSTRUMENTATIONKEY"),
microsoft_telemetry_key=os.environ.get("ONEFUZZ_TELEMETRY"),
heartbeat_queue=get_queue_sas(

View File

@ -4,6 +4,7 @@
# Licensed under the MIT License.
import argparse
import itertools
import json
import logging
import os
@ -1094,9 +1095,26 @@ class Client:
def enable_dotnet_func(self) -> None:
if self.enable_dotnet:
def expand_agent(f: str) -> List[str]:
# 'agent' is permitted as a shortcut for the agent functions
if f == "agent":
return [
"agent_can_schedule",
"agent_commands",
"agent_events",
"agent_registration",
]
else:
return [f]
enable_dotnet = itertools.chain.from_iterable(
map(expand_agent, self.enable_dotnet)
)
func = shutil.which("az")
assert func is not None
for function_name in self.enable_dotnet:
for function_name in enable_dotnet:
format_name = function_name.split("_")
dotnet_name = "".join(x.title() for x in format_name)
error: Optional[subprocess.CalledProcessError] = None