initial public release

This commit is contained in:
Brian Caswell
2020-09-18 12:21:04 -04:00
parent 9c3aa0bdfb
commit d3a0b292e6
387 changed files with 43810 additions and 28 deletions

View File

@ -0,0 +1,320 @@
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import os
from typing import Dict, List, Optional
from uuid import UUID
from onefuzztypes.enums import Compare, ContainerPermission, ContainerType, TaskFeature
from onefuzztypes.models import TaskConfig, TaskDefinition, TaskUnitConfig
from ..azure.containers import blob_exists, get_container_sas_url, get_containers
from ..azure.creds import get_fuzz_storage, get_instance_name
from ..azure.queue import get_queue_sas
from .defs import TASK_DEFINITIONS
LOGGER = logging.getLogger("onefuzz")
def get_input_container_queues(config: TaskConfig) -> Optional[List[str]]:  # tasks.Task
    """Return the container names feeding this task's monitor queue.

    Returns None when the task's definition declares no monitor queue.

    Raises:
        TaskConfigError: if the task type is unknown.
    """
    task_type = config.task.type
    if task_type not in TASK_DEFINITIONS:
        raise TaskConfigError("unsupported task type: %s" % task_type.name)

    monitored = TASK_DEFINITIONS[task_type].monitor_queue
    if not monitored:
        return None
    return [entry.name for entry in config.containers if entry.type == monitored]
def check_val(compare: Compare, expected: int, actual: int) -> bool:
    """Evaluate ``actual`` against ``expected`` under the comparison mode.

    Compare.AtLeast means "actual must be at least expected"; Compare.AtMost
    means "actual must be at most expected".
    """
    if compare == Compare.Equal:
        return actual == expected
    if compare == Compare.AtLeast:
        return actual >= expected
    if compare == Compare.AtMost:
        return actual <= expected
    raise NotImplementedError
def check_container(
    compare: Compare,
    expected: int,
    container_type: ContainerType,
    containers: Dict[ContainerType, List[str]],
) -> None:
    """Verify the count of containers of ``container_type`` satisfies the rule.

    Raises:
        TaskConfigError: when the configured count fails the comparison.
    """
    found = len(containers.get(container_type, []))
    if check_val(compare, expected, found):
        return
    raise TaskConfigError(
        "container type %s: expected %s %d, got %d"
        % (container_type.name, compare.name, expected, found)
    )
def check_containers(definition: TaskDefinition, config: TaskConfig) -> None:
    """Validate the containers in *config* against the task *definition*.

    Checks that every named container exists, per-type counts satisfy the
    definition, no unsupported container types are present, and the monitor
    queue (if any) refers to a container type the task actually uses.

    Raises:
        TaskConfigError: on any validation failure.
    """
    all_containers = set(get_containers().keys())

    # group configured container names by their container type
    containers: Dict[ContainerType, List[str]] = {}
    for container in config.containers:
        if container.name not in all_containers:
            raise TaskConfigError("missing container: %s" % container.name)
        if container.type not in containers:
            containers[container.type] = []
        containers[container.type].append(container.name)

    for container_def in definition.containers:
        check_container(
            container_def.compare, container_def.value, container_def.type, containers
        )

    for container_type in containers:
        if container_type not in [x.type for x in definition.containers]:
            # FIX: the message was passed logging-style ("%s", arg) as two
            # exception arguments, leaving the literal "%s" unformatted.
            raise TaskConfigError(
                "unsupported container type for this task: %s" % container_type.name
            )

    if definition.monitor_queue:
        if definition.monitor_queue not in [x.type for x in definition.containers]:
            raise TaskConfigError(
                "unable to monitor container type as it is not used by this task: %s"
                % definition.monitor_queue.name
            )
def check_config(config: TaskConfig) -> None:
    """Validate a TaskConfig against its task definition.

    Validates the task type, vm/pool exclusivity, container layout,
    vm or pool instance counts, and feature-gated requirements
    (supervisor_exe, target_exe, generator_exe, stats_file).

    Raises:
        TaskConfigError: when the configuration fails validation.
    """
    if config.task.type not in TASK_DEFINITIONS:
        raise TaskConfigError("unsupported task type: %s" % config.task.type.name)

    if config.vm is not None and config.pool is not None:
        raise TaskConfigError("either the vm or pool must be specified, but not both")

    definition = TASK_DEFINITIONS[config.task.type]

    check_containers(definition, config)

    if (
        TaskFeature.supervisor_exe in definition.features
        and not config.task.supervisor_exe
    ):
        err = "missing supervisor_exe"
        LOGGER.error(err)
        # FIX: reuse the logged message instead of duplicating the literal
        raise TaskConfigError(err)

    if config.vm:
        if not check_val(definition.vm.compare, definition.vm.value, config.vm.count):
            err = "invalid vm count: expected %s %d, got %s" % (
                definition.vm.compare,
                definition.vm.value,
                config.vm.count,
            )
            LOGGER.error(err)
            raise TaskConfigError(err)
    elif config.pool:
        if not check_val(definition.vm.compare, definition.vm.value, config.pool.count):
            err = "invalid vm count: expected %s %d, got %s" % (
                definition.vm.compare,
                definition.vm.value,
                config.pool.count,
            )
            LOGGER.error(err)
            raise TaskConfigError(err)
    else:
        raise TaskConfigError("either the vm or pool must be specified")

    if TaskFeature.target_exe in definition.features:
        container = [x for x in config.containers if x.type == ContainerType.setup][0]
        if not blob_exists(container.name, config.task.target_exe):
            # a missing target_exe only warns; it does not fail validation
            err = "target_exe `%s` does not exist in the setup container `%s`" % (
                config.task.target_exe,
                container.name,
            )
            LOGGER.warning(err)

    if TaskFeature.generator_exe in definition.features:
        container = [x for x in config.containers if x.type == ContainerType.tools][0]
        if not config.task.generator_exe:
            raise TaskConfigError("generator_exe is not defined")
        # generator_exe may be anchored at the tools directory with either
        # a posix or windows separator; verify the blob exists in that case
        tools_paths = ["{tools_dir}/", "{tools_dir}\\"]
        for tool_path in tools_paths:
            if config.task.generator_exe.startswith(tool_path):
                generator = config.task.generator_exe.replace(tool_path, "")
                if not blob_exists(container.name, generator):
                    err = (
                        "generator_exe `%s` does not exist in the tools container `%s`"
                        % (
                            config.task.generator_exe,
                            container.name,
                        )
                    )
                    LOGGER.error(err)
                    raise TaskConfigError(err)

    if TaskFeature.stats_file in definition.features:
        if config.task.stats_file is not None and config.task.stats_format is None:
            err = "using a stats_file requires a stats_format"
            LOGGER.error(err)
            raise TaskConfigError(err)
def build_task_config(
    job_id: UUID, task_id: UUID, task_config: TaskConfig
) -> TaskUnitConfig:
    """Translate a TaskConfig into the TaskUnitConfig handed to agents.

    Generates SAS credentials (heartbeat queue, optional input queue, and
    per-container URLs scoped to the declared permissions) and copies each
    feature-gated field from the task config when the definition declares
    that feature.

    Raises:
        TaskConfigError: if the task type is not in TASK_DEFINITIONS.
    """
    if task_config.task.type not in TASK_DEFINITIONS:
        raise TaskConfigError("unsupported task type: %s" % task_config.task.type.name)
    definition = TASK_DEFINITIONS[task_config.task.type]
    config = TaskUnitConfig(
        job_id=job_id,
        task_id=task_id,
        task_type=task_config.task.type,
        instrumentation_key=os.environ.get("APPINSIGHTS_INSTRUMENTATIONKEY"),
        telemetry_key=os.environ.get("ONEFUZZ_TELEMETRY"),
        heartbeat_queue=get_queue_sas(
            "heartbeat",
            account_id=os.environ["ONEFUZZ_FUNC_STORAGE"],
            add=True,
        ),
        back_channel_address="https://%s.azurewebsites.net/api/back_channel"
        % (get_instance_name()),
    )
    if definition.monitor_queue:
        # tasks with a monitor queue receive work through a queue named
        # after the task id in the fuzzing storage account
        config.input_queue = get_queue_sas(
            task_id,
            add=True,
            read=True,
            update=True,
            process=True,
            account_id=get_fuzz_storage(),
        )
    for container_def in definition.containers:
        # the setup container is not exposed through the unit config;
        # presumably delivered via the scheduler's setup URL -- see
        # get_setup_container usage in the scheduler
        if container_def.type == ContainerType.setup:
            continue
        containers = []
        for (i, container) in enumerate(task_config.containers):
            if container.type != container_def.type:
                continue
            # each entry pairs a task-relative path with a SAS URL limited
            # to exactly the permissions declared by the definition
            containers.append(
                {
                    "path": "_".join(["task", container_def.type.name, str(i)]),
                    "url": get_container_sas_url(
                        container.name,
                        read=ContainerPermission.Read in container_def.permissions,
                        write=ContainerPermission.Write in container_def.permissions,
                        add=ContainerPermission.Add in container_def.permissions,
                        delete=ContainerPermission.Delete in container_def.permissions,
                        create=ContainerPermission.Create in container_def.permissions,
                        list=ContainerPermission.List in container_def.permissions,
                    ),
                }
            )
        if not containers:
            continue
        # container types capped at one instance are stored as a bare dict,
        # everything else as a list of dicts
        if (
            container_def.compare in [Compare.Equal, Compare.AtMost]
            and container_def.value == 1
        ):
            setattr(config, container_def.type.name, containers[0])
        else:
            setattr(config, container_def.type.name, containers)
    # defaults used when a feature is enabled but the field was left unset
    EMPTY_DICT: Dict[str, str] = {}
    EMPTY_LIST: List[str] = []
    if TaskFeature.supervisor_exe in definition.features:
        config.supervisor_exe = task_config.task.supervisor_exe
    if TaskFeature.supervisor_env in definition.features:
        config.supervisor_env = task_config.task.supervisor_env or EMPTY_DICT
    if TaskFeature.supervisor_options in definition.features:
        config.supervisor_options = task_config.task.supervisor_options or EMPTY_LIST
    if TaskFeature.supervisor_input_marker in definition.features:
        config.supervisor_input_marker = task_config.task.supervisor_input_marker
    if TaskFeature.target_exe in definition.features:
        # target_exe is rebased into the setup directory on the agent
        config.target_exe = "setup/%s" % task_config.task.target_exe
    if TaskFeature.target_env in definition.features:
        config.target_env = task_config.task.target_env or EMPTY_DICT
    if TaskFeature.target_options in definition.features:
        config.target_options = task_config.task.target_options or EMPTY_LIST
    if TaskFeature.target_options_merge in definition.features:
        config.target_options_merge = task_config.task.target_options_merge or False
    if TaskFeature.rename_output in definition.features:
        config.rename_output = task_config.task.rename_output or False
    if TaskFeature.generator_exe in definition.features:
        config.generator_exe = task_config.task.generator_exe
    if TaskFeature.generator_env in definition.features:
        config.generator_env = task_config.task.generator_env or EMPTY_DICT
    if TaskFeature.generator_options in definition.features:
        config.generator_options = task_config.task.generator_options or EMPTY_LIST
    if (
        TaskFeature.wait_for_files in definition.features
        and task_config.task.wait_for_files
    ):
        config.wait_for_files = task_config.task.wait_for_files.name
    if TaskFeature.analyzer_exe in definition.features:
        config.analyzer_exe = task_config.task.analyzer_exe
    if TaskFeature.analyzer_options in definition.features:
        config.analyzer_options = task_config.task.analyzer_options or EMPTY_LIST
    if TaskFeature.analyzer_env in definition.features:
        config.analyzer_env = task_config.task.analyzer_env or EMPTY_DICT
    if TaskFeature.stats_file in definition.features:
        config.stats_file = task_config.task.stats_file
        config.stats_format = task_config.task.stats_format
    if TaskFeature.target_timeout in definition.features:
        config.target_timeout = task_config.task.target_timeout
    if TaskFeature.check_asan_log in definition.features:
        config.check_asan_log = task_config.task.check_asan_log
    if TaskFeature.check_debugger in definition.features:
        config.check_debugger = task_config.task.check_debugger
    if TaskFeature.check_retry_count in definition.features:
        config.check_retry_count = task_config.task.check_retry_count or 0
    return config
def get_setup_container(config: TaskConfig) -> str:
    """Return the name of the task's required 'setup' container.

    Raises:
        TaskConfigError: when the config has no setup container.
    """
    setup = next(
        (entry.name for entry in config.containers if entry.type == ContainerType.setup),
        None,
    )
    if setup is not None:
        return setup
    raise TaskConfigError(
        "task missing setup container: task_type = %s" % config.task.type
    )
class TaskConfigError(Exception):
    """Raised when a task configuration fails validation."""
    pass

View File

@ -0,0 +1,376 @@
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from onefuzztypes.enums import (
Compare,
ContainerPermission,
ContainerType,
TaskFeature,
TaskType,
)
from onefuzztypes.models import ContainerDefinition, TaskDefinition, VmDefinition
# all tasks are required to have a 'setup' container
# Declarative description of every supported task type: which feature
# fields the task accepts, its VM count constraint, the container types it
# requires (with count rules and SAS permissions), and which container
# type (if any) is monitored via an input queue.
TASK_DEFINITIONS = {
    # generic_analysis: runs an analyzer over the crashes container,
    # writing results to the analysis container; monitors crashes
    TaskType.generic_analysis: TaskDefinition(
        features=[
            TaskFeature.target_exe,
            TaskFeature.target_options,
            TaskFeature.analyzer_exe,
            TaskFeature.analyzer_env,
            TaskFeature.analyzer_options,
        ],
        vm=VmDefinition(compare=Compare.AtLeast, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.analysis,
                compare=Compare.Equal,
                value=1,
                permissions=[
                    ContainerPermission.Write,
                    ContainerPermission.Read,
                    ContainerPermission.List,
                    ContainerPermission.Create,
                ],
            ),
            ContainerDefinition(
                type=ContainerType.crashes,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.tools,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
        ],
        monitor_queue=ContainerType.crashes,
    ),
    # libfuzzer_fuzz: fuzzing loop writing to crashes/inputs; optional
    # read-only seed containers; no monitor queue
    TaskType.libfuzzer_fuzz: TaskDefinition(
        features=[
            TaskFeature.target_exe,
            TaskFeature.target_env,
            TaskFeature.target_options,
            TaskFeature.target_workers,
        ],
        vm=VmDefinition(compare=Compare.AtLeast, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.crashes,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Write, ContainerPermission.Create],
            ),
            ContainerDefinition(
                type=ContainerType.inputs,
                compare=Compare.Equal,
                value=1,
                permissions=[
                    ContainerPermission.Write,
                    ContainerPermission.Read,
                    ContainerPermission.List,
                    ContainerPermission.Create,
                ],
            ),
            ContainerDefinition(
                type=ContainerType.readonly_inputs,
                compare=Compare.AtLeast,
                value=0,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
        ],
        monitor_queue=None,
    ),
    # libfuzzer_crash_report: reads crashes, creates report containers
    # (each at most one instance); monitors crashes
    TaskType.libfuzzer_crash_report: TaskDefinition(
        features=[
            TaskFeature.target_exe,
            TaskFeature.target_env,
            TaskFeature.target_options,
            TaskFeature.target_timeout,
            TaskFeature.check_retry_count,
        ],
        vm=VmDefinition(compare=Compare.AtLeast, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.crashes,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.reports,
                compare=Compare.AtMost,
                value=1,
                permissions=[ContainerPermission.Create],
            ),
            ContainerDefinition(
                type=ContainerType.unique_reports,
                compare=Compare.AtMost,
                value=1,
                permissions=[ContainerPermission.Create],
            ),
            ContainerDefinition(
                type=ContainerType.no_repro,
                compare=Compare.AtMost,
                value=1,
                permissions=[ContainerPermission.Create],
            ),
        ],
        monitor_queue=ContainerType.crashes,
    ),
    # libfuzzer_coverage: exactly one VM; reads inputs, writes coverage;
    # monitors readonly_inputs
    TaskType.libfuzzer_coverage: TaskDefinition(
        features=[
            TaskFeature.target_exe,
            TaskFeature.target_env,
            TaskFeature.target_options,
        ],
        vm=VmDefinition(compare=Compare.Equal, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.readonly_inputs,
                compare=Compare.AtLeast,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.coverage,
                compare=Compare.Equal,
                value=1,
                permissions=[
                    ContainerPermission.Create,
                    ContainerPermission.List,
                    ContainerPermission.Read,
                    ContainerPermission.Write,
                ],
            ),
        ],
        monitor_queue=ContainerType.readonly_inputs,
    ),
    # libfuzzer_merge: exactly one VM; merges readonly_inputs into inputs;
    # monitors inputs
    TaskType.libfuzzer_merge: TaskDefinition(
        features=[
            TaskFeature.target_exe,
            TaskFeature.target_env,
            TaskFeature.target_options,
        ],
        vm=VmDefinition(compare=Compare.Equal, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.readonly_inputs,
                compare=Compare.AtLeast,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.inputs,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Create, ContainerPermission.List],
            ),
        ],
        monitor_queue=ContainerType.inputs,
    ),
    # generic_supervisor: user-provided supervisor drives the target;
    # no monitor queue
    TaskType.generic_supervisor: TaskDefinition(
        features=[
            TaskFeature.target_exe,
            TaskFeature.target_options,
            TaskFeature.supervisor_exe,
            TaskFeature.supervisor_env,
            TaskFeature.supervisor_options,
            TaskFeature.supervisor_input_marker,
            TaskFeature.wait_for_files,
        ],
        vm=VmDefinition(compare=Compare.AtLeast, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.tools,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.crashes,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Create],
            ),
            ContainerDefinition(
                type=ContainerType.inputs,
                compare=Compare.Equal,
                value=1,
                permissions=[
                    ContainerPermission.Create,
                    ContainerPermission.Read,
                    ContainerPermission.List,
                ],
            ),
        ],
        monitor_queue=None,
    ),
    # generic_merge: supervisor-driven merge of readonly_inputs into
    # inputs, with optional stats file; no monitor queue
    TaskType.generic_merge: TaskDefinition(
        features=[
            TaskFeature.target_exe,
            TaskFeature.target_options,
            TaskFeature.supervisor_exe,
            TaskFeature.supervisor_env,
            TaskFeature.supervisor_options,
            TaskFeature.supervisor_input_marker,
            TaskFeature.stats_file,
        ],
        vm=VmDefinition(compare=Compare.AtLeast, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.tools,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.readonly_inputs,
                compare=Compare.AtLeast,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.inputs,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Create, ContainerPermission.List],
            ),
        ],
        monitor_queue=None,
    ),
    # generic_generator: a generator produces inputs fed to the target;
    # crashes are written out; no monitor queue
    TaskType.generic_generator: TaskDefinition(
        features=[
            TaskFeature.generator_exe,
            TaskFeature.generator_env,
            TaskFeature.generator_options,
            TaskFeature.target_exe,
            TaskFeature.target_env,
            TaskFeature.target_options,
            TaskFeature.rename_output,
            TaskFeature.target_timeout,
            TaskFeature.check_asan_log,
            TaskFeature.check_debugger,
            TaskFeature.check_retry_count,
        ],
        vm=VmDefinition(compare=Compare.AtLeast, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.tools,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.crashes,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Create],
            ),
            ContainerDefinition(
                type=ContainerType.readonly_inputs,
                compare=Compare.AtLeast,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
        ],
        monitor_queue=None,
    ),
    # generic_crash_report: like libfuzzer_crash_report but with
    # asan-log/debugger checking; monitors crashes
    TaskType.generic_crash_report: TaskDefinition(
        features=[
            TaskFeature.target_exe,
            TaskFeature.target_env,
            TaskFeature.target_options,
            TaskFeature.target_timeout,
            TaskFeature.check_asan_log,
            TaskFeature.check_debugger,
            TaskFeature.check_retry_count,
        ],
        vm=VmDefinition(compare=Compare.AtLeast, value=1),
        containers=[
            ContainerDefinition(
                type=ContainerType.setup,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.crashes,
                compare=Compare.Equal,
                value=1,
                permissions=[ContainerPermission.Read, ContainerPermission.List],
            ),
            ContainerDefinition(
                type=ContainerType.reports,
                compare=Compare.AtMost,
                value=1,
                permissions=[ContainerPermission.Create],
            ),
            ContainerDefinition(
                type=ContainerType.unique_reports,
                compare=Compare.AtMost,
                value=1,
                permissions=[ContainerPermission.Create],
            ),
            ContainerDefinition(
                type=ContainerType.no_repro,
                compare=Compare.AtMost,
                value=1,
                permissions=[ContainerPermission.Create],
            ),
        ],
        monitor_queue=ContainerType.crashes,
    ),
}

View File

@ -0,0 +1,231 @@
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
from datetime import datetime, timedelta
from typing import List, Optional, Tuple, Union
from uuid import UUID
from onefuzztypes.enums import ErrorCode, TaskState
from onefuzztypes.models import Error
from onefuzztypes.models import Task as BASE_TASK
from onefuzztypes.models import TaskConfig, TaskVm
from ..azure.creds import get_fuzz_storage
from ..azure.image import get_os
from ..azure.queue import create_queue, delete_queue
from ..orm import MappingIntStrAny, ORMMixin, QueryFilter
from ..pools import Node, Pool, Scaleset
from ..proxy_forward import ProxyForward
class Task(BASE_TASK, ORMMixin):
    """ORM-backed task record with scheduling and lifecycle helpers."""

    def ready_to_schedule(self) -> bool:
        """Return True when every prerequisite task has started.

        If a prerequisite resolved to an Error, this task is marked as
        stopping (recording the error) and False is returned.
        """
        if self.config.prereq_tasks:
            for task_id in self.config.prereq_tasks:
                task = Task.get_by_task_id(task_id)
                # if a prereq task fails, then mark this task as failed
                if isinstance(task, Error):
                    self.error = task
                    self.state = TaskState.stopping
                    self.save()
                    return False
                # NOTE(review): has_started() is called on the enum member;
                # presumably a classmethod returning the started states --
                # confirm against onefuzztypes before changing.
                if task.state not in task.state.has_started():
                    return False
        return True

    @classmethod
    def create(cls, config: TaskConfig, job_id: UUID) -> Union["Task", Error]:
        """Create and persist a new Task, deriving its OS from the vm or pool."""
        if config.vm:
            os = get_os(config.vm.region, config.vm.image)
        elif config.pool:
            pool = Pool.get_by_name(config.pool.pool_name)
            if isinstance(pool, Error):
                return pool
            os = pool.os
        else:
            raise Exception("task must have vm or pool")
        task = cls(config=config, job_id=job_id, os=os)
        task.save()
        return task

    def save_exclude(self) -> Optional[MappingIntStrAny]:
        """Fields excluded when persisting the task record."""
        return {"heartbeats": ...}

    def is_ready(self) -> bool:
        """Return True when every prerequisite task is running.

        Unlike ready_to_schedule, this requires prereqs to be in the
        running state specifically.
        """
        if self.config.prereq_tasks:
            for prereq_id in self.config.prereq_tasks:
                prereq = Task.get_by_task_id(prereq_id)
                if isinstance(prereq, Error):
                    logging.info("task prereq has error: %s - %s", self.task_id, prereq)
                    self.error = prereq
                    self.state = TaskState.stopping
                    self.save()
                    return False
                if prereq.state != TaskState.running:
                    logging.info(
                        "task is waiting on prereq: %s - %s:",
                        self.task_id,
                        prereq.task_id,
                    )
                    return False
        return True

    # At current, the telemetry filter will generate something like this:
    #
    # {
    #     'job_id': 'f4a20fd8-0dcc-4a4e-8804-6ef7df50c978',
    #     'task_id': '835f7b3f-43ad-4718-b7e4-d506d9667b09',
    #     'state': 'stopped',
    #     'config': {
    #         'task': {'type': 'libfuzzer_coverage'},
    #         'vm': {'count': 1}
    #     }
    # }
    def telemetry_include(self) -> Optional[MappingIntStrAny]:
        """Fields included when emitting telemetry."""
        return {
            "job_id": ...,
            "task_id": ...,
            "state": ...,
            "config": {"vm": {"count": ...}, "task": {"type": ...}},
        }

    def event_include(self) -> Optional[MappingIntStrAny]:
        """Fields included when emitting events."""
        return {
            "job_id": ...,
            "task_id": ...,
            "state": ...,
            "error": ...,
        }

    def init(self) -> None:
        """State handler: create the task's queue and move to waiting."""
        create_queue(self.task_id, account_id=get_fuzz_storage())
        self.state = TaskState.waiting
        self.save()

    def stopping(self) -> None:
        """State handler: tear down task resources and move to stopped."""
        # TODO: we need to tell every node currently working on this task to stop
        # TODO: we need to 'unschedule' this task from the existing pools
        self.state = TaskState.stopping
        logging.info("stopping task: %s:%s", self.job_id, self.task_id)
        ProxyForward.remove_forward(self.task_id)
        # NOTE(review): create_queue receives the UUID while delete_queue
        # receives str(task_id) -- confirm both resolve to the same queue.
        delete_queue(str(self.task_id), account_id=get_fuzz_storage())
        Node.stop_task(self.task_id)
        self.state = TaskState.stopped
        self.save()

    def queue_stop(self) -> None:
        """Queue the stopping transition for asynchronous execution."""
        self.queue(method=self.stopping)

    @classmethod
    def search_states(
        cls, *, job_id: Optional[UUID] = None, states: Optional[List[TaskState]] = None
    ) -> List["Task"]:
        """Search tasks, optionally filtered by job id and/or states."""
        query: QueryFilter = {}
        if job_id:
            query["job_id"] = [job_id]
        if states:
            query["state"] = states
        return cls.search(query=query)

    @classmethod
    def search_expired(cls) -> List["Task"]:
        """Return available tasks whose end_time has already passed."""
        time_filter = "end_time lt datetime'%s'" % datetime.utcnow().isoformat()
        return cls.search(
            query={"state": TaskState.available()}, raw_unchecked_filter=time_filter
        )

    @classmethod
    def get_by_task_id(cls, task_id: UUID) -> Union[Error, "Task"]:
        """Look up a task by id, returning an Error if missing or ambiguous."""
        tasks = cls.search(query={"task_id": [task_id]})
        if not tasks:
            return Error(code=ErrorCode.INVALID_REQUEST, errors=["unable to find task"])
        if len(tasks) != 1:
            return Error(
                code=ErrorCode.INVALID_REQUEST, errors=["error identifying task"]
            )
        task = tasks[0]
        return task

    def get_pool(self) -> Optional[Pool]:
        """Resolve the pool this task should run on, or None if unresolvable."""
        if self.config.pool:
            pool = Pool.get_by_name(self.config.pool.pool_name)
            if isinstance(pool, Error):
                logging.info(
                    "unable to schedule task to pool: %s - %s", self.task_id, pool
                )
                return None
            return pool
        elif self.config.vm:
            # pick any pool backing a scaleset matching the vm's sku/image
            scalesets = Scaleset.search()
            scalesets = [
                x
                for x in scalesets
                if x.vm_sku == self.config.vm.sku and x.image == self.config.vm.image
            ]
            for scaleset in scalesets:
                pool = Pool.get_by_name(scaleset.pool_name)
                if isinstance(pool, Error):
                    logging.info(
                        "unable to schedule task to pool: %s - %s",
                        self.task_id,
                        pool,
                    )
                else:
                    return pool

        # FIX: logging.warn is deprecated; use logging.warning
        logging.warning(
            "unable to find a scaleset that matches the task prereqs: %s",
            self.task_id,
        )
        return None

    def get_repro_vm_config(self) -> Union[TaskVm, None]:
        """Derive a TaskVm config for repro VMs from this task's vm or pool."""
        if self.config.vm:
            return self.config.vm

        if self.config.pool is None:
            raise Exception("either pool or vm must be specified: %s" % self.task_id)

        pool = Pool.get_by_name(self.config.pool.pool_name)
        if isinstance(pool, Error):
            logging.info("unable to find pool from task: %s", self.task_id)
            return None

        # use the first scaleset in the pool as the VM template
        for scaleset in Scaleset.search_by_pool(self.config.pool.pool_name):
            return TaskVm(
                region=scaleset.region,
                sku=scaleset.vm_sku,
                image=scaleset.image,
            )

        logging.warning(
            "no scalesets are defined for task: %s:%s", self.job_id, self.task_id
        )
        return None

    def on_start(self) -> None:
        """Record the task deadline and notify the owning job.

        Guarded by end_time so repeated invocations are effectively no-ops.
        """
        # try to keep this effectively idempotent
        if self.end_time is None:
            self.end_time = datetime.utcnow() + timedelta(
                hours=self.config.task.duration
            )
            self.save()

            from ..jobs import Job

            job = Job.get(self.job_id)
            if job:
                job.on_start()

    @classmethod
    def key_fields(cls) -> Tuple[str, str]:
        """Key fields used by the ORM to identify this record."""
        return ("job_id", "task_id")

View File

@ -0,0 +1,94 @@
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
from typing import Dict, List
from uuid import UUID
from onefuzztypes.enums import OS, TaskState
from onefuzztypes.models import WorkSet, WorkUnit
from ..azure.containers import blob_exists, get_container_sas_url, save_blob
from ..azure.creds import get_func_storage
from .config import build_task_config, get_setup_container
from .main import Task
HOURS = 60 * 60
def schedule_tasks() -> None:
    """Schedule every waiting task whose prerequisites have started.

    For each schedulable task: builds the agent TaskUnitConfig, persists it
    as a config blob, wraps it in a singleton WorkSet, and offers the set to
    the task's pool once per requested instance. Tasks without a resolvable
    pool are skipped and retried on a later pass.
    """
    to_schedule: Dict[UUID, List[Task]] = {}
    for task in Task.search_states(states=[TaskState.waiting]):
        if not task.ready_to_schedule():
            continue
        if task.job_id not in to_schedule:
            to_schedule[task.job_id] = []
        to_schedule[task.job_id].append(task)

    for tasks in to_schedule.values():
        # TODO: for now, we're only scheduling one task per VM.
        for task in tasks:
            logging.info("scheduling task: %s", task.task_id)
            agent_config = build_task_config(task.job_id, task.task_id, task.config)

            setup_container = get_setup_container(task.config)
            setup_url = get_container_sas_url(setup_container, read=True, list=True)

            # select the OS-appropriate setup script, if one was uploaded
            setup_script = None
            if task.os == OS.windows and blob_exists(setup_container, "setup.ps1"):
                setup_script = "setup.ps1"
            if task.os == OS.linux and blob_exists(setup_container, "setup.sh"):
                setup_script = "setup.sh"

            save_blob(
                "task-configs",
                "%s/config.json" % task.task_id,
                agent_config.json(),
                account_id=get_func_storage(),
            )

            reboot = False
            count = 1
            if task.config.pool:
                count = task.config.pool.count
                reboot = task.config.task.reboot_after_setup is True
            elif task.config.vm:
                # this branch should go away when we stop letting people specify
                # VM configs directly.
                count = task.config.vm.count
                reboot = (
                    task.config.vm.reboot_after_setup is True
                    or task.config.task.reboot_after_setup is True
                )

            # FIX: dropped the redundant task_config/task_config_json aliases
            # of agent_config
            work_unit = WorkUnit(
                job_id=agent_config.job_id,
                task_id=agent_config.task_id,
                task_type=agent_config.task_type,
                config=agent_config.json(),
            )

            # For now, only offer singleton work sets.
            work_set = WorkSet(
                reboot=reboot,
                script=(setup_script is not None),
                setup_url=setup_url,
                work_units=[work_unit],
            )

            pool = task.get_pool()
            if not pool:
                logging.info("unable to find pool for task: %s", task.task_id)
                continue

            for _ in range(count):
                pool.schedule_workset(work_set)
            task.state = TaskState.scheduled
            task.save()