Mirror of https://github.com/microsoft/onefuzz.git (synced 2025-06-16 11:58:09 +00:00)
Remove generic reset command (#1511)
* Remove generic reset command: remove the ability to reset containers by type. Work item #1508
* Test to see if we are getting timeouts when attempting to get a queue message.
* Remove exception handling for the timeout.
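For orientation, a rough sketch of the two SDK entry points deleted in the hunks below, as they were called before this commit. The client construction and the specific ContainerType value are illustrative assumptions; the import paths follow the onefuzz CLI package layout.

from onefuzz.api import Onefuzz  # assumed import path for the SDK client
from onefuzztypes.enums import ContainerType  # assumed import path for the enum

o = Onefuzz()  # assumes an already-configured CLI/SDK environment

# Deleted in the first hunk below: per-type container reset.
o.containers.reset(container_types=[ContainerType.crashes], yes=True)

# Deleted in the second hunk below: the generic top-level reset.
o.reset(everything=True, yes=True)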
@@ -448,37 +448,6 @@ class Containers(Endpoint):
         self.logger.debug("list containers")
         return self._req_model_list("GET", responses.ContainerInfoBase)

-    def reset(
-        self,
-        *,
-        container_types: Optional[
-            List[enums.ContainerType]
-        ] = enums.ContainerType.reset_defaults(),
-        yes: bool = False,
-    ) -> None:
-        """
-        Reset containers by container type (NOTE: This may cause unexpected issues with existing fuzzing jobs)
-        """
-        if not container_types:
-            return
-
-        message = "Confirm deleting container types: %s (specify y or n): " % (
-            ",".join(x.name for x in container_types)
-        )
-        if not yes and not user_confirmation(message):
-            self.logger.warning("not deleting containers")
-            return
-
-        for container in self.list():
-            if (
-                container.metadata
-                and "container_type" in container.metadata
-                and enums.ContainerType(container.metadata["container_type"])
-                in container_types
-            ):
-                self.logger.info("removing container: %s", container.name)
-                self.delete(container.name)
-

 class Repro(Endpoint):
     """Interact with Reproduction VMs"""
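With Containers.reset gone, the same per-type cleanup can still be written by hand against the operations the removed code itself relied on (self.list() and self.delete(), both visible in the hunk above). A minimal sketch, assuming the import paths used by the onefuzz CLI package and an already-configured client:

from typing import List

from onefuzz.api import Onefuzz  # assumed import path for the SDK client
from onefuzztypes.enums import ContainerType  # assumed import path for the enum


def delete_containers_by_type(
    onefuzz: Onefuzz, container_types: List[ContainerType]
) -> None:
    # Same walk the removed reset() performed: inspect each container's
    # metadata and delete it if its recorded container_type matches.
    for container in onefuzz.containers.list():
        metadata = container.metadata or {}
        if "container_type" not in metadata:
            continue
        if ContainerType(metadata["container_type"]) in container_types:
            onefuzz.containers.delete(container.name)

As the deleted docstring warned, this is destructive and may cause unexpected issues for fuzzing jobs that still reference the deleted containers.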
@@ -1783,148 +1752,6 @@ class Onefuzz:

         return data

-    def _delete_components(
-        self,
-        *,
-        containers: bool = False,
-        jobs: bool = False,
-        notifications: bool = False,
-        pools: bool = False,
-        repros: bool = False,
-        scalesets: bool = False,
-        tasks: bool = False,
-        webhooks: bool = False,
-    ) -> None:
-        if jobs:
-            for job in self.jobs.list():
-                self.logger.info("stopping job %s", job.job_id)
-                self.jobs.delete(job.job_id)
-
-        if tasks:
-            for task in self.tasks.list():
-                self.logger.info("stopping task %s", task.task_id)
-                self.tasks.delete(task.task_id)
-
-        if notifications:
-            for notification in self.notifications.list():
-                self.logger.info(
-                    "stopping notification %s", notification.notification_id
-                )
-                self.notifications.delete(notification.notification_id)
-
-        if repros:
-            for vm in self.repro.list():
-                self.repro.delete(str(vm.vm_id))
-
-        if pools:
-            for pool in self.pools.list():
-                self.logger.info("stopping pool: %s", pool.name)
-                self.pools.shutdown(pool.name, now=True)
-
-        if scalesets:
-            for scaleset in self.scalesets.list():
-                self.logger.info("stopping scaleset: %s", scaleset.scaleset_id)
-                self.scalesets.shutdown(scaleset.scaleset_id, now=True)
-
-        if containers:
-            self.containers.reset(yes=True)
-
-        if webhooks:
-            for webhook in self.webhooks.list():
-                self.logger.info("removing webhook: %s", webhook.webhook_id)
-                self.webhooks.delete(webhook.webhook_id)
-
-    def reset(
-        self,
-        *,
-        containers: bool = False,
-        everything: bool = False,
-        jobs: bool = False,
-        notifications: bool = False,
-        pools: bool = False,
-        repros: bool = False,
-        scalesets: bool = False,
-        tasks: bool = False,
-        webhooks: bool = False,
-        yes: bool = False,
-    ) -> None:
-        """
-        Resets onefuzz. Stops all jobs, notifications, and repro VMs.
-        Specifying 'everything' will delete all containers, pools, and managed
-        scalesets.
-
-        :param bool containers: Delete all the containers.
-        :param bool everything: Delete all containers, pools and managed scalesets.
-        :param bool jobs: Stop all jobs.
-        :param bool notifications: Stop all notifications.
-        :param bool pools: Delete all pools.
-        :param bool repros: Delete all repro vms.
-        :param bool scalesets: Delete all managed scalesets.
-        :param bool tasks: Stop all tasks.
-        :param bool webhooks: Stop all webhooks.
-        :param bool yes: Ignoring to specify "y" in prompt.
-        """
-
-        if everything:
-            (
-                containers,
-                jobs,
-                pools,
-                notifications,
-                repros,
-                scalesets,
-                tasks,
-                webhooks,
-            ) = (
-                True,
-                True,
-                True,
-                True,
-                True,
-                True,
-                True,
-                True,
-            )
-        elif not any(
-            [containers, jobs, pools, notifications, repros, scalesets, tasks]
-        ):
-            jobs, notifications, repros, tasks = True, True, True, True
-
-        if containers and not (tasks or jobs):
-            raise Exception("Resetting containers requires resetting jobs or tasks")
-
-        to_delete = []
-        argument_str = {
-            "jobs",
-            "tasks",
-            "notifications",
-            "pools",
-            "scalesets",
-            "repros",
-            "containers",
-            "webhooks",
-        }
-        for k, v in locals().items():
-            if k in argument_str and v:
-                to_delete.append(k)
-        message = "Confirm stopping %s (specify y or n): " % (
-            ", ".join(sorted(to_delete))
-        )
-        if not yes and not user_confirmation(message):
-            self.logger.warning("not resetting")
-            return
-
-        self._delete_components(
-            containers=containers,
-            jobs=jobs,
-            notifications=notifications,
-            pools=pools,
-            repros=repros,
-            scalesets=scalesets,
-            tasks=tasks,
-            webhooks=webhooks,
-        )
-
     def _warn_preview(self, feature: PreviewFeature) -> None:
         self.logger.warning(
             "%s are a preview-feature and may change in an upcoming release",
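With the generic reset removed, its default behaviour (stop all jobs, tasks, notifications, and repro VMs) has to be spelled out by the caller. A minimal sketch using only the calls that appeared in the deleted _delete_components body, with the same import-path assumptions as above:

from onefuzz.api import Onefuzz  # assumed import path for the SDK client


def stop_all_work(onefuzz: Onefuzz) -> None:
    # Rough equivalent of the removed reset() with no flags: stop jobs, tasks,
    # notifications, and repro VMs, leaving pools, scalesets, containers, and
    # webhooks untouched.
    for job in onefuzz.jobs.list():
        onefuzz.jobs.delete(job.job_id)

    for task in onefuzz.tasks.list():
        onefuzz.tasks.delete(task.task_id)

    for notification in onefuzz.notifications.list():
        onefuzz.notifications.delete(notification.notification_id)

    for vm in onefuzz.repro.list():
        onefuzz.repro.delete(str(vm.vm_id))

Unlike the deleted command, this sketch performs no confirmation prompt, so it should be wrapped in whatever safeguard the caller needs.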