Expand valid scaleset names (#3045)

Scaleset names are now permitted to be any (valid) string, instead of only GUIDs. When we generate a scaleset name, it is now based upon the pool name; for example, the pool `pool` might get a scaleset named `pool-3b24ba211cad4b078655914754485838`.

This should be backwards-compatible since GUIDs are [already serialized to table storage as strings](dddcfa4949/src/ApiService/ApiService/onefuzzlib/orm/EntityConverter.cs (L190-L191)), so this simply loosens the restrictions placed upon them.

Scaleset IDs now have a strong type in the same way as other IDs; this helps to avoid mixing them up with other strings. Because of this, I found one bug in the scaleset search query logic, caused by Pool ID/VMSS ID confusion. As part of fixing this, I've changed the scaleset search query to return only nodes from the table, rather than querying Azure to find a list; this seems to be sufficient for the CLI.
This commit is contained in:
George Pollard
2023-05-17 09:58:58 +12:00
committed by GitHub
parent d84b72b5fd
commit 2f478d6c0b
38 changed files with 380 additions and 291 deletions

View File

@ -1451,11 +1451,10 @@ class Node(Endpoint):
self,
*,
state: Optional[List[enums.NodeState]] = None,
scaleset_id: Optional[UUID_EXPANSION] = None,
scaleset_id: Optional[str] = None,
pool_name: Optional[primitives.PoolName] = None,
) -> List[models.Node]:
self.logger.debug("list nodes")
scaleset_id_expanded: Optional[UUID] = None
if pool_name is not None:
pool_name = primitives.PoolName(
@ -1467,18 +1466,11 @@ class Node(Endpoint):
)
)
if scaleset_id is not None:
scaleset_id_expanded = self._disambiguate_uuid(
"scaleset_id",
scaleset_id,
lambda: [str(x.scaleset_id) for x in self.onefuzz.scalesets.list()],
)
return self._req_model_list(
"GET",
models.Node,
data=requests.NodeSearch(
scaleset_id=scaleset_id_expanded, state=state, pool_name=pool_name
scaleset_id=scaleset_id, state=state, pool_name=pool_name
),
)
@ -1510,7 +1502,7 @@ class Scaleset(Endpoint):
def _expand_scaleset_machine(
self,
scaleset_id: UUID_EXPANSION,
scaleset_id: str,
machine_id: UUID_EXPANSION,
*,
include_auth: bool = False,
@ -1577,54 +1569,32 @@ class Scaleset(Endpoint):
),
)
def shutdown(
self, scaleset_id: UUID_EXPANSION, *, now: bool = False
) -> responses.BoolResult:
scaleset_id_expanded = self._disambiguate_uuid(
"scaleset_id",
scaleset_id,
lambda: [str(x.scaleset_id) for x in self.list()],
)
self.logger.debug("shutdown scaleset: %s (now: %s)", scaleset_id_expanded, now)
def shutdown(self, scaleset_id: str, *, now: bool = False) -> responses.BoolResult:
self.logger.debug("shutdown scaleset: %s (now: %s)", scaleset_id, now)
return self._req_model(
"DELETE",
responses.BoolResult,
data=requests.ScalesetStop(scaleset_id=scaleset_id_expanded, now=now),
data=requests.ScalesetStop(scaleset_id=scaleset_id, now=now),
)
def get(
self, scaleset_id: UUID_EXPANSION, *, include_auth: bool = False
) -> models.Scaleset:
def get(self, scaleset_id: str, *, include_auth: bool = False) -> models.Scaleset:
self.logger.debug("get scaleset: %s", scaleset_id)
scaleset_id_expanded = self._disambiguate_uuid(
"scaleset_id",
scaleset_id,
lambda: [str(x.scaleset_id) for x in self.list()],
)
return self._req_model(
"GET",
models.Scaleset,
data=requests.ScalesetSearch(
scaleset_id=scaleset_id_expanded, include_auth=include_auth
scaleset_id=scaleset_id, include_auth=include_auth
),
)
def update(
self, scaleset_id: UUID_EXPANSION, *, size: Optional[int] = None
self, scaleset_id: str, *, size: Optional[int] = None
) -> models.Scaleset:
self.logger.debug("update scaleset: %s", scaleset_id)
scaleset_id_expanded = self._disambiguate_uuid(
"scaleset_id",
scaleset_id,
lambda: [str(x.scaleset_id) for x in self.list()],
)
return self._req_model(
"PATCH",
models.Scaleset,
data=requests.ScalesetUpdate(scaleset_id=scaleset_id_expanded, size=size),
data=requests.ScalesetUpdate(scaleset_id=scaleset_id, size=size),
)
def list(
@ -1645,7 +1615,7 @@ class ScalesetProxy(Endpoint):
def delete(
self,
scaleset_id: UUID_EXPANSION,
scaleset_id: str,
machine_id: UUID_EXPANSION,
*,
dst_port: Optional[int] = None,
@ -1681,7 +1651,7 @@ class ScalesetProxy(Endpoint):
)
def get(
self, scaleset_id: UUID_EXPANSION, machine_id: UUID_EXPANSION, dst_port: int
self, scaleset_id: str, machine_id: UUID_EXPANSION, dst_port: int
) -> responses.ProxyGetResult:
"""Get information about a specific job"""
(
@ -1705,7 +1675,7 @@ class ScalesetProxy(Endpoint):
def create(
self,
scaleset_id: UUID_EXPANSION,
scaleset_id: str,
machine_id: UUID_EXPANSION,
dst_port: int,
*,

View File

@ -103,7 +103,7 @@ class DebugScaleset(Command):
"""Debug tasks"""
def _get_proxy_setup(
self, scaleset_id: UUID, machine_id: UUID, port: int, duration: Optional[int]
self, scaleset_id: str, machine_id: UUID, port: int, duration: Optional[int]
) -> Tuple[bool, str, Optional[Tuple[str, int]]]:
proxy = self.onefuzz.scaleset_proxy.create(
scaleset_id, machine_id, port, duration=duration
@ -115,7 +115,7 @@ class DebugScaleset(Command):
def rdp(
self,
scaleset_id: UUID_EXPANSION,
scaleset_id: str,
machine_id: UUID_EXPANSION,
duration: Optional[int] = 1,
) -> None:
@ -144,7 +144,7 @@ class DebugScaleset(Command):
def ssh(
self,
scaleset_id: UUID_EXPANSION,
scaleset_id: str,
machine_id: UUID_EXPANSION,
duration: Optional[int] = 1,
command: Optional[str] = None,
@ -185,7 +185,7 @@ class DebugTask(Command):
def _get_node(
self, task_id: UUID_EXPANSION, node_id: Optional[UUID]
) -> Tuple[UUID, UUID]:
) -> Tuple[str, UUID]:
nodes = self.list_nodes(task_id)
if not nodes:
raise Exception("task is not currently executing on nodes")