work around issue with discriminated typed unions (#939)

We're experiencing a bug where Unions of sub-models are getting downcast, which causes a loss of information.  

As an example, EventScalesetCreated was getting downcast to EventScalesetDeleted. I have not figured out why, nor can I replicate it locally to minimize the bug to send upstream, but I was able to reliably replicate it on the service.

While working through this issue, I noticed that deserialization of SignalR events was frequently wrong, leaving entries such as tasks shown as "init" in `status top`.

Both of these issues involve Unions of models with a type field, so they are likely related.
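
For context, the downcast described above is the failure mode you get when pydantic v1 validates a Union by trying each member in order and keeping the first one that succeeds. The following is a minimal sketch of that behavior using hypothetical stand-in models, not the actual onefuzz event types:

```python
# Sketch only: hypothetical models illustrating pydantic v1's default
# left-to-right Union matching; these are not the onefuzztypes definitions.
from typing import Union

from pydantic import BaseModel


class ScalesetDeleted(BaseModel):
    scaleset_id: str


class ScalesetCreated(BaseModel):
    scaleset_id: str
    size: int


class Message(BaseModel):
    # The narrower model is listed first, so it can "win" for richer payloads.
    event: Union[ScalesetDeleted, ScalesetCreated]


raw = {"event": {"scaleset_id": "example", "size": 10}}
msg = Message.parse_obj(raw)

# ScalesetDeleted validates first (extra keys are ignored by default), so the
# payload is silently downcast and `size` is lost.
print(type(msg.event).__name__)  # ScalesetDeleted
```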
Author:  bmc-msft
Date:    2021-06-02 12:40:58 -04:00 (committed via GitHub)
Parent:  3d191c3c5d
Commit:  a92c84d42a

7 changed files with 81 additions and 39 deletions


@@ -3,6 +3,7 @@
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
 
+import json
 import logging
 from datetime import datetime
 from enum import Enum
@@ -15,7 +16,6 @@ from onefuzztypes.events import (
     EventFileAdded,
     EventJobCreated,
     EventJobStopped,
-    EventMessage,
     EventNodeCreated,
     EventNodeDeleted,
     EventNodeStateUpdated,
@@ -26,6 +26,7 @@ from onefuzztypes.events import (
     EventTaskStateUpdated,
     EventTaskStopped,
     EventType,
+    parse_event_message,
 )
 from onefuzztypes.models import (
     Job,
@@ -152,31 +153,43 @@ class TopCache:
         self.add_files_set(name, set(files.files))
 
-    def add_message(self, message: EventMessage) -> None:
-        events = {
-            EventPoolCreated: lambda x: self.pool_created(x),
-            EventPoolDeleted: lambda x: self.pool_deleted(x),
-            EventTaskCreated: lambda x: self.task_created(x),
-            EventTaskStopped: lambda x: self.task_stopped(x),
-            EventTaskFailed: lambda x: self.task_stopped(x),
-            EventTaskStateUpdated: lambda x: self.task_state_updated(x),
-            EventJobCreated: lambda x: self.job_created(x),
-            EventJobStopped: lambda x: self.job_stopped(x),
-            EventNodeStateUpdated: lambda x: self.node_state_updated(x),
-            EventNodeCreated: lambda x: self.node_created(x),
-            EventNodeDeleted: lambda x: self.node_deleted(x),
-            EventCrashReported: lambda x: self.file_added(x),
-            EventFileAdded: lambda x: self.file_added(x),
-        }
+    def add_message(self, message_obj: Any) -> None:
+        message = parse_event_message(message_obj)
 
-        for event_cls in events:
-            if isinstance(message.event, event_cls):
-                events[event_cls](message.event)
+        event = message.event
+        if isinstance(event, EventPoolCreated):
+            self.pool_created(event)
+        elif isinstance(event, EventPoolDeleted):
+            self.pool_deleted(event)
+        elif isinstance(event, EventTaskCreated):
+            self.task_created(event)
+        elif isinstance(event, EventTaskStopped):
+            self.task_stopped(event)
+        elif isinstance(event, EventTaskFailed):
+            self.task_failed(event)
+        elif isinstance(event, EventTaskStateUpdated):
+            self.task_state_updated(event)
+        elif isinstance(event, EventJobCreated):
+            self.job_created(event)
+        elif isinstance(event, EventJobStopped):
+            self.job_stopped(event)
+        elif isinstance(event, EventNodeStateUpdated):
+            self.node_state_updated(event)
+        elif isinstance(event, EventNodeCreated):
+            self.node_created(event)
+        elif isinstance(event, EventNodeDeleted):
+            self.node_deleted(event)
+        elif isinstance(event, (EventCrashReported, EventFileAdded)):
+            self.file_added(event)
 
         self.last_update = datetime.now()
         messages = [x for x in self.messages][-99:]
         messages += [
-            (datetime.now(), message.event_type, message.event.json(exclude_none=True))
+            (
+                datetime.now(),
+                message.event_type,
+                json.dumps(message_obj, sort_keys=True),
+            )
         ]
         self.messages = messages
@@ -301,6 +314,10 @@
         if event.task_id in self.tasks:
             del self.tasks[event.task_id]
 
+    def task_failed(self, event: EventTaskFailed) -> None:
+        if event.task_id in self.tasks:
+            del self.tasks[event.task_id]
+
     def render_tasks(self) -> List:
         results = []
         for task in self.tasks.values():
@@ -352,6 +369,11 @@
         if event.job_id in self.jobs:
             del self.jobs[event.job_id]
 
+        to_remove = [x.task_id for x in self.tasks.values() if x.job_id == event.job_id]
+        for task_id in to_remove:
+            del self.tasks[task_id]
+
     def render_jobs(self) -> List[Tuple]:
         results: List[Tuple] = []


@@ -9,8 +9,6 @@ from queue import PriorityQueue
 from threading import Thread
 from typing import Any, Optional
 
-from onefuzztypes.events import EventMessage
-
 from .cache import JobFilter, TopCache
 from .signalr import Stream
 from .top_view import render
@@ -50,8 +48,7 @@ class Top:
 
     def handler(self, message: Any) -> None:
         for event_raw in message:
-            message = EventMessage.parse_obj(event_raw)
-            self.cache.add_message(message)
+            self.cache.add_message(event_raw)
 
     def setup(self) -> Stream:
         client = Stream(self.onefuzz, self.logger)
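
The diff above relies on `parse_event_message` from onefuzztypes.events, which is not shown here. A rough sketch of the underlying idea, using hypothetical models and a hypothetical registry rather than the actual onefuzztypes implementation: pick the concrete event model from the explicit type tag and validate against it directly, instead of letting pydantic guess the Union member.

```python
# Sketch only: illustrates parsing by an explicit type tag; the models and
# registry names here are hypothetical, not the onefuzztypes code.
from typing import Any, Dict, Type

from pydantic import BaseModel


class NodeCreated(BaseModel):
    machine_id: str


class NodeDeleted(BaseModel):
    machine_id: str


# Hypothetical registry mapping the wire-level type tag to a concrete model.
EVENT_MODELS: Dict[str, Type[BaseModel]] = {
    "node_created": NodeCreated,
    "node_deleted": NodeDeleted,
}


def parse_event(raw: Dict[str, Any]) -> BaseModel:
    # Dispatch on the type tag so the payload is never coerced into a
    # sibling model that merely happens to validate first.
    event_cls = EVENT_MODELS[raw["event_type"]]
    return event_cls.parse_obj(raw["event"])
```

With parsing anchored to the type tag, a consumer such as `TopCache.add_message` can accept the raw SignalR payload and dispatch on an already-concrete event instance, which is what the `isinstance` chain in the diff does.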