cleanup logging in multiple functions (#73)

Author: bmc-msft
Date: 2020-10-01 23:31:08 -04:00
Committed by: GitHub
parent 8b6ea9f66b
commit f72543dde2
6 changed files with 18 additions and 11 deletions

View File

@@ -58,7 +58,7 @@ def get_storage_account_name_key(account_id: Optional[str] = None) -> Tuple[str,
 @cached(ttl=60)
 def get_blob_service(account_id: Optional[str] = None) -> BlockBlobService:
-    logging.info("getting blob container (account_id: %s)", account_id)
+    logging.debug("getting blob container (account_id: %s)", account_id)
     name, key = get_storage_account_name_key(account_id)
     service = BlockBlobService(account_name=name, account_key=key)
     return service
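These messages are demoted from info to debug, so under the usual WARNING/INFO thresholds they no longer appear. A minimal, standard-library-only sketch (not part of this commit) of surfacing them again while debugging locally:

    import logging

    # show debug-level records from every module, including the storage helpers above
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s %(levelname)s %(name)s: %(message)s",
    )

    logging.debug("getting blob container (account_id: %s)", "example-account-id")

Note that the helpers keep %-style lazy formatting, so the message string is only built if the record passes the logger's level check.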

View File

@@ -26,7 +26,7 @@ QueueNameType = Union[str, UUID]
 @cached(ttl=60)
 def get_queue_client(account_id: str) -> QueueServiceClient:
-    logging.info("getting blob container (account_id: %s)", account_id)
+    logging.debug("getting blob container (account_id: %s)", account_id)
     name, key = get_storage_account_name_key(account_id)
     account_url = "https://%s.queue.core.windows.net" % name
     client = QueueServiceClient(
@@ -36,6 +36,7 @@ def get_queue_client(account_id: str) -> QueueServiceClient:
     return client


+@cached(ttl=60)
 def get_queue_sas(
     queue: QueueNameType,
     *,
@@ -45,7 +46,7 @@ def get_queue_sas(
     update: bool = False,
     process: bool = False,
 ) -> str:
-    logging.info("getting queue sas %s (account_id: %s)", queue, account_id)
+    logging.debug("getting queue sas %s (account_id: %s)", queue, account_id)
     name, key = get_storage_account_name_key(account_id)
     expiry = datetime.datetime.utcnow() + datetime.timedelta(days=30)
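get_queue_sas now carries the same @cached(ttl=60) decorator as the client getters, so repeated callers within a minute reuse one SAS token instead of re-deriving it. The cached decorator itself is not defined in this diff; as a rough sketch of the TTL behavior @cached(ttl=60) relies on (all names and details here are illustrative, not the project's implementation):

    import functools
    import time
    from typing import Any, Callable, Dict, Tuple


    def cached(*, ttl: float) -> Callable:
        # memoize results per argument tuple, recomputing once an entry is older than ttl seconds
        def decorator(func: Callable) -> Callable:
            entries: Dict[Tuple, Tuple[float, Any]] = {}

            @functools.wraps(func)
            def wrapper(*args: Any, **kwargs: Any) -> Any:
                key = (args, tuple(sorted(kwargs.items())))
                now = time.monotonic()
                hit = entries.get(key)
                if hit is not None and now - hit[0] < ttl:
                    return hit[1]
                value = func(*args, **kwargs)
                entries[key] = (now, value)
                return value

            return wrapper

        return decorator

Reusing a SAS for up to 60 seconds is harmless here because the token is generated with a 30-day expiry, far beyond the cache lifetime.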

View File

@@ -20,7 +20,7 @@ def get_client(
     if account_id is None:
         account_id = os.environ["ONEFUZZ_FUNC_STORAGE"]

-    logging.info("getting table account: (account_id: %s)", account_id)
+    logging.debug("getting table account: (account_id: %s)", account_id)
     name, key = get_storage_account_name_key(account_id)
     client = TableService(account_name=name, account_key=key)

View File

@@ -21,14 +21,20 @@ HOURS = 60 * 60
 def schedule_tasks() -> None:
     to_schedule: Dict[UUID, List[Task]] = {}

+    not_ready_count = 0
     for task in Task.search_states(states=[TaskState.waiting]):
         if not task.ready_to_schedule():
+            not_ready_count += 1
             continue
         if task.job_id not in to_schedule:
             to_schedule[task.job_id] = []
         to_schedule[task.job_id].append(task)

+    if not to_schedule and not_ready_count > 0:
+        logging.info("tasks not ready: %d", not_ready_count)
+
     for tasks in to_schedule.values():
         # TODO: for now, we're only scheduling one task per VM.
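The new counter produces output only when every waiting task was skipped, so a queue that is blocked but idle becomes visible without adding per-task noise. A self-contained sketch of that behavior (Task here is a simplified stand-in for the real model, not the project's class):

    import logging
    from typing import Dict, List
    from uuid import UUID, uuid4

    logging.basicConfig(level=logging.INFO)


    class Task:
        # simplified stand-in for the real Task model
        def __init__(self, job_id: UUID, ready: bool) -> None:
            self.job_id = job_id
            self.ready = ready

        def ready_to_schedule(self) -> bool:
            return self.ready


    def schedule_tasks(waiting: List[Task]) -> Dict[UUID, List[Task]]:
        to_schedule: Dict[UUID, List[Task]] = {}
        not_ready_count = 0

        for task in waiting:
            if not task.ready_to_schedule():
                not_ready_count += 1
                continue
            if task.job_id not in to_schedule:
                to_schedule[task.job_id] = []
            to_schedule[task.job_id].append(task)

        # only worth reporting when nothing at all could be scheduled
        if not to_schedule and not_ready_count > 0:
            logging.info("tasks not ready: %d", not_ready_count)
        return to_schedule


    job_id = uuid4()
    schedule_tasks([Task(job_id, ready=False), Task(job_id, ready=False)])  # logs "tasks not ready: 2"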

View File

@@ -78,8 +78,6 @@ def execute_update(update: Update) -> None:
     if update.update_type == UpdateType.Scaleset:
         return

-    logging.info("performing queued update: %s", update)
-
     if update.update_type in update_objects:
         if update.PartitionKey is None or update.RowKey is None:
             raise Exception("unsupported update: %s" % update)
@@ -90,6 +88,7 @@ def execute_update(update: Update) -> None:
             return

         if update.method and hasattr(obj, update.method):
+            logging.info("performing queued update: %s", update)
             getattr(obj, update.method)()
             return
         else:
@@ -99,8 +98,13 @@ def execute_update(update: Update) -> None:
                 return

             func = getattr(obj, state.name, None)
             if func is None:
-                logging.info("no function to implement state: %s", update)
+                logging.debug(
+                    "no function to implement state: %s - %s", update, state.name
+                )
                 return

+            logging.info(
+                "performing queued update for state: %s - %s", update, state.name
+            )
             func()
             return
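execute_update resolves a handler either from an explicit update.method or from the name of the object's current state; this commit moves the info log next to each actual invocation and demotes the "no function to implement state" case to debug. A stripped-down sketch of that dispatch shape (the Node/NodeState types are hypothetical stand-ins, not the real models):

    import logging
    from enum import Enum
    from typing import Optional

    logging.basicConfig(level=logging.DEBUG)


    class NodeState(Enum):
        init = "init"
        busy = "busy"


    class Node:
        # hypothetical stand-in: handlers are methods named after states
        state = NodeState.init

        def init(self) -> None:
            logging.info("running init handler")


    def execute_update(obj: Node, method: Optional[str] = None) -> None:
        if method and hasattr(obj, method):
            # explicit method update: log immediately before invoking it
            logging.info("performing queued update: %s", method)
            getattr(obj, method)()
            return

        func = getattr(obj, obj.state.name, None)
        if func is None:
            # no handler for this state is routine, so debug is enough
            logging.debug("no function to implement state: %s", obj.state.name)
            return

        logging.info("performing queued update for state: %s", obj.state.name)
        func()


    execute_update(Node())  # resolves and runs Node.init via the state name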

View File

@@ -3,8 +3,6 @@
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.

-import logging
-
 import azure.functions as func

 from ..onefuzzlib.dashboard import get_event
@@ -12,8 +10,6 @@ from ..onefuzzlib.tasks.scheduler import schedule_tasks

 def main(mytimer: func.TimerRequest, dashboard: func.Out[str]) -> None:  # noqa: F841
-    logging.info("scheduling waiting tasks")
-
     schedule_tasks()
     event = get_event()