Mirror of https://github.com/GNS3/gns3-server.git
Merge pull request #2053 from GNS3/image-md5sum-management

New images detection and checksum handling

Commit 2ee9232ac5
@@ -506,7 +506,7 @@ class BaseManager:
         """

         try:
-            return list_images(self._NODE_TYPE)
+            return await list_images(self._NODE_TYPE)
         except OSError as e:
             raise ComputeError(f"Can not list images {e}")

@@ -1057,10 +1057,10 @@ class QemuVM(BaseNode):
         # In case user upload image manually we don't have md5 sums.
         # We need generate hashes at this point, otherwise they will be generated
         # at asdict but not on separate thread.
-        await cancellable_wait_run_in_executor(md5sum, self._hda_disk_image)
-        await cancellable_wait_run_in_executor(md5sum, self._hdb_disk_image)
-        await cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image)
-        await cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image)
+        await cancellable_wait_run_in_executor(md5sum, self._hda_disk_image, self.working_dir)
+        await cancellable_wait_run_in_executor(md5sum, self._hdb_disk_image, self.working_dir)
+        await cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image, self.working_dir)
+        await cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image, self.working_dir)

         super().create()

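The four disk hashes above go through the project's `cancellable_wait_run_in_executor` helper so the event loop is never blocked on a large image. A minimal sketch of the underlying idea using only the standard library (`run_in_executor` stands in for the project helper; the file path is illustrative):

```python
import asyncio
import hashlib

def blocking_md5(path: str) -> str:
    # CPU/IO-bound work that must not run on the event loop thread
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1024), b""):
            md5.update(chunk)
    return md5.hexdigest()

async def md5_off_loop(path: str) -> str:
    # Offload the blocking hash to the default thread pool executor
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, blocking_md5, path)

# asyncio.run(md5_off_loop("hda_disk.qcow2"))
```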
@@ -153,8 +153,14 @@ class ApplianceManager:
                             version_images[appliance_key] = image_in_db.filename
                         else:
                             # check if the image is on disk
+                            # FIXME: still necessary? the image should have been discovered and saved in the db already
                             image_path = os.path.join(image_dir, appliance_file)
-                            if os.path.exists(image_path) and await wait_run_in_executor(md5sum, image_path) == image_checksum:
+                            if os.path.exists(image_path) and \
+                                    await wait_run_in_executor(
+                                        md5sum,
+                                        image_path,
+                                        cache_to_md5file=False
+                                    ) == image_checksum:
                                 async with aiofiles.open(image_path, "rb") as f:
                                     await write_image(appliance_file, image_path, f, images_repo)
                             else:
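The import path above reads the matched image with `aiofiles` so the event loop never blocks on disk I/O; `write_image` is the project's own helper. A rough, self-contained sketch of the same async streaming pattern, with illustrative paths:

```python
import asyncio
import aiofiles

async def copy_image(src: str, dst: str, chunk_size: int = 64 * 1024) -> None:
    # Stream the file in chunks; every read/write yields to the event loop
    async with aiofiles.open(src, "rb") as fsrc, aiofiles.open(dst, "wb") as fdst:
        while True:
            chunk = await fsrc.read(chunk_size)
            if not chunk:
                break
            await fdst.write(chunk)

# asyncio.run(copy_image("appliance.qcow2", "/tmp/images/appliance.qcow2"))
```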
@@ -630,9 +630,6 @@ class Compute:

         try:
             if type in ["qemu", "dynamips", "iou"]:
-                # for local_image in list_images(type):
-                #     if local_image['filename'] not in [i['filename'] for i in images]:
-                #         images.append(local_image)
                 images = sorted(images, key=itemgetter("filename"))
             else:
                 images = sorted(images, key=itemgetter("image"))
@@ -24,7 +24,8 @@ from gns3server.controller import Controller
 from gns3server.compute import MODULES
 from gns3server.compute.port_manager import PortManager
 from gns3server.utils.http_client import HTTPClient
-from gns3server.db.tasks import connect_to_db, disconnect_from_db, get_computes
+from gns3server.db.tasks import connect_to_db, get_computes, disconnect_from_db, discover_images_on_filesystem


 import logging

@@ -59,7 +60,9 @@ def create_startup_handler(app: FastAPI) -> Callable:
         # computing with server start
         from gns3server.compute.qemu import Qemu

-        asyncio.ensure_future(Qemu.instance().list_images())
+        # Start the discovering new images on file system 5 seconds after the server has started
+        # to give it a chance to process API requests
+        loop.call_later(5, asyncio.create_task, discover_images_on_filesystem(app))

         for module in MODULES:
             log.debug(f"Loading module {module.__name__}")
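`loop.call_later()` expects a plain callback, so the diff passes `asyncio.create_task` as that callback: the coroutine object is created immediately, but it is only wrapped into a running task five seconds later. A self-contained sketch of the idiom (the `discover` coroutine is illustrative):

```python
import asyncio

async def discover() -> None:
    print("discovering images...")

async def main() -> None:
    loop = asyncio.get_running_loop()
    # The callback fires in 5 seconds; create_task then schedules the coroutine
    loop.call_later(5, asyncio.create_task, discover())
    await asyncio.sleep(6)  # keep the loop alive long enough for the demo

asyncio.run(main())
```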
@@ -15,11 +15,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

+import asyncio
+import signal
 import os

 from fastapi import FastAPI
-from fastapi.encoders import jsonable_encoder
 from pydantic import ValidationError
+from watchfiles import awatch, Change

 from typing import List
 from sqlalchemy import event
@@ -27,6 +29,8 @@ from sqlalchemy.engine import Engine
 from sqlalchemy.exc import SQLAlchemyError
 from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
 from gns3server.db.repositories.computes import ComputesRepository
+from gns3server.db.repositories.images import ImagesRepository
+from gns3server.utils.images import discover_images, check_valid_image_header, read_image_info, InvalidImageError
 from gns3server import schemas

 from .models import Base
@@ -82,3 +86,94 @@ async def get_computes(app: FastAPI) -> List[dict]:
             continue
         computes.append(compute)
     return computes
+
+
+def image_filter(change: Change, path: str) -> bool:
+
+    if change == Change.added:
+        header_magic_len = 7
+        with open(path, "rb") as f:
+            image_header = f.read(header_magic_len)  # read the first 7 bytes of the file
+            if len(image_header) >= header_magic_len:
+                try:
+                    check_valid_image_header(image_header)
+                except InvalidImageError as e:
+                    log.debug(f"New image '{path}' added: {e}")
+                    return False
+            else:
+                log.debug(f"New image '{path}' added: size is too small to be valid")
+                return False
+        return True
+    # FIXME: should we support image deletion?
+    # elif change == Change.deleted:
+    #     return True
+    return False
+
+
+async def monitor_images_on_filesystem(app: FastAPI):
+
+    server_config = Config.instance().settings.Server
+    images_dir = os.path.expanduser(server_config.images_path)
+
+    try:
+        async for changes in awatch(
+                images_dir,
+                watch_filter=image_filter,
+                raise_interrupt=True
+        ):
+            async with AsyncSession(app.state._db_engine) as db_session:
+                images_repository = ImagesRepository(db_session)
+                for change in changes:
+                    change_type, image_path = change
+                    if change_type == Change.added:
+                        try:
+                            image = await read_image_info(image_path)
+                        except InvalidImageError as e:
+                            log.warning(str(e))
+                            continue
+                        try:
+                            if await images_repository.get_image(image_path):
+                                continue
+                            await images_repository.add_image(**image)
+                            log.info(f"Discovered image '{image_path}' has been added to the database")
+                        except SQLAlchemyError as e:
+                            log.warning(f"Error while adding image '{image_path}' to the database: {e}")
+                    # if change_type == Change.deleted:
+                    #     try:
+                    #         if await images_repository.get_image(image_path):
+                    #             success = await images_repository.delete_image(image_path)
+                    #             if not success:
+                    #                 log.warning(f"Could not delete image '{image_path}' from the database")
+                    #             else:
+                    #                 log.info(f"Image '{image_path}' has been deleted from the database")
+                    #     except SQLAlchemyError as e:
+                    #         log.warning(f"Error while deleting image '{image_path}' from the database: {e}")
+    except KeyboardInterrupt:
+        # send SIGTERM to the server PID so uvicorn can shutdown the process
+        os.kill(os.getpid(), signal.SIGTERM)
+
+
+async def discover_images_on_filesystem(app: FastAPI):
+
+    async with AsyncSession(app.state._db_engine) as db_session:
+        images_repository = ImagesRepository(db_session)
+        db_images = await images_repository.get_images()
+        existing_image_paths = []
+        for db_image in db_images:
+            try:
+                image = schemas.Image.from_orm(db_image)
+                existing_image_paths.append(image.path)
+            except ValidationError as e:
+                log.error(f"Could not load image '{db_image.filename}' from database: {e}")
+                continue
+        for image_type in ("qemu", "ios", "iou"):
+            discovered_images = await discover_images(image_type, existing_image_paths)
+            for image in discovered_images:
+                log.info(f"Adding discovered image '{image['path']}' to the database")
+                try:
+                    await images_repository.add_image(**image)
+                except SQLAlchemyError as e:
+                    log.warning(f"Error while adding image '{image['path']}' to the database: {e}")
+
+    # monitor if images have been manually added
+    asyncio.create_task(monitor_images_on_filesystem(app))
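`monitor_images_on_filesystem()` builds on watchfiles: `awatch()` is an async iterator that yields a set of `(Change, path)` tuples per batch of filesystem events, and `watch_filter` prunes events before they are yielded. A stripped-down sketch of the same pattern (directory path illustrative):

```python
import asyncio
from watchfiles import awatch, Change

def only_added(change: Change, path: str) -> bool:
    # Mirror of image_filter's outer shape: react to newly added files only
    return change == Change.added

async def watch(directory: str) -> None:
    async for changes in awatch(directory, watch_filter=only_added):
        for change_type, path in changes:
            print(f"new file detected: {path}")

# asyncio.run(watch("/tmp/images"))  # runs until cancelled/interrupted
```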
@@ -20,19 +20,20 @@ import stat
 import aiofiles
 import shutil

-from typing import AsyncGenerator
+from typing import List, AsyncGenerator
 from ..config import Config
 from . import force_unix_path

 import gns3server.db.models as models
 from gns3server.db.repositories.images import ImagesRepository
+from gns3server.utils.asyncio import wait_run_in_executor

 import logging

 log = logging.getLogger(__name__)


-def list_images(image_type):
+async def list_images(image_type):
     """
     Scan directories for available image for a given type.

@@ -59,7 +60,6 @@ def list_images(image_type):
         directory = os.path.normpath(directory)
         for root, _, filenames in _os_walk(directory, recurse=recurse):
             for filename in filenames:
-                path = os.path.join(root, filename)
                 if filename not in files:
                     if filename.endswith(".md5sum") or filename.startswith("."):
                         continue
@@ -92,7 +92,7 @@ def list_images(image_type):
                         {
                             "filename": filename,
                             "path": force_unix_path(path),
-                            "md5sum": md5sum(os.path.join(root, filename)),
+                            "md5sum": await wait_run_in_executor(md5sum, os.path.join(root, filename)),
                             "filesize": os.stat(os.path.join(root, filename)).st_size,
                         }
                     )
@@ -101,6 +101,59 @@ def list_images(image_type):
     return images


+async def read_image_info(path: str, expected_image_type: str = None) -> dict:
+
+    header_magic_len = 7
+    try:
+        async with aiofiles.open(path, "rb") as f:
+            image_header = await f.read(header_magic_len)  # read the first 7 bytes of the file
+            if len(image_header) >= header_magic_len:
+                detected_image_type = check_valid_image_header(image_header)
+                if expected_image_type and detected_image_type != expected_image_type:
+                    raise InvalidImageError(f"Detected image type for '{path}' is {detected_image_type}, "
+                                            f"expected type is {expected_image_type}")
+            else:
+                raise InvalidImageError(f"Image '{path}' is too small to be valid")
+    except OSError as e:
+        raise InvalidImageError(f"Cannot read image '{path}': {e}")
+
+    image_info = {
+        "image_name": os.path.basename(path),
+        "image_type": detected_image_type,
+        "image_size": os.stat(path).st_size,
+        "path": path,
+        "checksum": await wait_run_in_executor(md5sum, path, cache_to_md5file=False),
+        "checksum_algorithm": "md5",
+    }
+    return image_info
+
+
+async def discover_images(image_type: str, skip_image_paths: list = None) -> List[dict]:
+    """
+    Scan directories for available images
+    """
+
+    files = set()
+    images = []
+
+    for directory in images_directories(image_type):
+        for root, _, filenames in os.walk(os.path.normpath(directory)):
+            for filename in filenames:
+                if filename.endswith(".md5sum") or filename.startswith("."):
+                    continue
+                path = os.path.join(root, filename)
+                if not os.path.isfile(path) or skip_image_paths and path in skip_image_paths or path in files:
+                    continue
+                files.add(path)
+
+                try:
+                    images.append(await read_image_info(path, image_type))
+                except InvalidImageError as e:
+                    log.debug(str(e))
+                    continue
+    return images
+
+
 def _os_walk(directory, recurse=True, **kwargs):
     """
     Work like os.walk but if recurse is False just list current directory
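One subtlety in `discover_images()` above: the skip condition relies on `and` binding tighter than `or`. A fully parenthesized equivalent, written as a standalone helper to make the grouping explicit (name and signature are illustrative, not from the diff):

```python
import os
from typing import List, Optional, Set

def should_skip(path: str, seen: Set[str], skip_paths: Optional[List[str]] = None) -> bool:
    # Same grouping the bare expression parses to: A or (B and C) or D
    return (
        not os.path.isfile(path)
        or (skip_paths is not None and path in skip_paths)
        or path in seen
    )
```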
@@ -133,18 +186,18 @@ def default_images_directory(image_type):
         raise NotImplementedError(f"%s node type is not supported", image_type)


-def images_directories(type):
+def images_directories(image_type):
     """
     Return all directories where we will look for images
     by priority

-    :param type: Type of emulator
+    :param image_type: Type of emulator
     """

     server_config = Config.instance().settings.Server
     paths = []
     img_dir = os.path.expanduser(server_config.images_path)
-    type_img_directory = default_images_directory(type)
+    type_img_directory = default_images_directory(image_type)
     try:
         os.makedirs(type_img_directory, exist_ok=True)
         paths.append(type_img_directory)
@@ -158,7 +211,7 @@ def images_directories(type):
     return [force_unix_path(p) for p in paths if os.path.exists(p)]


-def md5sum(path, working_dir=None, stopped_event=None):
+def md5sum(path, working_dir=None, stopped_event=None, cache_to_md5file=True):
     """
     Return the md5sum of an image and cache it on disk

@@ -193,7 +246,7 @@ def md5sum(path, working_dir=None, stopped_event=None):
             if stopped_event is not None and stopped_event.is_set():
                 log.error(f"MD5 sum calculation of `{path}` has stopped due to cancellation")
                 return
-            buf = f.read(128)
+            buf = f.read(1024)
             if not buf:
                 break
             m.update(buf)
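For context, the digest loop around the changed line looks roughly like this: the read buffer grows from 128 to 1024 bytes (fewer iterations per megabyte hashed), and a `threading.Event` lets a worker thread abandon a long-running hash. A standalone sketch, assuming the surrounding function shape from the diff:

```python
import hashlib
import threading
from typing import Optional

def md5_digest(path: str, stopped_event: Optional[threading.Event] = None) -> Optional[str]:
    m = hashlib.md5()
    with open(path, "rb") as f:
        while True:
            if stopped_event is not None and stopped_event.is_set():
                return None  # hash abandoned on cancellation
            buf = f.read(1024)  # was 128 in the old code
            if not buf:
                break
            m.update(buf)
    return m.hexdigest()
```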
@@ -202,6 +255,7 @@ def md5sum(path, working_dir=None, stopped_event=None, cache_to_md5file=True):
         log.error("Can't create digest of %s: %s", path, str(e))
         return None

+    if cache_to_md5file:
         try:
             with open(md5sum_file, "w+") as f:
                 f.write(digest)
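The new `cache_to_md5file` flag controls whether the digest is persisted next to the image in a `.md5sum` sidecar file; callers such as the appliance check and `read_image_info()` pass `False` to compute a throwaway digest. A sketch of the sidecar convention (helper names are hypothetical, not from the diff):

```python
import os
from typing import Optional

def cache_digest(image_path: str, digest: str) -> None:
    # Persist the digest next to the image, e.g. router.image.md5sum
    with open(image_path + ".md5sum", "w+") as f:
        f.write(digest)

def cached_digest(image_path: str) -> Optional[str]:
    sidecar = image_path + ".md5sum"
    if os.path.exists(sidecar):
        with open(sidecar) as f:
            return f.read().strip()
    return None  # no cache on disk; the caller must rehash
```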
@@ -237,10 +291,11 @@ def check_valid_image_header(data: bytes) -> str:
         # for IOS images: file must start with the ELF magic number, be 32-bit, big endian and have an ELF version of 1
         return "ios"
     elif data[:7] == b'\x7fELF\x01\x01\x01' or data[:7] == b'\x7fELF\x02\x01\x01':
-        # for IOU images file must start with the ELF magic number, be 32-bit or 64-bit, little endian and
+        # for IOU images: file must start with the ELF magic number, be 32-bit or 64-bit, little endian and
         # have an ELF version of 1 (normal IOS images are big endian!)
         return "iou"
-    elif data[:4] != b'QFI\xfb' or data[:4] != b'KDMV':
+    elif data[:4] == b'QFI\xfb' or data[:4] == b'KDMV':
+        # for Qemu images: file must be QCOW2 or VMDK
         return "qemu"
     else:
         raise InvalidImageError("Could not detect image type, please make sure it is a valid image")
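The corrected branch now matches the QCOW2 (`QFI\xfb`) and VMDK (`KDMV`) magic numbers with `==` instead of the old always-true `!=` comparison (a 4-byte prefix can never equal both, so the old condition held for every input). A condensed, runnable restatement of the detection table, with the exception type simplified to `ValueError`:

```python
def detect_image_type(header: bytes) -> str:
    if header[:7] == b'\x7fELF\x01\x02\x01':
        return "ios"   # 32-bit big-endian ELF
    if header[:7] in (b'\x7fELF\x01\x01\x01', b'\x7fELF\x02\x01\x01'):
        return "iou"   # 32/64-bit little-endian ELF
    if header[:4] in (b'QFI\xfb', b'KDMV'):
        return "qemu"  # QCOW2 or VMDK container
    raise ValueError("could not detect image type")

assert detect_image_type(b'QFI\xfb\x00\x00\x00') == "qemu"
assert detect_image_type(b'\x7fELF\x01\x02\x01') == "ios"
```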
@@ -15,4 +15,5 @@ aiosqlite===0.17.0
 passlib[bcrypt]==1.7.4
 python-jose==3.3.0
 email-validator==1.1.3
+watchfiles==0.13
 setuptools==60.6.0 # don't upgrade because of https://github.com/pypa/setuptools/issues/3084
@@ -18,6 +18,7 @@
 import os
 import sys
 import threading
+import pytest
 from unittest.mock import patch


|
|||||||
remove_checksum(str(tmpdir / 'not_exists'))
|
remove_checksum(str(tmpdir / 'not_exists'))
|
||||||
|
|
||||||
|
|
||||||
def test_list_images(tmpdir, config):
|
@pytest.mark.asyncio
|
||||||
|
async def test_list_images(tmpdir, config):
|
||||||
|
|
||||||
path1 = tmpdir / "images1" / "IOS" / "test1.image"
|
path1 = tmpdir / "images1" / "IOS" / "test1.image"
|
||||||
path1.write(b'\x7fELF\x01\x02\x01', ensure=True)
|
path1.write(b'\x7fELF\x01\x02\x01', ensure=True)
|
||||||
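Since `list_images()` is now a coroutine, the test becomes async and relies on the `pytest-asyncio` plugin, which supplies the `asyncio` marker. Minimal form of the conversion, with a stub standing in for the real function under test:

```python
import pytest

async def list_images(image_type: str) -> list:
    return []  # stand-in for the real coroutine under test

@pytest.mark.asyncio
async def test_list_images_returns_list():
    assert await list_images("qemu") == []
```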
@@ -140,7 +142,7 @@ def test_list_images(tmpdir, config):
     config.settings.Server.images_path = str(tmpdir / "images1")
     config.settings.Server.additional_images_paths = "/tmp/null24564;" + str(tmpdir / "images2")

-    assert list_images("dynamips") == [
+    assert await list_images("dynamips") == [
         {
             'filename': 'test1.image',
             'filesize': 7,
@@ -156,7 +158,7 @@ def test_list_images(tmpdir, config):
     ]

     if sys.platform.startswith("linux"):
-        assert list_images("iou") == [
+        assert await list_images("iou") == [
             {
                 'filename': 'test3.bin',
                 'filesize': 7,
@@ -165,7 +167,7 @@ def test_list_images(tmpdir, config):
             }
         ]

-    assert list_images("qemu") == [
+    assert await list_images("qemu") == [
         {
             'filename': 'test4.qcow2',
             'filesize': 1,