Mirror of https://github.com/GNS3/gns3-server.git (synced 2025-01-26 22:29:18 +00:00)
Improvements for built-in disks
* Checksum is updated in the database for updated disks.
* It is not possible to prune them.
This commit is contained in:
parent b472f6dbf8
commit 4758431c76
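For orientation, the diff below moves image pruning to a DELETE /prune route on the images API, makes the prune skip the disks shipped with the server (get_builtin_disks()), and refreshes the stored checksum of freshly installed built-in disks at controller start. The following is a minimal, hypothetical client-side sketch of the new endpoint; the server address, the "/v3/images" prefix and the bearer token are illustrative assumptions and are not taken from this commit — only the DELETE method and the 204 response come from the diff.

# Hypothetical usage sketch (not part of the commit).
import asyncio

import httpx


async def prune_unused_images(base_url: str = "http://localhost:3080", token: str = "<access-token>") -> None:
    headers = {"Authorization": f"Bearer {token}"}
    async with httpx.AsyncClient(base_url=base_url, headers=headers) as client:
        # The commit changes this route from POST /prune to DELETE /prune.
        response = await client.delete("/v3/images/prune")
        response.raise_for_status()  # the route returns 204 No Content
        # Built-in disks are skipped on the server side, so only user images
        # that are not attached to any template are actually removed.


if __name__ == "__main__":
    asyncio.run(prune_unused_images())

Built-in disks survive the prune because the route now passes get_builtin_disks() as a skip list to ImagesRepository.prune_images(), as shown in the hunks below.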
@@ -27,11 +27,11 @@ from fastapi.encoders import jsonable_encoder
 from starlette.requests import ClientDisconnect
 from sqlalchemy.orm.exc import MultipleResultsFound
 from typing import List, Optional
-from gns3server import schemas

+from gns3server import schemas
 from gns3server.config import Config
 from gns3server.compute.qemu import Qemu
-from gns3server.utils.images import InvalidImageError, write_image, read_image_info, default_images_directory
+from gns3server.utils.images import InvalidImageError, write_image, read_image_info, default_images_directory, get_builtin_disks
 from gns3server.db.repositories.images import ImagesRepository
 from gns3server.db.repositories.templates import TemplatesRepository
 from gns3server.db.repositories.rbac import RbacRepository
@@ -51,7 +51,6 @@ log = logging.getLogger(__name__)

 router = APIRouter()

-
 @router.post(
     "/qemu/{image_path:path}",
     response_model=schemas.Image,
@@ -175,6 +174,24 @@ async def upload_image(
     return image


+@router.delete(
+    "/prune",
+    status_code=status.HTTP_204_NO_CONTENT,
+    dependencies=[Depends(has_privilege("Image.Allocate"))]
+)
+async def prune_images(
+    images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
+) -> None:
+    """
+    Prune images not attached to any template.
+
+    Required privilege: Image.Allocate
+    """
+
+    skip_images = get_builtin_disks()
+    await images_repo.prune_images(skip_images)
+
+
 @router.get(
     "/{image_path:path}",
     response_model=schemas.Image,
@@ -218,7 +235,7 @@ async def delete_image(
         image = await images_repo.get_image(image_path)
     except MultipleResultsFound:
         raise ControllerBadRequestError(f"Image '{image_path}' matches multiple images. "
-                                        f"Please include the relative path of the image")
+                                        f"Please include the absolute path of the image")

     if not image:
         raise ControllerNotFoundError(f"Image '{image_path}' not found")
@@ -236,20 +253,3 @@ async def delete_image(
     success = await images_repo.delete_image(image_path)
     if not success:
         raise ControllerError(f"Image '{image_path}' could not be deleted")
-
-
-@router.post(
-    "/prune",
-    status_code=status.HTTP_204_NO_CONTENT,
-    dependencies=[Depends(has_privilege("Image.Allocate"))]
-)
-async def prune_images(
-    images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
-) -> None:
-    """
-    Prune images not attached to any template.
-
-    Required privilege: Image.Allocate
-    """
-
-    await images_repo.prune_images()
@@ -18,6 +18,7 @@
 API routes for templates.
 """

+import os
 import hashlib
 import json

@@ -34,6 +35,8 @@ from gns3server.db.repositories.templates import TemplatesRepository
 from gns3server.services.templates import TemplatesService
 from gns3server.db.repositories.rbac import RbacRepository
 from gns3server.db.repositories.images import ImagesRepository
+from gns3server.controller.controller_error import ControllerError
+from gns3server.utils.images import get_builtin_disks

 from .dependencies.authentication import get_current_active_user
 from .dependencies.rbac import has_privilege
@@ -132,10 +135,28 @@ async def delete_template(
     Required privilege: Template.Allocate
     """

+    images = await templates_repo.get_template_images(template_id)
     await TemplatesService(templates_repo).delete_template(template_id)
     await rbac_repo.delete_all_ace_starting_with_path(f"/templates/{template_id}")
-    if prune_images:
-        await images_repo.prune_images()
+    if prune_images and images:
+        skip_images = get_builtin_disks()
+        for image in images:
+            if image.filename in skip_images:
+                continue
+            templates = await images_repo.get_image_templates(image.image_id)
+            if templates:
+                template_names = ", ".join([template.name for template in templates])
+                raise ControllerError(f"Image '{image.path}' is used by one or more templates: {template_names}")
+
+            try:
+                os.remove(image.path)
+            except OSError:
+                log.warning(f"Could not delete image file {image.path}")
+
+            print(f"Deleting image '{image.path}'")
+            success = await images_repo.delete_image(image.path)
+            if not success:
+                raise ControllerError(f"Image '{image.path}' could not removed from the database")


 @router.get(
@@ -115,7 +115,7 @@ class Docker(BaseManager):
             dst_path = self.resources_path()
             log.info(f"Installing Docker resources in '{dst_path}'")
             from gns3server.controller import Controller
-            Controller.instance().install_resource_files(dst_path, "compute/docker/resources")
+            await Controller.instance().install_resource_files(dst_path, "compute/docker/resources")
             await self.install_busybox(dst_path)
         except OSError as e:
             raise DockerError(f"Could not install Docker resources to {dst_path}: {e}")
@@ -28,10 +28,10 @@ try:
 except ImportError:
     from importlib import resources as importlib_resources

-
 from ..config import Config
 from ..utils import parse_version, md5sum
 from ..utils.images import default_images_directory
+from ..utils.asyncio import wait_run_in_executor

 from .project import Project
 from .appliance import Appliance
@@ -43,6 +43,7 @@ from .topology import load_topology
 from .gns3vm import GNS3VM
 from .gns3vm.gns3_vm_error import GNS3VMError
 from .controller_error import ControllerError, ControllerNotFoundError
+from ..db.tasks import update_disk_checksums
 from ..version import __version__

 import logging
@@ -72,8 +73,11 @@ class Controller:
     async def start(self, computes=None):

         log.info("Controller is starting")
-        self._install_base_configs()
-        self._install_builtin_disks()
+        await self._install_base_configs()
+        installed_disks = await self._install_builtin_disks()
+        if installed_disks:
+            await update_disk_checksums(installed_disks)
+
         server_config = Config.instance().settings.Server
         Config.instance().listen_for_config_changes(self._update_config)
         name = server_config.name
@@ -86,7 +90,7 @@
         if host == "0.0.0.0":
             host = "127.0.0.1"

-        self._load_controller_vars()
+        await self._load_controller_vars()

         if server_config.enable_ssl:
             self._ssl_context = self._create_ssl_context(server_config)
@@ -190,7 +194,7 @@
     async def reload(self):

         log.info("Controller is reloading")
-        self._load_controller_vars()
+        await self._load_controller_vars()

         # remove all projects deleted from disk.
         for project in self._projects.copy().values():
@@ -234,7 +238,7 @@
         except OSError as e:
             log.error(f"Cannot write controller vars file '{self._vars_file}': {e}")

-    def _load_controller_vars(self):
+    async def _load_controller_vars(self):
         """
         Reload the controller vars from disk
         """
@@ -274,9 +278,9 @@
         builtin_appliances_path = self._appliance_manager.builtin_appliances_path()
         if not previous_version or \
                 parse_version(__version__.split("+")[0]) > parse_version(previous_version.split("+")[0]):
-            self._appliance_manager.install_builtin_appliances()
+            await self._appliance_manager.install_builtin_appliances()
         elif not os.listdir(builtin_appliances_path):
-            self._appliance_manager.install_builtin_appliances()
+            await self._appliance_manager.install_builtin_appliances()
         else:
             log.info(f"Built-in appliances are installed in '{builtin_appliances_path}'")

@@ -307,18 +311,21 @@


     @staticmethod
-    def install_resource_files(dst_path, resource_name, upgrade_resources=True):
+    async def install_resource_files(dst_path, resource_name, upgrade_resources=True):
         """
         Install files from resources to user's file system
         """

-        def should_copy(src, dst, upgrade_resources):
+        installed_resources = []
+        async def should_copy(src, dst, upgrade_resources):
            if not os.path.exists(dst):
                return True
            if upgrade_resources is False:
                return False
            # copy the resource if it is different
-            return md5sum(src) != md5sum(dst)
+            src_md5 = await wait_run_in_executor(md5sum, src)
+            dst_md5 = await wait_run_in_executor(md5sum, dst)
+            return src_md5 != dst_md5

         if hasattr(sys, "frozen") and sys.platform.startswith("win"):
             resource_path = os.path.normpath(os.path.join(os.path.dirname(sys.executable), resource_name))
@@ -328,14 +335,16 @@ class Controller:
         else:
             for entry in importlib_resources.files('gns3server').joinpath(resource_name).iterdir():
                 full_path = os.path.join(dst_path, entry.name)
-                if entry.is_file() and should_copy(str(entry), full_path, upgrade_resources):
+                if entry.is_file() and await should_copy(str(entry), full_path, upgrade_resources):
                     log.debug(f'Installing {resource_name} resource file "{entry.name}" to "{full_path}"')
-                    shutil.copy(str(entry), os.path.join(dst_path, entry.name))
+                    shutil.copy(str(entry), os.path.join(full_path))
+                    installed_resources.append(full_path)
                 elif entry.is_dir():
                     os.makedirs(full_path, exist_ok=True)
-                    Controller.install_resource_files(full_path, os.path.join(resource_name, entry.name))
+                    await Controller.install_resource_files(full_path, os.path.join(resource_name, entry.name))
+        return installed_resources

-    def _install_base_configs(self):
+    async def _install_base_configs(self):
         """
         At startup we copy base configs to the user location to allow
         them to customize it
@@ -345,11 +354,11 @@
         log.info(f"Installing base configs in '{dst_path}'")
         try:
             # do not overwrite base configs because they may have been customized by the user
-            Controller.install_resource_files(dst_path, "configs", upgrade_resources=False)
+            await Controller.install_resource_files(dst_path, "configs", upgrade_resources=False)
         except OSError as e:
             log.error(f"Could not install base config files to {dst_path}: {e}")

-    def _install_builtin_disks(self):
+    async def _install_builtin_disks(self):
         """
         At startup we copy built-in Qemu disks to the user location to allow
         them to use with appliances
@@ -358,7 +367,7 @@
         dst_path = self.disks_path()
         log.info(f"Installing built-in disks in '{dst_path}'")
         try:
-            Controller.install_resource_files(dst_path, "disks")
+            return await Controller.install_resource_files(dst_path, "disks")
         except OSError as e:
             log.error(f"Could not install disk files to {dst_path}: {e}")

@@ -110,7 +110,7 @@ class ApplianceManager:
         os.makedirs(appliances_dir, exist_ok=True)
         return appliances_dir

-    def install_builtin_appliances(self):
+    async def install_builtin_appliances(self):
         """
         At startup we copy the built-in appliances files.
         """
@@ -119,7 +119,7 @@
         log.info(f"Installing built-in appliances in '{dst_path}'")
         from . import Controller
         try:
-            Controller.instance().install_resource_files(dst_path, "appliances")
+            await Controller.instance().install_resource_files(dst_path, "appliances")
         except OSError as e:
             log.error(f"Could not install built-in appliance files to {dst_path}: {e}")

@@ -18,7 +18,7 @@
 import os

 from typing import Optional, List
-from sqlalchemy import select, delete
+from sqlalchemy import select, delete, update
 from sqlalchemy.ext.asyncio import AsyncSession

 from .base import BaseRepository
@@ -103,6 +103,22 @@ class ImagesRepository(BaseRepository):
         await self._db_session.refresh(db_image)
         return db_image

+    async def update_image(self, image_path: str, checksum: str, checksum_algorithm: str) -> models.Image:
+        """
+        Update an image.
+        """
+
+        query = update(models.Image).\
+            where(models.Image.path == image_path).\
+            values(checksum=checksum, checksum_algorithm=checksum_algorithm)
+
+        await self._db_session.execute(query)
+        await self._db_session.commit()
+        image_db = await self.get_image_by_checksum(checksum)
+        if image_db:
+            await self._db_session.refresh(image_db)  # force refresh of updated_at value
+        return image_db
+
     async def delete_image(self, image_path: str) -> bool:
         """
         Delete an image.
@@ -119,7 +135,7 @@ class ImagesRepository(BaseRepository):
         await self._db_session.commit()
         return result.rowcount > 0

-    async def prune_images(self) -> int:
+    async def prune_images(self, skip_images: list[str] = None) -> int:
         """
         Prune images not attached to any template.
         """
@@ -130,12 +146,15 @@ class ImagesRepository(BaseRepository):
         images = result.scalars().all()
         images_deleted = 0
         for image in images:
+            if skip_images and image.filename in skip_images:
+                log.debug(f"Skipping image '{image.path}' for pruning")
+                continue
             try:
                 log.debug(f"Deleting image '{image.path}'")
                 os.remove(image.path)
             except OSError:
                 log.warning(f"Could not delete image file {image.path}")
-            if await self.delete_image(image.filename):
+            if await self.delete_image(image.path):
                 images_deleted += 1
         log.info(f"{images_deleted} image(s) have been deleted")
         return images_deleted
@@ -170,3 +170,14 @@ class TemplatesRepository(BaseRepository):
         await self._db_session.commit()
         await self._db_session.refresh(template_in_db)
         return template_in_db
+
+    async def get_template_images(self, template_id: UUID) -> int:
+        """
+        Return all images attached to a template.
+        """
+
+        query = select(models.Image).\
+            join(models.Image.templates).\
+            filter(models.Template.template_id == template_id)
+        result = await self._db_session.execute(query)
+        return result.scalars().all()
@@ -35,7 +35,8 @@ from watchdog.events import FileSystemEvent, PatternMatchingEventHandler

 from gns3server.db.repositories.computes import ComputesRepository
 from gns3server.db.repositories.images import ImagesRepository
-from gns3server.utils.images import discover_images, read_image_info, default_images_directory, InvalidImageError
+from gns3server.utils.images import md5sum, discover_images, read_image_info, InvalidImageError
+from gns3server.utils.asyncio import wait_run_in_executor
 from gns3server import schemas

 from .models import Base
@@ -130,7 +131,7 @@ async def get_computes(app: FastAPI) -> List[dict]:
     return computes


-async def discover_images_on_filesystem(app: FastAPI):
+async def discover_images_on_filesystem(app: FastAPI) -> None:

     async with AsyncSession(app.state._db_engine) as db_session:
         images_repository = ImagesRepository(db_session)
@@ -155,6 +156,25 @@ async def discover_images_on_filesystem(app: FastAPI):
     # monitor if images have been manually added
     asyncio.create_task(monitor_images_on_filesystem(app))

+
+async def update_disk_checksums(updated_disks: List[str]) -> None:
+    """
+    Update the checksum of a list of disks in the database.
+
+    :param updated_disks: list of updated disks
+    """
+
+    from gns3server.api.server import app
+    async with AsyncSession(app.state._db_engine) as db_session:
+        images_repository = ImagesRepository(db_session)
+        for path in updated_disks:
+            image = await images_repository.get_image(path)
+            if image:
+                log.info(f"Updating image '{path}' in the database")
+                checksum = await wait_run_in_executor(md5sum, path, cache_to_md5file=False)
+                if image.checksum != checksum:
+                    await images_repository.update_image(path, checksum, "md5")
+
 class EventHandler(PatternMatchingEventHandler):
     """
     Watchdog event handler.
@@ -227,7 +247,6 @@ async def monitor_images_on_filesystem(app: FastAPI):

     async for filesystem_event in EventIterator(queue):
         # read the file system event from the queue
-        print(filesystem_event)
         image_path = filesystem_event.src_path
         expected_image_type = None
         if "IOU" in image_path:
@@ -20,6 +20,11 @@ import stat
 import aiofiles
 import shutil

+try:
+    import importlib_resources
+except ImportError:
+    from importlib import resources as importlib_resources
+
 from typing import List, AsyncGenerator
 from ..config import Config
 from . import force_unix_path
@@ -111,6 +116,14 @@ async def list_images(image_type):
     return images


+def get_builtin_disks() -> List[str]:
+    builtin_disks = []
+    for entry in importlib_resources.files('gns3server').joinpath("disks").iterdir():
+        if entry.is_file():
+            builtin_disks.append(entry.name)
+    return builtin_disks
+
+
 async def read_image_info(path: str, expected_image_type: str = None) -> dict:

     header_magic_len = 7
@@ -118,7 +131,7 @@ async def read_image_info(path: str, expected_image_type: str = None) -> dict:
     async with aiofiles.open(path, "rb") as f:
         image_header = await f.read(header_magic_len)  # read the first 7 bytes of the file
         if len(image_header) >= header_magic_len:
-            detected_image_type = check_valid_image_header(image_header)
+            detected_image_type = check_valid_image_header(path, image_header)
            if expected_image_type and detected_image_type != expected_image_type:
                raise InvalidImageError(f"Detected image type for '{path}' is {detected_image_type}, "
                                        f"expected type is {expected_image_type}")
@@ -302,7 +315,7 @@ class InvalidImageError(Exception):
         return self._message


-def check_valid_image_header(data: bytes, allow_raw_image: bool = False) -> str:
+def check_valid_image_header(path: str, data: bytes, allow_raw_image: bool = False) -> str:

     if data[:7] == b'\x7fELF\x01\x02\x01':
         # for IOS images: file must start with the ELF magic number, be 32-bit, big endian and have an ELF version of 1
@@ -317,7 +330,7 @@ def check_valid_image_header(data: bytes, allow_raw_image: bool = False) -> str:
     else:
         if allow_raw_image is True:
             return "qemu"
-        raise InvalidImageError("Could not detect image type, please make sure it is a valid image")
+        raise InvalidImageError(f"{path}: could not detect image type, please make sure it is a valid image")


 async def write_image(
@@ -342,7 +355,7 @@ async def write_image(
         async for chunk in stream:
             if check_image_header and len(chunk) >= header_magic_len:
                 check_image_header = False
-                image_type = check_valid_image_header(chunk, allow_raw_image)
+                image_type = check_valid_image_header(image_path, chunk, allow_raw_image)
             await f.write(chunk)
             checksum.update(chunk)

@@ -28,11 +28,11 @@ pytestmark = pytest.mark.asyncio

 class TestApplianceRoutes:

-    @pytest.fixture(autouse=True)
-    def _install_builtin_appliances(self, controller: Controller):
-
-        controller.appliance_manager.install_builtin_appliances()
-        controller.appliance_manager.load_appliances()
+    # @pytest.fixture(autouse=True)
+    # def _install_builtin_appliances(self, controller: Controller):
+    #
+    #     controller.appliance_manager.install_builtin_appliances()
+    #     controller.appliance_manager.load_appliances()

     async def test_appliances_list(self, app: FastAPI, client: AsyncClient) -> None:

@@ -261,7 +261,7 @@ class TestImageRoutes:

     async def test_prune_images(self, app: FastAPI, client: AsyncClient, db_session: AsyncSession) -> None:

-        response = await client.post(app.url_path_for("prune_images"))
+        response = await client.delete(app.url_path_for("prune_images"))
         assert response.status_code == status.HTTP_204_NO_CONTENT

         images_repo = ImagesRepository(db_session)
@@ -275,7 +275,7 @@ class TestImageRoutes:
         controller: Controller
     ) -> None:

-        controller.appliance_manager.install_builtin_appliances()
+        await controller.appliance_manager.install_builtin_appliances()
         controller.appliance_manager.load_appliances()  # make sure appliances are loaded
         image_path = "tests/resources/empty30G.qcow2"
         image_name = os.path.basename(image_path)
@@ -245,7 +245,8 @@ async def test_start(controller):
     }

     #with asyncio_patch("gns3server.controller.compute.Compute.connect") as mock:
-    await controller.start()
+    with asyncio_patch("gns3server.controller.Controller._install_builtin_disks", return_value=[]):
+        await controller.start()
     #assert mock.called
     assert len(controller.computes) == 1  # Local compute is created
     assert controller.computes["local"].name == f"{socket.gethostname()} (controller)"
@@ -266,8 +267,9 @@ async def test_start_vm(controller):
     with asyncio_patch("gns3server.controller.gns3vm.vmware_gns3_vm.VMwareGNS3VM.start") as mock:
         with asyncio_patch("gns3server.controller.gns3vm.GNS3VM._check_network"):
             with asyncio_patch("gns3server.controller.compute.Compute.connect"):
-                await controller.start()
-                assert mock.called
+                with asyncio_patch("gns3server.controller.Controller._install_builtin_disks", return_value=[]):
+                    await controller.start()
+                    assert mock.called
     assert "local" in controller.computes
     assert "vm" in controller.computes
     assert len(controller.computes) == 2  # Local compute and vm are created
@@ -356,7 +358,7 @@ async def test_install_base_configs(controller, config, tmpdir):
     with open(str(tmpdir / 'iou_l2_base_startup-config.txt'), 'w+') as f:
         f.write('test')

-    controller._install_base_configs()
+    await controller._install_base_configs()
     assert os.path.exists(str(tmpdir / 'iou_l3_base_startup-config.txt'))

     # Check is the file has not been overwritten
@@ -385,12 +387,13 @@ async def test_install_base_configs(controller, config, tmpdir):
 async def test_install_builtin_disks(controller, config, tmpdir, builtin_disk):

     config.settings.Server.images_path = str(tmpdir)
-    controller._install_builtin_disks()
+    await controller._install_builtin_disks()
     # we only install Qemu empty disks at this time
     assert os.path.exists(str(tmpdir / "QEMU" / builtin_disk))


-def test_appliances(controller, config, tmpdir):
+@pytest.mark.asyncio
+async def test_appliances(controller, config, tmpdir):

     my_appliance = {
         "name": "My Appliance",
@@ -406,7 +409,7 @@ def test_appliances(controller, config, tmpdir):
         json.dump(my_appliance, f)

     config.settings.Server.appliances_path = str(tmpdir)
-    controller.appliance_manager.install_builtin_appliances()
+    await controller.appliance_manager.install_builtin_appliances()
     controller.appliance_manager.load_appliances()
     assert len(controller.appliance_manager.appliances) > 0
     for appliance in controller.appliance_manager.appliances.values():