Mirror of https://github.com/GNS3/gns3-server.git (synced 2025-06-24 09:46:42 +00:00)

Compare commits (34 commits)
SHA1
---
469bd692a0
d34c3a0ac7
fb3409c1a1
08693871ae
f5ea20347e
ac4c5f5985
e17a79e261
4758431c76
b472f6dbf8
b39c7541fb
82779d816f
96c6805ace
efb84b3063
0be45c7da2
a4222b4d03
5f75fc7573
5269d4386c
9d6cea665a
8b57fbaa0a
d4a9a21af9
8a900588ca
8ed1fa6ad5
a689a55937
1c0b0ae423
ad7813d04b
685bf88005
a1090a3da8
c8e5b41f39
94b5d3e636
e94b55e3bf
1a6a729736
77c356c82c
b4819b5500
11a9451098
.github/workflows/testing.yml (vendored, 2 changes)
@@ -18,7 +18,7 @@ jobs:
strategy:
matrix:
os: ["ubuntu-latest"]
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
#include:
# only test with Python 3.10 on Windows
# - os: windows-latest
CHANGELOG (11 changes)
@@ -1,5 +1,16 @@
# Change Log

## 3.0.2 03/01/2025

* Bundle web-ui v3.0.2
* Support to create templates based on image checksums.
* Improvements for installing built-in disks.
* Use watchdog instead of watchfiles to monitor for new images on the file system
* Drop Python 3.8
* Replace python-jose library by joserfc
* Upgrade dependencies
* Remove blocking IOU phone home call.

## 3.0.1 27/12/2024

* Bundle web-ui v3.0.1
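The 3.0.2 entries above add image pruning and checksum-based template installation on the controller. Below is a minimal client sketch of how those endpoints can be called with httpx; the base URL, the `/v3` prefix and the bearer-token login flow are assumptions about a typical deployment, not something this changelog specifies.

```python
# Minimal sketch, assuming a GNS3 3.x controller at localhost:3080 with a
# "/v3" API prefix and bearer-token auth; adjust to your deployment.
import asyncio
import httpx

BASE_URL = "http://localhost:3080/v3"  # assumed prefix
TOKEN = "..."                          # assumed to come from the controller login endpoint

async def main() -> None:
    headers = {"Authorization": f"Bearer {TOKEN}"}
    async with httpx.AsyncClient(base_url=BASE_URL, headers=headers) as client:
        # Create templates for uploaded images whose checksums match known appliances
        r = await client.post("/images/install")
        r.raise_for_status()  # 204 No Content on success
        # Remove images that are not referenced by any template
        r = await client.delete("/images/prune")
        r.raise_for_status()  # 204 No Content on success

asyncio.run(main())
```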
@@ -27,11 +27,11 @@ from fastapi.encoders import jsonable_encoder
from starlette.requests import ClientDisconnect
from sqlalchemy.orm.exc import MultipleResultsFound
from typing import List, Optional
from gns3server import schemas

from gns3server import schemas
from gns3server.config import Config
from gns3server.compute.qemu import Qemu
from gns3server.utils.images import InvalidImageError, write_image, read_image_info, default_images_directory
from gns3server.utils.images import InvalidImageError, write_image, read_image_info, default_images_directory, get_builtin_disks
from gns3server.db.repositories.images import ImagesRepository
from gns3server.db.repositories.templates import TemplatesRepository
from gns3server.db.repositories.rbac import RbacRepository
@@ -51,7 +51,6 @@ log = logging.getLogger(__name__)

router = APIRouter()


@router.post(
"/qemu/{image_path:path}",
response_model=schemas.Image,
@@ -175,6 +174,61 @@ async def upload_image(
return image


@router.delete(
"/prune",
status_code=status.HTTP_204_NO_CONTENT,
dependencies=[Depends(has_privilege("Image.Allocate"))]
)
async def prune_images(
images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
) -> None:
"""
Prune images not attached to any template.

Required privilege: Image.Allocate
"""

skip_images = get_builtin_disks()
await images_repo.prune_images(skip_images)


@router.post(
"/install",
status_code=status.HTTP_204_NO_CONTENT,
dependencies=[Depends(has_privilege("Image.Allocate"))]
)
async def install_images(
images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
templates_repo: TemplatesRepository = Depends(get_repository(TemplatesRepository))
) -> None:
"""
Attempt to automatically create templates based on image checksums.

Required privilege: Image.Allocate
"""

skip_images = get_builtin_disks()
images = await images_repo.get_images()
for image in images:
if skip_images and image.filename in skip_images:
log.debug(f"Skipping image '{image.path}' for image installation")
continue
templates = await images_repo.get_image_templates(image.image_id)
if templates:
# the image is already used by a template
log.warning(f"Image '{image.path}' is used by one or more templates")
continue
await Controller.instance().appliance_manager.install_appliances_from_image(
image.path,
image.checksum,
images_repo,
templates_repo,
None,
None,
os.path.dirname(image.path)
)


@router.get(
"/{image_path:path}",
response_model=schemas.Image,
@@ -218,7 +272,7 @@ async def delete_image(
image = await images_repo.get_image(image_path)
except MultipleResultsFound:
raise ControllerBadRequestError(f"Image '{image_path}' matches multiple images. "
f"Please include the relative path of the image")
f"Please include the absolute path of the image")

if not image:
raise ControllerNotFoundError(f"Image '{image_path}' not found")
@@ -236,20 +290,3 @@ async def delete_image(
success = await images_repo.delete_image(image_path)
if not success:
raise ControllerError(f"Image '{image_path}' could not be deleted")


@router.post(
"/prune",
status_code=status.HTTP_204_NO_CONTENT,
dependencies=[Depends(has_privilege("Image.Allocate"))]
)
async def prune_images(
images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
) -> None:
"""
Prune images not attached to any template.

Required privilege: Image.Allocate
"""

await images_repo.prune_images()
@@ -18,6 +18,7 @@
API routes for templates.
"""

import os
import hashlib
import json

@@ -34,6 +35,8 @@ from gns3server.db.repositories.templates import TemplatesRepository
from gns3server.services.templates import TemplatesService
from gns3server.db.repositories.rbac import RbacRepository
from gns3server.db.repositories.images import ImagesRepository
from gns3server.controller.controller_error import ControllerError
from gns3server.utils.images import get_builtin_disks

from .dependencies.authentication import get_current_active_user
from .dependencies.rbac import has_privilege
@@ -132,10 +135,28 @@ async def delete_template(
Required privilege: Template.Allocate
"""

images = await templates_repo.get_template_images(template_id)
await TemplatesService(templates_repo).delete_template(template_id)
await rbac_repo.delete_all_ace_starting_with_path(f"/templates/{template_id}")
if prune_images:
await images_repo.prune_images()
if prune_images and images:
skip_images = get_builtin_disks()
for image in images:
if image.filename in skip_images:
continue
templates = await images_repo.get_image_templates(image.image_id)
if templates:
template_names = ", ".join([template.name for template in templates])
raise ControllerError(f"Image '{image.path}' is used by one or more templates: {template_names}")

try:
os.remove(image.path)
except OSError:
log.warning(f"Could not delete image file {image.path}")

print(f"Deleting image '{image.path}'")
success = await images_repo.delete_image(image.path)
if not success:
raise ControllerError(f"Image '{image.path}' could not removed from the database")


@router.get(
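The delete_template changes above now remove a template's own image files when pruning is requested, skipping built-in disks and refusing to delete images still shared with other templates. A hedged client sketch follows; it assumes prune_images is exposed as a query parameter of the DELETE route, which is not visible in this hunk.

```python
# Sketch only: assumes DELETE /v3/templates/{id} exists and that the
# prune_images flag seen above is wired to a query parameter of that route.
import asyncio
import httpx

async def delete_template_and_prune(template_id: str, token: str) -> None:
    headers = {"Authorization": f"Bearer {token}"}
    async with httpx.AsyncClient(base_url="http://localhost:3080/v3", headers=headers) as client:
        r = await client.delete(f"/templates/{template_id}", params={"prune_images": "true"})
        # the route fails if one of the template's images is still used by another template
        r.raise_for_status()

asyncio.run(delete_template_and_prune("11111111-2222-3333-4444-555555555555", "..."))
```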
@@ -115,7 +115,7 @@ class Docker(BaseManager):
dst_path = self.resources_path()
log.info(f"Installing Docker resources in '{dst_path}'")
from gns3server.controller import Controller
Controller.instance().install_resource_files(dst_path, "compute/docker/resources")
await Controller.instance().install_resource_files(dst_path, "compute/docker/resources")
await self.install_busybox(dst_path)
except OSError as e:
raise DockerError(f"Could not install Docker resources to {dst_path}: {e}")
@@ -28,10 +28,10 @@ try:
except ImportError:
from importlib import resources as importlib_resources


from ..config import Config
from ..utils import parse_version, md5sum
from ..utils.images import default_images_directory
from ..utils.asyncio import wait_run_in_executor

from .project import Project
from .appliance import Appliance
@@ -43,6 +43,7 @@ from .topology import load_topology
from .gns3vm import GNS3VM
from .gns3vm.gns3_vm_error import GNS3VMError
from .controller_error import ControllerError, ControllerNotFoundError
from ..db.tasks import update_disk_checksums
from ..version import __version__

import logging
@@ -72,8 +73,11 @@ class Controller:
async def start(self, computes=None):

log.info("Controller is starting")
self._install_base_configs()
self._install_builtin_disks()
await self._install_base_configs()
installed_disks = await self._install_builtin_disks()
if installed_disks:
await update_disk_checksums(installed_disks)

server_config = Config.instance().settings.Server
Config.instance().listen_for_config_changes(self._update_config)
name = server_config.name
@@ -86,7 +90,7 @@ class Controller:
if host == "0.0.0.0":
host = "127.0.0.1"

self._load_controller_vars()
await self._load_controller_vars()

if server_config.enable_ssl:
self._ssl_context = self._create_ssl_context(server_config)
@@ -190,7 +194,7 @@ class Controller:
async def reload(self):

log.info("Controller is reloading")
self._load_controller_vars()
await self._load_controller_vars()

# remove all projects deleted from disk.
for project in self._projects.copy().values():
@@ -234,7 +238,7 @@ class Controller:
except OSError as e:
log.error(f"Cannot write controller vars file '{self._vars_file}': {e}")

def _load_controller_vars(self):
async def _load_controller_vars(self):
"""
Reload the controller vars from disk
"""
@@ -274,9 +278,9 @@ class Controller:
builtin_appliances_path = self._appliance_manager.builtin_appliances_path()
if not previous_version or \
parse_version(__version__.split("+")[0]) > parse_version(previous_version.split("+")[0]):
self._appliance_manager.install_builtin_appliances()
await self._appliance_manager.install_builtin_appliances()
elif not os.listdir(builtin_appliances_path):
self._appliance_manager.install_builtin_appliances()
await self._appliance_manager.install_builtin_appliances()
else:
log.info(f"Built-in appliances are installed in '{builtin_appliances_path}'")

@@ -307,18 +311,21 @@ class Controller:


@staticmethod
def install_resource_files(dst_path, resource_name, upgrade_resources=True):
async def install_resource_files(dst_path, resource_name, upgrade_resources=True):
"""
Install files from resources to user's file system
"""

def should_copy(src, dst, upgrade_resources):
installed_resources = []
async def should_copy(src, dst, upgrade_resources):
if not os.path.exists(dst):
return True
if upgrade_resources is False:
return False
# copy the resource if it is different
return md5sum(src) != md5sum(dst)
src_md5 = await wait_run_in_executor(md5sum, src)
dst_md5 = await wait_run_in_executor(md5sum, dst)
return src_md5 != dst_md5

if hasattr(sys, "frozen") and sys.platform.startswith("win"):
resource_path = os.path.normpath(os.path.join(os.path.dirname(sys.executable), resource_name))
@@ -328,14 +335,16 @@ class Controller:
else:
for entry in importlib_resources.files('gns3server').joinpath(resource_name).iterdir():
full_path = os.path.join(dst_path, entry.name)
if entry.is_file() and should_copy(str(entry), full_path, upgrade_resources):
if entry.is_file() and await should_copy(str(entry), full_path, upgrade_resources):
log.debug(f'Installing {resource_name} resource file "{entry.name}" to "{full_path}"')
shutil.copy(str(entry), os.path.join(dst_path, entry.name))
shutil.copy(str(entry), os.path.join(full_path))
installed_resources.append(full_path)
elif entry.is_dir():
os.makedirs(full_path, exist_ok=True)
Controller.install_resource_files(full_path, os.path.join(resource_name, entry.name))
await Controller.install_resource_files(full_path, os.path.join(resource_name, entry.name))
return installed_resources

def _install_base_configs(self):
async def _install_base_configs(self):
"""
At startup we copy base configs to the user location to allow
them to customize it
@@ -345,11 +354,11 @@ class Controller:
log.info(f"Installing base configs in '{dst_path}'")
try:
# do not overwrite base configs because they may have been customized by the user
Controller.install_resource_files(dst_path, "configs", upgrade_resources=False)
await Controller.install_resource_files(dst_path, "configs", upgrade_resources=False)
except OSError as e:
log.error(f"Could not install base config files to {dst_path}: {e}")

def _install_builtin_disks(self):
async def _install_builtin_disks(self):
"""
At startup we copy built-in Qemu disks to the user location to allow
them to use with appliances
@@ -358,7 +367,7 @@ class Controller:
dst_path = self.disks_path()
log.info(f"Installing built-in disks in '{dst_path}'")
try:
Controller.install_resource_files(dst_path, "disks")
return await Controller.install_resource_files(dst_path, "disks")
except OSError as e:
log.error(f"Could not install disk files to {dst_path}: {e}")
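install_resource_files above now awaits wait_run_in_executor(md5sum, ...) so file hashing no longer blocks the event loop. Below is a standalone sketch of the same pattern using only the standard library; md5sum here is a local stand-in for gns3server's helper, and wait_run_in_executor is assumed to wrap loop.run_in_executor.

```python
# Standalone sketch of the pattern used above: hash files in a worker thread so
# the asyncio event loop is not blocked while large disk images are read.
import asyncio
import hashlib

def md5sum(path: str) -> str:
    # blocking helper, meant to run off the event loop
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
    return h.hexdigest()

async def should_copy(src: str, dst: str) -> bool:
    # copy the resource only if source and destination differ
    loop = asyncio.get_running_loop()
    src_md5 = await loop.run_in_executor(None, md5sum, src)
    dst_md5 = await loop.run_in_executor(None, md5sum, dst)
    return src_md5 != dst_md5
```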
@@ -110,7 +110,7 @@ class ApplianceManager:
os.makedirs(appliances_dir, exist_ok=True)
return appliances_dir

def install_builtin_appliances(self):
async def install_builtin_appliances(self):
"""
At startup we copy the built-in appliances files.
"""
@@ -119,7 +119,7 @@ class ApplianceManager:
log.info(f"Installing built-in appliances in '{dst_path}'")
from . import Controller
try:
Controller.instance().install_resource_files(dst_path, "appliances")
await Controller.instance().install_resource_files(dst_path, "appliances")
except OSError as e:
log.error(f"Could not install built-in appliance files to {dst_path}: {e}")
@@ -18,14 +18,19 @@
import ipaddress
import aiohttp
import asyncio
import async_timeout
import socket
import json
import sys
import io

from fastapi import HTTPException
from aiohttp import web

if sys.version_info >= (3, 11):
from asyncio import timeout as asynctimeout
else:
from async_timeout import timeout as asynctimeout

from ..utils import parse_version
from ..utils.asyncio import locking
from ..controller.controller_error import (
@@ -503,7 +508,7 @@ class Compute:
return self._getUrl(path)

async def _run_http_query(self, method, path, data=None, timeout=120, raw=False):
async with async_timeout.timeout(delay=timeout):
async with asynctimeout(delay=timeout):
url = self._getUrl(path)
headers = {"content-type": "application/json"}
chunked = None
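The import shim above picks asyncio.timeout on Python 3.11+ and falls back to the async_timeout package on older interpreters. A small usage sketch of that shim:

```python
# Usage sketch of the compatibility shim above: both context managers cancel the
# block and raise a timeout error when the deadline is exceeded.
import asyncio
import sys

if sys.version_info >= (3, 11):
    from asyncio import timeout as asynctimeout
else:
    from async_timeout import timeout as asynctimeout  # external dependency on < 3.11

async def fetch_with_deadline() -> None:
    try:
        async with asynctimeout(5):
            await asyncio.sleep(10)  # stand-in for an HTTP query that overruns
    except (asyncio.TimeoutError, TimeoutError):
        print("request timed out")

asyncio.run(fetch_with_deadline())
```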
@@ -58,7 +58,7 @@ class CrashReport:
Report crash to a third party service
"""

DSN = "https://847198b87dbd50ef8962901641918a08@o19455.ingest.us.sentry.io/38482"
DSN = "https://9cf53e6b9adfe49b867f1847b7cc4d72@o19455.ingest.us.sentry.io/38482"
_instance = None

def __init__(self):
@@ -18,7 +18,7 @@
import os

from typing import Optional, List
from sqlalchemy import select, delete
from sqlalchemy import select, delete, update
from sqlalchemy.ext.asyncio import AsyncSession

from .base import BaseRepository
@@ -103,6 +103,22 @@ class ImagesRepository(BaseRepository):
await self._db_session.refresh(db_image)
return db_image

async def update_image(self, image_path: str, checksum: str, checksum_algorithm: str) -> models.Image:
"""
Update an image.
"""

query = update(models.Image).\
where(models.Image.path == image_path).\
values(checksum=checksum, checksum_algorithm=checksum_algorithm)

await self._db_session.execute(query)
await self._db_session.commit()
image_db = await self.get_image_by_checksum(checksum)
if image_db:
await self._db_session.refresh(image_db) # force refresh of updated_at value
return image_db

async def delete_image(self, image_path: str) -> bool:
"""
Delete an image.
@@ -119,7 +135,7 @@ class ImagesRepository(BaseRepository):
await self._db_session.commit()
return result.rowcount > 0

async def prune_images(self) -> int:
async def prune_images(self, skip_images: list[str] = None) -> int:
"""
Prune images not attached to any template.
"""
@@ -130,12 +146,15 @@ class ImagesRepository(BaseRepository):
images = result.scalars().all()
images_deleted = 0
for image in images:
if skip_images and image.filename in skip_images:
log.debug(f"Skipping image '{image.path}' for pruning")
continue
try:
log.debug(f"Deleting image '{image.path}'")
os.remove(image.path)
except OSError:
log.warning(f"Could not delete image file {image.path}")
if await self.delete_image(image.filename):
if await self.delete_image(image.path):
images_deleted += 1
log.info(f"{images_deleted} image(s) have been deleted")
return images_deleted
@@ -170,3 +170,14 @@ class TemplatesRepository(BaseRepository):
await self._db_session.commit()
await self._db_session.refresh(template_in_db)
return template_in_db

async def get_template_images(self, template_id: UUID) -> List[models.Image]:
"""
Return all images attached to a template.
"""

query = select(models.Image).\
join(models.Image.templates).\
filter(models.Template.template_id == template_id)
result = await self._db_session.execute(query)
return result.scalars().all()
@@ -16,13 +16,11 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import asyncio
import signal
import time
import os

from fastapi import FastAPI
from pydantic import ValidationError
from watchfiles import awatch, Change

from typing import List
from sqlalchemy import event
from sqlalchemy.engine import Engine
@@ -32,10 +30,13 @@ from alembic import command, config
from alembic.script import ScriptDirectory
from alembic.runtime.migration import MigrationContext
from alembic.util.exc import CommandError
from watchdog.observers import Observer
from watchdog.events import FileSystemEvent, PatternMatchingEventHandler

from gns3server.db.repositories.computes import ComputesRepository
from gns3server.db.repositories.images import ImagesRepository
from gns3server.utils.images import discover_images, check_valid_image_header, read_image_info, default_images_directory, InvalidImageError
from gns3server.utils.images import md5sum, discover_images, read_image_info, InvalidImageError
from gns3server.utils.asyncio import wait_run_in_executor
from gns3server import schemas

from .models import Base
@@ -130,81 +131,7 @@ async def get_computes(app: FastAPI) -> List[dict]:
return computes


def image_filter(change: Change, path: str) -> bool:

if change == Change.added and os.path.isfile(path):
if path.endswith(".tmp") or path.endswith(".md5sum") or path.startswith("."):
return False
if "/lib/" in path or "/lib64/" in path:
# ignore custom IOU libraries
return False
header_magic_len = 7
with open(path, "rb") as f:
image_header = f.read(header_magic_len) # read the first 7 bytes of the file
if len(image_header) >= header_magic_len:
try:
check_valid_image_header(image_header)
except InvalidImageError as e:
log.debug(f"New image '{path}': {e}")
return False
else:
log.debug(f"New image '{path}': size is too small to be valid")
return False
return True
# FIXME: should we support image deletion?
# elif change == Change.deleted:
# return True
return False


async def monitor_images_on_filesystem(app: FastAPI):

directories_to_monitor = []
for image_type in ("qemu", "ios", "iou"):
image_dir = default_images_directory(image_type)
if os.path.isdir(image_dir):
log.debug(f"Monitoring for new images in '{image_dir}'")
directories_to_monitor.append(image_dir)

try:
async for changes in awatch(
*directories_to_monitor,
watch_filter=image_filter,
raise_interrupt=True
):
async with AsyncSession(app.state._db_engine) as db_session:
images_repository = ImagesRepository(db_session)
for change in changes:
change_type, image_path = change
if change_type == Change.added:
try:
image = await read_image_info(image_path)
except InvalidImageError as e:
log.warning(str(e))
continue
try:
if await images_repository.get_image(image_path):
continue
await images_repository.add_image(**image)
log.info(f"Discovered image '{image_path}' has been added to the database")
except SQLAlchemyError as e:
log.warning(f"Error while adding image '{image_path}' to the database: {e}")
# if change_type == Change.deleted:
# try:
# if await images_repository.get_image(image_path):
# success = await images_repository.delete_image(image_path)
# if not success:
# log.warning(f"Could not delete image '{image_path}' from the database")
# else:
# log.info(f"Image '{image_path}' has been deleted from the database")
# except SQLAlchemyError as e:
# log.warning(f"Error while deleting image '{image_path}' from the database: {e}")
except KeyboardInterrupt:
# send SIGTERM to the server PID so uvicorn can shutdown the process
os.kill(os.getpid(), signal.SIGTERM)


async def discover_images_on_filesystem(app: FastAPI):
async def discover_images_on_filesystem(app: FastAPI) -> None:

async with AsyncSession(app.state._db_engine) as db_session:
images_repository = ImagesRepository(db_session)
@@ -228,3 +155,117 @@ async def discover_images_on_filesystem(app: FastAPI):

# monitor if images have been manually added
asyncio.create_task(monitor_images_on_filesystem(app))


async def update_disk_checksums(updated_disks: List[str]) -> None:
"""
Update the checksum of a list of disks in the database.

:param updated_disks: list of updated disks
"""

from gns3server.api.server import app
async with AsyncSession(app.state._db_engine) as db_session:
images_repository = ImagesRepository(db_session)
for path in updated_disks:
image = await images_repository.get_image(path)
if image:
log.info(f"Updating image '{path}' in the database")
checksum = await wait_run_in_executor(md5sum, path, cache_to_md5file=False)
if image.checksum != checksum:
await images_repository.update_image(path, checksum, "md5")

class EventHandler(PatternMatchingEventHandler):
"""
Watchdog event handler.
"""

def __init__(self, queue: asyncio.Queue, loop: asyncio.BaseEventLoop, **kwargs):

self._loop = loop
self._queue = queue

# ignore temporary files, md5sum files, hidden files and directories
super().__init__(ignore_patterns=["*.tmp", "*.md5sum", ".*"], ignore_directories = True, **kwargs)

def on_closed(self, event: FileSystemEvent) -> None:
# monitor for closed files (e.g. when a file has finished to be copied)
if "/lib/" in event.src_path or "/lib64/" in event.src_path:
return # ignore custom IOU libraries
self._loop.call_soon_threadsafe(self._queue.put_nowait, event)

class EventIterator(object):
"""
Watchdog Event iterator.
"""

def __init__(self, queue: asyncio.Queue):
self.queue = queue

def __aiter__(self):
return self

async def __anext__(self):

item = await self.queue.get()
if item is None:
raise StopAsyncIteration
return item

async def monitor_images_on_filesystem(app: FastAPI):

def watchdog(
path: str,
queue: asyncio.Queue,
loop: asyncio.BaseEventLoop,
app: FastAPI, recursive: bool = False
) -> None:
"""
Thread to monitor a directory for new images.
"""

handler = EventHandler(queue, loop)
observer = Observer()
observer.schedule(handler, str(path), recursive=recursive)
observer.start()
log.info(f"Monitoring for new images in '{path}'")
while True:
time.sleep(1)
# stop when the app is exiting
if app.state.exiting:
observer.stop()
observer.join(10)
log.info(f"Stopping monitoring for new images in '{path}'")
loop.call_soon_threadsafe(queue.put_nowait, None)
break

queue = asyncio.Queue()
loop = asyncio.get_event_loop()
server_config = Config.instance().settings.Server
image_dir = os.path.expanduser(server_config.images_path)
asyncio.get_event_loop().run_in_executor(None, watchdog,image_dir, queue, loop, app, True)

async for filesystem_event in EventIterator(queue):
# read the file system event from the queue
image_path = filesystem_event.src_path
expected_image_type = None
if "IOU" in image_path:
expected_image_type = "iou"
elif "QEMU" in image_path:
expected_image_type = "qemu"
elif "IOS" in image_path:
expected_image_type = "ios"
async with AsyncSession(app.state._db_engine) as db_session:
images_repository = ImagesRepository(db_session)
try:
image = await read_image_info(image_path, expected_image_type)
except InvalidImageError as e:
log.warning(str(e))
continue
try:
if await images_repository.get_image(image_path):
continue
await images_repository.add_image(**image)
log.info(f"Discovered image '{image_path}' has been added to the database")
except SQLAlchemyError as e:
log.warning(f"Error while adding image '{image_path}' to the database: {e}")
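The new monitoring code above bridges watchdog's threaded callbacks into asyncio through a queue fed with call_soon_threadsafe. The trimmed, self-contained sketch below shows just that bridge, printing paths instead of writing to the database; on_closed relies on inotify close events, so it is effectively Linux-only.

```python
# Condensed sketch of the watchdog-to-asyncio bridge above: the observer thread
# pushes events into an asyncio.Queue via call_soon_threadsafe and a coroutine
# consumes them.
import asyncio
from watchdog.events import FileSystemEvent, PatternMatchingEventHandler
from watchdog.observers import Observer

class QueueHandler(PatternMatchingEventHandler):
    def __init__(self, queue: asyncio.Queue, loop: asyncio.AbstractEventLoop) -> None:
        super().__init__(ignore_patterns=["*.tmp", "*.md5sum", ".*"], ignore_directories=True)
        self._queue, self._loop = queue, loop

    def on_closed(self, event: FileSystemEvent) -> None:
        # called from the observer thread, so hand off to the event loop thread
        self._loop.call_soon_threadsafe(self._queue.put_nowait, event)

async def watch(path: str) -> None:
    queue: asyncio.Queue = asyncio.Queue()
    observer = Observer()
    observer.schedule(QueueHandler(queue, asyncio.get_running_loop()), path, recursive=True)
    observer.start()
    try:
        while True:
            event = await queue.get()
            print("new file finished copying:", event.src_path)
    finally:
        observer.stop()
        observer.join()

# asyncio.run(watch("/opt/gns3/images"))  # example directory, adjust as needed
```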
@@ -267,9 +267,9 @@ class Server:
else:
log.info(f"Compute authentication is enabled with username '{config.Server.compute_username}'")

# we only support Python 3 version >= 3.8
if sys.version_info < (3, 8, 0):
raise SystemExit("Python 3.8 or higher is required")
# we only support Python 3 version >= 3.9
if sys.version_info < (3, 9, 0):
raise SystemExit("Python 3.9 or higher is required")

log.info(
"Running with Python {major}.{minor}.{micro} and has PID {pid}".format(
@@ -14,8 +14,9 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


from jose import JWTError, jwt
from joserfc import jwt
from joserfc.jwk import OctKey
from joserfc.errors import JoseError
from datetime import datetime, timedelta, timezone
import bcrypt

@@ -56,7 +57,8 @@ class AuthService:
secret_key = DEFAULT_JWT_SECRET_KEY
log.error("A JWT secret key must be configured to secure the server, using an unsecured default key!")
algorithm = Config.instance().settings.Controller.jwt_algorithm
encoded_jwt = jwt.encode(to_encode, secret_key, algorithm=algorithm)
key = OctKey.import_key(secret_key)
encoded_jwt = jwt.encode({"alg": algorithm}, to_encode, key)
return encoded_jwt

def get_username_from_token(self, token: str, secret_key: str = None) -> Optional[str]:
@@ -73,11 +75,12 @@ class AuthService:
secret_key = DEFAULT_JWT_SECRET_KEY
log.error("A JWT secret key must be configured to secure the server, using an unsecured default key!")
algorithm = Config.instance().settings.Controller.jwt_algorithm
payload = jwt.decode(token, secret_key, algorithms=[algorithm])
username: str = payload.get("sub")
key = OctKey.import_key(secret_key)
payload = jwt.decode(token, key, algorithms=[algorithm])
username: str = payload.claims.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except (JWTError, ValidationError):
except (JoseError, ValidationError, ValueError):
raise credentials_exception
return token_data.username
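The python-jose to joserfc migration above changes both encoding and decoding. A round-trip sketch with the same calls, using a throwaway HS256 key and an integer exp claim:

```python
# Round-trip sketch of the joserfc calls used above (throwaway key, HS256 only).
import time
from joserfc import jwt
from joserfc.jwk import OctKey
from joserfc.errors import JoseError

key = OctKey.import_key("a-demo-secret-key-not-for-production")
claims = {"sub": "admin", "exp": int(time.time()) + 1800}

# header dict first, then claims, then the key
token = jwt.encode({"alg": "HS256"}, claims, key)

try:
    decoded = jwt.decode(token, key, algorithms=["HS256"])
    print(decoded.claims.get("sub"))  # -> "admin"
except (JoseError, ValueError):
    print("invalid token")
```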
@@ -46,6 +46,6 @@

gtag('config', 'G-0BT7QQV1W1');
</script>
<script src="runtime.24fa95b7061d7056.js" type="module"></script><script src="polyfills.319c79dd175e50d0.js" type="module"></script><script src="main.e55eeff5c0ba1cf4.js" type="module"></script>
<script src="runtime.24fa95b7061d7056.js" type="module"></script><script src="polyfills.319c79dd175e50d0.js" type="module"></script><script src="main.62c99707e4709a56.js" type="module"></script>

</body></html>
gns3server/static/web-ui/main.62c99707e4709a56.js (new file, 1 change)
File diff suppressed because one or more lines are too long
@@ -97,18 +97,10 @@ async def wait_for_process_termination(process, timeout=10):
:param timeout: Timeout in seconds
"""

if sys.version_info >= (3, 5):
try:
await asyncio.wait_for(process.wait(), timeout=timeout)
except ProcessLookupError:
return
else:
while timeout > 0:
if process.returncode is not None:
return
await asyncio.sleep(0.1)
timeout -= 0.1
raise asyncio.TimeoutError()
try:
await asyncio.wait_for(process.wait(), timeout=timeout)
except ProcessLookupError:
return


async def _check_process(process, termination_callback):
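With Python < 3.5 out of scope, wait_for_process_termination above keeps only the asyncio.wait_for path. A short usage sketch with an asyncio subprocess (the sleep command assumes a POSIX system):

```python
# Usage sketch: bound how long we wait for a child process to exit.
import asyncio

async def run_and_wait() -> None:
    process = await asyncio.create_subprocess_exec("sleep", "3")  # POSIX example command
    try:
        await asyncio.wait_for(process.wait(), timeout=10)
        print("exited with", process.returncode)
    except asyncio.TimeoutError:
        process.kill()  # give up and terminate the child
    except ProcessLookupError:
        pass  # process already gone

asyncio.run(run_and_wait())
```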
@@ -40,10 +40,7 @@ class Pool:
while len(self._tasks) > 0 or len(pending) > 0:
while len(self._tasks) > 0 and len(pending) < self._concurrency:
task, args, kwargs = self._tasks.pop(0)
if sys.version_info >= (3, 7):
t = asyncio.create_task(task(*args, **kwargs))
else:
t = asyncio.get_event_loop().create_task(task(*args, **kwargs))
t = asyncio.create_task(task(*args, **kwargs))
pending.add(t)
(done, pending) = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
for task in done:
@@ -20,6 +20,11 @@ import stat
import aiofiles
import shutil

try:
import importlib_resources
except ImportError:
from importlib import resources as importlib_resources

from typing import List, AsyncGenerator
from ..config import Config
from . import force_unix_path
@@ -111,6 +116,14 @@ async def list_images(image_type):
return images


def get_builtin_disks() -> List[str]:
builtin_disks = []
for entry in importlib_resources.files('gns3server').joinpath("disks").iterdir():
if entry.is_file():
builtin_disks.append(entry.name)
return builtin_disks


async def read_image_info(path: str, expected_image_type: str = None) -> dict:

header_magic_len = 7
@@ -118,7 +131,7 @@ async def read_image_info(path: str, expected_image_type: str = None) -> dict:
async with aiofiles.open(path, "rb") as f:
image_header = await f.read(header_magic_len) # read the first 7 bytes of the file
if len(image_header) >= header_magic_len:
detected_image_type = check_valid_image_header(image_header)
detected_image_type = check_valid_image_header(path, image_header)
if expected_image_type and detected_image_type != expected_image_type:
raise InvalidImageError(f"Detected image type for '{path}' is {detected_image_type}, "
f"expected type is {expected_image_type}")
@@ -302,7 +315,7 @@ class InvalidImageError(Exception):
return self._message


def check_valid_image_header(data: bytes, allow_raw_image: bool = False) -> str:
def check_valid_image_header(path: str, data: bytes, allow_raw_image: bool = False) -> str:

if data[:7] == b'\x7fELF\x01\x02\x01':
# for IOS images: file must start with the ELF magic number, be 32-bit, big endian and have an ELF version of 1
@@ -317,7 +330,7 @@ def check_valid_image_header(data: bytes, allow_raw_image: bool = False) -> str:
else:
if allow_raw_image is True:
return "qemu"
raise InvalidImageError("Could not detect image type, please make sure it is a valid image")
raise InvalidImageError(f"{path}: could not detect image type, please make sure it is a valid image")


async def write_image(
@@ -342,7 +355,7 @@ async def write_image(
async for chunk in stream:
if check_image_header and len(chunk) >= header_magic_len:
check_image_header = False
image_type = check_valid_image_header(chunk, allow_raw_image)
image_type = check_valid_image_header(image_path, chunk, allow_raw_image)
await f.write(chunk)
checksum.update(chunk)
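check_valid_image_header above now also takes the file path so InvalidImageError messages can name the offending file. A caller sketch mirroring read_image_info, reading only the 7-byte magic; the image path used here is hypothetical.

```python
# Caller sketch for the updated signature: the path is passed along with the
# first 7 bytes so error messages can name the offending file.
from typing import Optional
from gns3server.utils.images import InvalidImageError, check_valid_image_header

def detect_image_type(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        header = f.read(7)  # the IOS/IOU/Qemu magic fits in the first 7 bytes
    if len(header) < 7:
        return None  # too small to be a valid image
    try:
        return check_valid_image_header(path, header)  # "ios", "iou" or "qemu"
    except InvalidImageError as e:
        print(e)
        return None

print(detect_image_type("/opt/gns3/images/QEMU/empty30G.qcow2"))  # hypothetical path
```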
@@ -22,8 +22,8 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)

__version__ = "3.0.1"
__version_info__ = (3, 0, 1, 0)
__version__ = "3.0.2"
__version_info__ = (3, 0, 2, 0)

if "dev" in __version__:
try:
@@ -10,7 +10,7 @@ authors = [
{ name = "Jeremy Grossmann", email = "developers@gns3.com" }
]
readme = "README.md"
requires-python = ">=3.8"
requires-python = ">=3.9"
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
@@ -21,11 +21,11 @@ classifiers = [
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: Implementation :: CPython"
]
@@ -1,23 +1,24 @@
uvicorn==0.32.0
pydantic==2.9.2
fastapi==0.115.5
python-multipart==0.0.16
websockets==13.1
uvicorn==0.33.0 # uvicorn 0.33 is the last version supporting Python 3.8
pydantic==2.10.4
fastapi==0.115.6
python-multipart==0.0.20
websockets==14.1
aiohttp>=3.10.10,<3.11
async-timeout==4.0.3
async-timeout==5.0.1; python_version < '3.11'
aiofiles>=24.1.0,<25.0
Jinja2>=3.1.4,<3.2
sentry-sdk>=2.17,<2.18 # optional dependency
psutil>=6.1.0
Jinja2>=3.1.5,<3.2
sentry-sdk>=2.19.2,<2.20 # optional dependency
psutil>=6.1.1
distro>=1.9.0
py-cpuinfo>=9.0.0,<10.0
greenlet==3.1.1 # necessary to run sqlalchemy on Python 3.13
sqlalchemy==2.0.36
aiosqlite==0.20.0
alembic==1.13.3
bcrypt==4.2.0
python-jose[cryptography]==3.3.0
alembic==1.14.0
bcrypt==4.2.1
joserfc==1.0.1
email-validator==2.2.0
watchfiles==0.24.0
watchdog==6.0.0
zstandard==0.23.0
platformdirs>=2.4.0,<3 # platformdirs >=3 conflicts when building Debian packages
importlib-resources>=1.3; python_version <= '3.9'
@@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (C) 2015 GNS3 Technologies Inc.
# Copyright (C) 2024 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -16,19 +16,20 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

#
# Install GNS3 on a remote Ubuntu LTS server
# This create a dedicated user and setup all the package
# and optionnaly a VPN
# Install GNS3 on a remote Ubuntu server
# This creates a dedicated user and setup all the packages
# and optionally a VPN
#

function help {
echo "Usage:" >&2
echo "--with-openvpn: Install OpenVPN" >&2
echo "--with-iou: Install IOU" >&2
echo "--with-i386-repository: Add the i386 repositories required by IOU if they are not already available on the system. Warning: this will replace your source.list in order to use the official Ubuntu mirror" >&2
echo "--with-iou: Install IOU support" >&2
echo "--with-i386-repository: Add the i386 repositories required by IOU i386 images. This is not needed for recent x86_64 IOU images." >&2
echo "--with-welcome: Install GNS3-VM welcome.py script" >&2
echo "--without-kvm: Disable KVM, required if system do not support it (limitation in some hypervisors and cloud providers). Warning: only disable KVM if strictly necessary as this will degrade performance" >&2
echo "--unstable: Use the GNS3 unstable repository"
echo "--unstable: Use the GNS3 unstable repository" >&2
echo "--custom-repository <repository>: Use a custom repository" >&2
echo "--help: This help" >&2
}

@@ -43,15 +44,17 @@ then
exit 1
fi

# Default repository
REPOSITORY="ppa"

# Read the options
USE_VPN=0
USE_IOU=0
I386_REPO=0
DISABLE_KVM=0
UNSTABLE=0
WELCOME_SETUP=0

TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,with-welcome,without-kvm,unstable,help -n 'gns3-remote-install.sh' -- "$@"`
TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,with-welcome,without-kvm,unstable,custom-repository:,help -n 'gns3-remote-install.sh' -- "$@"`
if [ $? != 0 ]
then
help
@@ -83,9 +86,13 @@ while true ; do
shift
;;
--unstable)
UNSTABLE=1
REPOSITORY="unstable"
shift
;;
--custom-repository)
REPOSITORY="$2"
shift 2
;;
-h|--help)
help
exit 1
@@ -103,91 +110,93 @@ UBUNTU_CODENAME=`lsb_release -c -s`

log "Add GNS3 repository"

if [ "$UBUNTU_CODENAME" == "trusty" ]
if [ ! -f "/etc/apt/sources.list.d/ubuntu.sources" ]
then
if [ $UNSTABLE == 1 ]
then
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
deb http://ppa.launchpad.net/gns3/unstable/ubuntu $UBUNTU_CODENAME main
deb-src http://ppa.launchpad.net/gns3/unstable/ubuntu $UBUNTU_CODENAME main
deb http://ppa.launchpad.net/gns3/qemu/ubuntu $UBUNTU_CODENAME main
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu $UBUNTU_CODENAME main
EOFLIST
else
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B83AAABFFBD82D21B543C8EA86C22C2EC6A24D7F
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
deb http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
deb http://ppa.launchpad.net/gns3/qemu/ubuntu $UBUNTU_CODENAME main
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu $UBUNTU_CODENAME main
EOFLIST
fi

else
if [ $UNSTABLE == 1 ]
then
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
deb http://ppa.launchpad.net/gns3/unstable/ubuntu $UBUNTU_CODENAME main
deb-src http://ppa.launchpad.net/gns3/unstable/ubuntu $UBUNTU_CODENAME main

cat <<EOFLIST > /etc/apt/sources.list.d/gns3-ppa.sources
Types: deb
URIs: https://ppa.launchpadcontent.net/gns3/$REPOSITORY/ubuntu/
Suites: $UBUNTU_CODENAME
Components: main
Signed-By:
-----BEGIN PGP PUBLIC KEY BLOCK-----
.
mQINBGY0jSYBEADMH5CvX8ZVX4XzAxdQ2CmF7t86IjFnQgtI18Q19nVnpKEGNyB5
pgotDMzkhGnxuhvz2zE9PZhd8VgkodB81V607d/Dy8FfI7t1BVQhLvJDx0H/q6RE
n2y9WxiuBzTHitoQTCTY3hjcr7AUNFFI64gUqwbkQmYbCWWsYOlDpRSkWKg8P8WK
08RetwTI0Iwoz8j+BkbPlubuImiVfh1TeH23FBuGIwL1r1Cps0wel6JAi+jaU9WG
j8MX3mQYFTAtk7f1lRubqWosB/A4xIu609pF1e1tAkWAGltYAeoFhDn+PfA9KgmV
fvxfVR7zmxp31imTJgXgUFCz+H0Xb3vpve8XsrsHZUP6StJ3+6cFXjNBV6PuO1FT
JWp86a+AYHg7+sUWcoJRZPCTbb/pOcCa0q1ch5qcLkiYEOGK+pYhbPptq6y8IsJW
N6EDNCVvVqVyTJy14FZWoOqxcpUiDOQ+su28j8++V+PMo+FO3SQqwEZwJXk7LF/4
wUipDCUh/WNjDqqgmYLoO+ttiiJPbEw3jtbO+zopbzYpyEC1f06Nz7uz1daOIN3J
etFPzSqWCE7Eq+hoVmAAm8gVmQir3rFJbIGBAvAaOLQEOkUlOlS7AezqUhdyhGER
Zrvc3eNqxY7G61SEHipEJ7/hpcDq0RRWCXHsoQqyHaPje826n2pGkJYt4QARAQAB
tBZMYXVuY2hwYWQgUFBBIGZvciBHTlMziQJOBBMBCgA4FiEEuDqqv/vYLSG1Q8jq
hsIsLsaiTX8FAmY0jSYCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQhsIs
LsaiTX9z9xAAq1uHmRgfYmELS0cr2YEnTWHPVE6s95Qx+0cr5zzNeWfmoAS9uSyl
z8bCm+Q2ZapzU/nOtkozU+RGjgcRRTKMVTyS0PjFX22965xHCRWnw79fPyrYouUw
H2cAT8WSGYEeVAbqhJSns0RnDpXuaxmWE1wT+iitY/QAjeXo22Z2mjv2bFTitKbY
hZbE5Eu8Olc5YHCVI0ofq84/Ii921iMibU6EDMmm/iOnMK2uHGbC59t0YG8Rm7mK
uk6+TpxOULjFeCWSkF2Dr33m8JQmtYZuFUnmqWPuSdBo3J0O1b0qTg+EP9FbDAtj
CoEKT/V1ccMBd3r77o23CGsvpV7bzEU60A+NsU8vb/AkOmouYiF+qaYDFGZDfWhK
p1HFmd1kt7YdgxsmoKoFJkbt1bBdcFJLV0Jcad5sfArg2aFDYf2giMxAw4iQ+9jc
MCuwWxiqWicPqJ5erNTzVfayBkjuZqBDVTO9wmG3DL4QmNosIBS7kq+NGrT8Ql22
FqYfdIZJDlKVtJKHK8eKJSB0dbFawV2h5p/CvQlIm6nthg5FzOyjvCkPkvxvveq+
SuNxFEscumFCgo7j7RMWHW9HWK3TUvMmYLMVjxL8kXyCwknp9GklBQHA/IPxRa/2
eFqqkmVbmNAoMzzw5wqa/BPcFEbgn+E+TFyZqbzp0F4QzPJZFkz16SA=
=xnj5
-----END PGP PUBLIC KEY BLOCK-----
EOFLIST
else
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
deb http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
EOFLIST
fi

fi

if [ $I386_REPO == 1 ]
then
cat <<EOFLIST2 >> /etc/apt/sources.list
###### Ubuntu Main Repos
deb http://archive.ubuntu.com/ubuntu/ $UBUNTU_CODENAME main universe multiverse
deb-src http://archive.ubuntu.com/ubuntu/ $UBUNTU_CODENAME main universe multiverse
log "Updating system packages and installing curl"
apt update
apt install -y curl

###### Ubuntu Update Repos
deb http://archive.ubuntu.com/ubuntu/ ${UBUNTU_CODENAME}-security main universe multiverse
deb http://archive.ubuntu.com/ubuntu/ ${UBUNTU_CODENAME}-updates main universe multiverse
deb-src http://archive.ubuntu.com/ubuntu/ ${UBUNTU_CODENAME}-security main universe multiverse
deb-src http://archive.ubuntu.com/ubuntu/ ${UBUNTU_CODENAME}-updates main universe multiverse
EOFLIST2
fi
log "Upgrading packages"
apt upgrade --yes --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"

apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys A2E3EF7B
log "Installing the GNS3 server and its dependencies"
apt install -y gns3-server

log "Update system packages"
apt-get update

log "Upgrade packages"
apt-get upgrade --yes --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"

log "Install GNS3 packages"
apt-get install -y gns3-server

log "Create user GNS3 with /opt/gns3 as home directory"
log "Creating user GNS3 with /opt/gns3 as home directory"
if [ ! -d "/opt/gns3" ]
then
useradd -m -d /opt/gns3 gns3
fi


log "Add GNS3 to the ubridge group"
log "Adding GNS3 to the ubridge group"
usermod -aG ubridge gns3

log "Install docker"
log "Installing Docker"
if [ ! -f "/usr/bin/docker" ]
then
curl -sSL https://get.docker.com | bash
fi

log "Add GNS3 to the docker group"
log "Adding GNS3 to the docker group"
usermod -aG docker gns3

if [ $USE_IOU == 1 ]
then
log "Setup IOU"
dpkg --add-architecture i386
apt-get update
log "Setting up IOU support"
if [ $I386_REPO == 1 ]
then
log "Enabling i386 architecture for IOU support"
dpkg --add-architecture i386
apt update
fi

apt-get install -y gns3-iou
apt install -y gns3-iou

# Force the host name to gns3vm
echo gns3vm > /etc/hostname
@@ -196,31 +205,18 @@ then

# Force hostid for IOU
dd if=/dev/zero bs=4 count=1 of=/etc/hostid

# Block potential IOU phone home call (xml.cisco.com is not in use at this time)
log "Block IOU phone home call"
if [ "$UBUNTU_CODENAME" == "focal" ]
then
iptables -I OUTPUT -p udp --dport 53 -m string --hex-string "|03|xml|05|cisco|03|com" --algo bm -j DROP
echo iptables-persistent iptables-persistent/autosave_v4 boolean true | debconf-set-selections
echo iptables-persistent iptables-persistent/autosave_v6 boolean true | debconf-set-selections
apt-get install -y iptables-persistent
else
echo "127.0.0.254 xml.cisco.com" | tee --append /etc/hosts
fi

fi

log "Add gns3 to the kvm group"
log "Adding gns3 to the kvm group"
usermod -aG kvm gns3

log "Setup GNS3 server"
log "Setting up the GNS3 server configuration"

mkdir -p /etc/gns3
cat <<EOFC > /etc/gns3/gns3_server.conf
[Server]
host = 0.0.0.0
port = 3080
port = 3080
images_path = /opt/gns3/images
projects_path = /opt/gns3/projects
appliances_path = /opt/gns3/appliances
@@ -234,52 +230,15 @@ EOFC

if [ $DISABLE_KVM == 1 ]
then
log "Disable KVM support"
log "Disabling KVM support"
sed -i 's/hardware_acceleration = True/hardware_acceleration = False/g' /etc/gns3/gns3_server.conf
fi

chown -R gns3:gns3 /etc/gns3
chmod -R 700 /etc/gns3

if [ "$UBUNTU_CODENAME" == "trusty" ]
then
cat <<EOFI > /etc/init/gns3.conf
description "GNS3 server"
author "GNS3 Team"

start on filesystem or runlevel [2345]
stop on runlevel [016]
respawn
console log


script
exec start-stop-daemon --start --make-pidfile --pidfile /var/run/gns3.pid --chuid gns3 --exec "/usr/bin/gns3server"
end script

pre-start script
echo "" > /var/log/upstart/gns3.log
echo "[`date`] GNS3 Starting"
end script

pre-stop script
echo "[`date`] GNS3 Stopping"
end script
EOFI

chown root:root /etc/init/gns3.conf
chmod 644 /etc/init/gns3.conf


log "Start GNS3 service"
set +e
service gns3 stop
set -e
service gns3 start

else
# Install systemd service
cat <<EOFI > /lib/systemd/system/gns3.service
log "Installing the GNS3 systemd service"
cat <<EOFI > /lib/systemd/system/gns3.service
[Unit]
Description=GNS3 server
After=network-online.target
@@ -302,15 +261,15 @@ LimitNOFILE=16384
[Install]
WantedBy=multi-user.target
EOFI
chmod 755 /lib/systemd/system/gns3.service
chown root:root /lib/systemd/system/gns3.service

log "Start GNS3 service"
systemctl enable gns3
systemctl start gns3
fi
chmod 755 /lib/systemd/system/gns3.service
chown root:root /lib/systemd/system/gns3.service

log "GNS3 installed with success"
log "Starting the GNS3 service"
systemctl enable gns3
systemctl start gns3

log "GNS3 has been installed with success"

if [ $WELCOME_SETUP == 1 ]
then
@@ -319,11 +278,9 @@ gns3 ALL = (ALL) NOPASSWD: /usr/bin/apt-key
gns3 ALL = (ALL) NOPASSWD: /usr/bin/apt-get
gns3 ALL = (ALL) NOPASSWD: /usr/sbin/reboot
EOFI
NEEDRESTART_MODE=a apt-get install -y net-tools
NEEDRESTART_MODE=a apt-get install -y python3-pip
NEEDRESTART_MODE=a apt-get install -y dialog
pip install --no-input --upgrade pip
pip install --no-input pythondialog
NEEDRESTART_MODE=a apt install -y net-tools
NEEDRESTART_MODE=a apt install -y dialog
NEEDRESTART_MODE=a apt install -y python3-dialog

#Pull down welcome script from repo
curl https://raw.githubusercontent.com/GNS3/gns3-server/master/scripts/welcome.py > /usr/local/bin/welcome.py
@@ -350,19 +307,15 @@ fi

if [ $USE_VPN == 1 ]
then
log "Setup VPN"
log "Setting up OpenVPN"

log "Change GNS3 to listen on VPN interface"
log "Changing the GNS3 server configuration to listen on VPN interface"

sed -i 's/host = 0.0.0.0/host = 172.16.253.1/' /etc/gns3/gns3_server.conf

log "Install packages for OpenVPN"
log "Installing the OpenVPN packages"

apt-get install -y \
openvpn \
uuid \
dnsutils \
nginx-light
apt install -y openvpn uuid dnsutils nginx-light

MY_IP_ADDR=$(dig @ns1.google.com -t txt o-o.myaddr.l.google.com +short -4 | sed 's/"//g')

@@ -370,7 +323,7 @@ log "IP detected: $MY_IP_ADDR"

UUID=$(uuid)

log "Update motd"
log "Updating motd"

cat <<EOFMOTD > /etc/update-motd.d/70-openvpn
#!/bin/sh
@@ -381,7 +334,7 @@ echo "http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn"
echo ""
echo "And add it to your openvpn client."
echo ""
echo "apt-get remove nginx-light to disable the HTTP server."
echo "apt remove nginx-light to disable the HTTP server."
echo "And remove this file with rm /etc/update-motd.d/70-openvpn"
EOFMOTD
chmod 755 /etc/update-motd.d/70-openvpn
@@ -391,7 +344,7 @@ mkdir -p /etc/openvpn/
[ -d /dev/net ] || mkdir -p /dev/net
[ -c /dev/net/tun ] || mknod /dev/net/tun c 10 200

log "Create keys"
log "Creating OpenVPN keys"

[ -f /etc/openvpn/dh.pem ] || openssl dhparam -out /etc/openvpn/dh.pem 2048
[ -f /etc/openvpn/key.pem ] || openssl genrsa -out /etc/openvpn/key.pem 2048
@@ -399,7 +352,7 @@ chmod 600 /etc/openvpn/key.pem
[ -f /etc/openvpn/csr.pem ] || openssl req -new -key /etc/openvpn/key.pem -out /etc/openvpn/csr.pem -subj /CN=OpenVPN/
[ -f /etc/openvpn/cert.pem ] || openssl x509 -req -in /etc/openvpn/csr.pem -out /etc/openvpn/cert.pem -signkey /etc/openvpn/key.pem -days 24855

log "Create client configuration"
log "Creating OpenVPN client configuration"
cat <<EOFCLIENT > /root/client.ovpn
client
nobind
@@ -441,7 +394,7 @@ status openvpn-status-1194.log
log-append /var/log/openvpn-udp1194.log
EOFUDP

log "Setup HTTP server for serving client certificate"
log "Setting up an HTTP server for serving client certificate"
mkdir -p /usr/share/nginx/openvpn/$UUID
cp /root/client.ovpn /usr/share/nginx/openvpn/$UUID/$HOSTNAME.ovpn
touch /usr/share/nginx/openvpn/$UUID/index.html
@@ -458,7 +411,7 @@ EOFNGINX
service nginx stop
service nginx start

log "Restart OpenVPN and GNS3"
log "Restarting OpenVPN and GNS3"

set +e
service openvpn stop
@@ -466,15 +419,15 @@ service openvpn start
service gns3 stop
service gns3 start

log "Download http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn to setup your OpenVPN client after rebooting the server"
log "Please download http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn to setup your OpenVPN client after rebooting the server"

fi

if [ $WELCOME_SETUP == 1 ]
then
NEEDRESTART_MODE=a apt-get update
NEEDRESTART_MODE=a apt-get upgrade
python3 -c 'import sys; sys.path.append("/usr/local/bin/"); import welcome; ws = welcome.Welcome_dialog(); ws.repair_remote_install()'
cd /opt/gns3
su gns3
NEEDRESTART_MODE=a apt update
NEEDRESTART_MODE=a apt upgrade
python3 -c 'import sys; sys.path.append("/usr/local/bin/"); import welcome; ws = welcome.Welcome_dialog(); ws.repair_remote_install()'
cd /opt/gns3
su gns3
fi
@@ -28,14 +28,16 @@ pytestmark = pytest.mark.asyncio

class TestApplianceRoutes:

@pytest.fixture(autouse=True)
def _install_builtin_appliances(self, controller: Controller):
# @pytest.fixture(autouse=True)
# def _install_builtin_appliances(self, controller: Controller):
#
# controller.appliance_manager.install_builtin_appliances()
# controller.appliance_manager.load_appliances()

controller.appliance_manager.install_builtin_appliances()
async def test_appliances_list(self, app: FastAPI, client: AsyncClient, controller: Controller) -> None:

await controller.appliance_manager.install_builtin_appliances()
controller.appliance_manager.load_appliances()

async def test_appliances_list(self, app: FastAPI, client: AsyncClient) -> None:

response = await client.get(app.url_path_for("get_appliances"))
assert response.status_code == status.HTTP_200_OK
assert len(response.json()) > 0
@ -19,6 +19,7 @@ import os

import pytest
import hashlib

from tests.utils import asyncio_patch
from sqlalchemy.ext.asyncio import AsyncSession
from fastapi import FastAPI, status
from httpx import AsyncClient
@ -261,10 +262,13 @@ class TestImageRoutes:

    async def test_prune_images(self, app: FastAPI, client: AsyncClient, db_session: AsyncSession) -> None:

        response = await client.post(app.url_path_for("prune_images"))
        images_repo = ImagesRepository(db_session)
        images_in_db = await images_repo.get_images()
        assert len(images_in_db) != 0

        response = await client.delete(app.url_path_for("prune_images"))
        assert response.status_code == status.HTTP_204_NO_CONTENT

        images_repo = ImagesRepository(db_session)
        images_in_db = await images_repo.get_images()
        assert len(images_in_db) == 0

@ -275,7 +279,7 @@ class TestImageRoutes:
        controller: Controller
    ) -> None:

        controller.appliance_manager.install_builtin_appliances()
        await controller.appliance_manager.install_builtin_appliances()
        controller.appliance_manager.load_appliances()  # make sure appliances are loaded
        image_path = "tests/resources/empty30G.qcow2"
        image_name = os.path.basename(image_path)
@ -292,3 +296,32 @@ class TestImageRoutes:
        assert len(templates) == 1
        assert templates[0].name == "Empty VM"
        assert templates[0].version == "30G"
        await templates_repo.delete_template(templates[0].template_id)

    async def test_install_all(
        self, app: FastAPI,
        client: AsyncClient,
        db_session: AsyncSession,
        controller: Controller
    ) -> None:

        image_path = "tests/resources/empty100G.qcow2"
        image_name = os.path.basename(image_path)
        with open(image_path, "rb") as f:
            image_data = f.read()
        response = await client.post(
            app.url_path_for("upload_image", image_path=image_name),
            content=image_data)
        assert response.status_code == status.HTTP_201_CREATED

        controller.appliance_manager.load_appliances()  # make sure appliances are loaded
        with asyncio_patch("gns3server.api.routes.controller.images.get_builtin_disks", return_value=[]) as mock:
            response = await client.post(app.url_path_for("install_images"))
        assert mock.called
        assert response.status_code == status.HTTP_204_NO_CONTENT

        templates_repo = TemplatesRepository(db_session)
        templates = await templates_repo.get_templates()
        assert len(templates) == 1
        assert templates[0].name == "Empty VM"
        assert templates[0].version == "100G"
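Note: outside the test client these are ordinary HTTP endpoints; a minimal sketch of calling them against a running server with httpx, assuming the controller API is served under /v3 on the default port 3080 and that a bearer token has already been obtained (base URL, port and token handling are assumptions, not taken from the diff):

import asyncio
import httpx

async def prune_and_install(base_url: str, token: str) -> None:
    headers = {"Authorization": f"Bearer {token}"}
    async with httpx.AsyncClient(base_url=base_url, headers=headers) as client:
        # Remove images that are no longer referenced by any template.
        response = await client.delete("/v3/images/prune")
        response.raise_for_status()
        # Create templates for uploaded images that match known appliance checksums.
        response = await client.post("/v3/images/install")
        response.raise_for_status()

asyncio.run(prune_and_install("http://localhost:3080", "<access token>"))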
@ -16,6 +2,8
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import shutil

import pytest
import uuid
import unittest.mock
@ -145,29 +147,39 @@ class TestTemplateRoutes:
        tmpdir: str,
    ) -> None:

        path = os.path.join(tmpdir, "test.qcow2")
        with open(path, "wb+") as f:
        image1 = os.path.join(tmpdir, "image1.qcow2")
        with open(image1, "wb+") as f:
            f.write(b'\x42\x42\x42\x42')

        image2 = os.path.join(tmpdir, "image2.qcow2")
        with open(image2, "wb+") as f:
            f.write(b'\x42\x42\x42\x42')

        images_repo = ImagesRepository(db_session)
        await images_repo.add_image("test.qcow2", "qemu", 42, path, "e342eb86c1229b6c154367a5476969b5", "md5")
        await images_repo.add_image("image1.qcow2", "qemu", 42, image1, "e342eb86c1229b6c154367a5476969b5", "md5")
        await images_repo.add_image("image2.qcow2", "qemu", 42, image2, "e342eb86c1229b6c154367a5476969b5", "md5")

        template_id = str(uuid.uuid4())
        params = {"template_id": template_id,
                  "name": "QEMU_TEMPLATE",
                  "compute_id": "local",
                  "hda_disk_image": "test.qcow2",
                  "hda_disk_image": "image1.qcow2",
                  "hdb_disk_image": "image2.qcow2",
                  "template_type": "qemu"}

        response = await client.post(app.url_path_for("create_template"), json=params)
        assert response.status_code == status.HTTP_201_CREATED

        templates_repo = TemplatesRepository(db_session)
        images = await templates_repo.get_template_images(response.json().get("template_id"))
        assert len(images) == 2

        response = await client.delete(
            app.url_path_for("delete_template", template_id=template_id),
            params={"prune_images": True}
        )
        assert response.status_code == status.HTTP_204_NO_CONTENT

        images_repo = ImagesRepository(db_session)
        images = await images_repo.get_images()
        assert len(images) == 0
@ -21,7 +21,8 @@ from typing import Optional
from fastapi import FastAPI, HTTPException, status
from sqlalchemy import update
from httpx import AsyncClient
from jose import jwt
from joserfc import jwt
from joserfc.jwk import OctKey

from sqlalchemy.ext.asyncio import AsyncSession
from gns3server.db.repositories.users import UsersRepository
@ -166,16 +167,23 @@ class TestAuthTokens:

        jwt_secret = config.settings.Controller.jwt_secret_key
        token = auth_service.create_access_token(test_user.username)
        payload = jwt.decode(token, jwt_secret, algorithms=["HS256"])
        username = payload.get("sub")
        key = OctKey.import_key(jwt_secret)
        payload = jwt.decode(token, key, algorithms=["HS256"])
        username = payload.claims.get("sub")
        assert username == test_user.username

    async def test_token_missing_user_is_invalid(self, app: FastAPI, client: AsyncClient, config: Config) -> None:
    async def test_decode_token_with_wrong_algorithm(
        self,
        app: FastAPI,
        client: AsyncClient,
        test_user: User,
        config: Config
    ) -> None:

        jwt_secret = config.settings.Controller.jwt_secret_key
        token = auth_service.create_access_token(None)
        with pytest.raises(jwt.JWTError):
            jwt.decode(token, jwt_secret, algorithms=["HS256"])
        token = auth_service.create_access_token(test_user.username)
        with pytest.raises(ValueError):
            jwt.decode(token, jwt_secret, algorithms=["ES256"])

    async def test_can_retrieve_username_from_token(
        self,
@ -236,9 +244,10 @@ class TestUserLogin:

        # check that token exists in response and has user encoded within it
        token = response.json().get("access_token")
        payload = jwt.decode(token, jwt_secret, algorithms=["HS256"])
        assert "sub" in payload
        username = payload.get("sub")
        key = OctKey.import_key(jwt_secret)
        payload = jwt.decode(token, key, algorithms=["HS256"])
        assert "sub" in payload.claims
        username = payload.claims.get("sub")
        assert username == test_user.username

        # check that token is proper type
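Note: the change above swaps python-jose for joserfc, which wraps the shared secret in a key object and returns a Token whose claims live on .claims rather than a plain dict; a minimal standalone sketch of the new API, where the secret and claims are illustrative values only:

from joserfc import jwt
from joserfc.jwk import OctKey

secret = "change-me"                      # illustrative HMAC secret
key = OctKey.import_key(secret)           # joserfc expects a key object, not a raw string
token = jwt.encode({"alg": "HS256"}, {"sub": "alice"}, key)

decoded = jwt.decode(token, key, algorithms=["HS256"])
print(decoded.claims["sub"])              # claims dict lives on the returned Token object
# python-jose returned the claims dict directly:
#   jose.jwt.decode(token, secret, algorithms=["HS256"])["sub"]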
@ -400,10 +400,12 @@ def run_around_tests(monkeypatch, config, port_manager):
    config.settings.VMware.vmrun_path = tmppath
    config.settings.Dynamips.dynamips_path = tmppath


    # Force turn off KVM because it's not available on CI
    config.settings.Qemu.enable_hardware_acceleration = False

    # avoid monitoring for new images while testing
    config.settings.Server.auto_discover_images = False

    monkeypatch.setattr("gns3server.utils.path.get_default_project_directory", lambda *args: os.path.join(tmppath, 'projects'))

    # Force sys.platform to the original value. Because it seems not be restored correctly after each test
@ -245,7 +245,8 @@ async def test_start(controller):
    }

    #with asyncio_patch("gns3server.controller.compute.Compute.connect") as mock:
    await controller.start()
    with asyncio_patch("gns3server.controller.Controller._install_builtin_disks", return_value=[]):
        await controller.start()
    #assert mock.called
    assert len(controller.computes) == 1 # Local compute is created
    assert controller.computes["local"].name == f"{socket.gethostname()} (controller)"
@ -266,8 +267,9 @@ async def test_start_vm(controller):
    with asyncio_patch("gns3server.controller.gns3vm.vmware_gns3_vm.VMwareGNS3VM.start") as mock:
        with asyncio_patch("gns3server.controller.gns3vm.GNS3VM._check_network"):
            with asyncio_patch("gns3server.controller.compute.Compute.connect"):
                await controller.start()
                assert mock.called
                with asyncio_patch("gns3server.controller.Controller._install_builtin_disks", return_value=[]):
                    await controller.start()
                    assert mock.called
    assert "local" in controller.computes
    assert "vm" in controller.computes
    assert len(controller.computes) == 2 # Local compute and vm are created
@ -356,7 +358,7 @@ async def test_install_base_configs(controller, config, tmpdir):
    with open(str(tmpdir / 'iou_l2_base_startup-config.txt'), 'w+') as f:
        f.write('test')

    controller._install_base_configs()
    await controller._install_base_configs()
    assert os.path.exists(str(tmpdir / 'iou_l3_base_startup-config.txt'))

    # Check is the file has not been overwritten
@ -385,12 +387,13 @@ async def test_install_base_configs(controller, config, tmpdir):
async def test_install_builtin_disks(controller, config, tmpdir, builtin_disk):

    config.settings.Server.images_path = str(tmpdir)
    controller._install_builtin_disks()
    await controller._install_builtin_disks()
    # we only install Qemu empty disks at this time
    assert os.path.exists(str(tmpdir / "QEMU" / builtin_disk))


def test_appliances(controller, config, tmpdir):
@pytest.mark.asyncio
async def test_appliances(controller, config, tmpdir):

    my_appliance = {
        "name": "My Appliance",
@ -406,7 +409,7 @@ def test_appliances(controller, config, tmpdir):
        json.dump(my_appliance, f)

    config.settings.Server.appliances_path = str(tmpdir)
    controller.appliance_manager.install_builtin_appliances()
    await controller.appliance_manager.install_builtin_appliances()
    controller.appliance_manager.load_appliances()
    assert len(controller.appliance_manager.appliances) > 0
    for appliance in controller.appliance_manager.appliances.values():
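Note: these tests lean on the project-local tests.utils.asyncio_patch helper to replace coroutines; a minimal stand-in built on unittest.mock, shown here only to illustrate the pattern and not as the project's actual implementation:

from unittest import mock

def asyncio_patch(target, *args, **kwargs):
    # AsyncMock makes the patched attribute awaitable while still recording calls,
    # so `with asyncio_patch("pkg.mod.coro", return_value=[]) as m: ...` works.
    return mock.patch(target, new=mock.AsyncMock(*args, **kwargs))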
BIN
tests/resources/empty100G.qcow2
Normal file
Binary file not shown.