gns3-server (mirror of https://github.com/GNS3/gns3-server.git)

commit ae1e027031
parent 597e1e618d

Raise an error when a non-linked-clone VM is used twice

Fixes https://github.com/GNS3/gns3-gui/issues/1593
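Background for the change: a linked clone boots from a private copy-on-write overlay of its base, so many nodes can share one base image. A node created without linked clones writes straight to the base disk image (QEMU), the registered VM (VirtualBox), or the .vmx file (VMware), so letting two such nodes share the same base silently corrupts both. This commit moves the linked_clone flag up into BaseNode and makes each backend refuse to reuse the resource. A minimal stand-alone sketch of the failure mode and the guard (illustrative classes, not the GNS3 API):

    class Node:
        def __init__(self, name, disk_image, linked_clone):
            self.name = name
            self.disk_image = disk_image
            self.linked_clone = linked_clone

    nodes = []

    def add_node(node):
        # Without a linked clone the node writes to the base image itself,
        # so the same image must not back two nodes at once.
        if not node.linked_clone:
            for other in nodes:
                if other.disk_image == node.disk_image:
                    raise ValueError("{} is already used by {}".format(
                        node.disk_image, other.name))
        nodes.append(node)

    add_node(Node("R1", "ios.qcow2", linked_clone=False))
    try:
        add_node(Node("R2", "ios.qcow2", linked_clone=False))
    except ValueError as e:
        print(e)  # ios.qcow2 is already used by R1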
@@ -34,6 +34,7 @@ from ..config import Config
 from ..utils.asyncio import wait_run_in_executor
 from ..utils import force_unix_path
 from .project_manager import ProjectManager
+from .port_manager import PortManager
 
 from .nios.nio_udp import NIOUDP
 from .nios.nio_tap import NIOTAP
@@ -102,7 +103,8 @@ class BaseManager:
 
         :returns: Port manager
         """
+        if self._port_manager is None:
+            self._port_manager = PortManager.instance()
         return self._port_manager
 
     @port_manager.setter
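The port_manager property above previously returned whatever had been injected into _port_manager; it now falls back lazily to the process-wide PortManager singleton (hence the new import in the first hunk), so callers no longer have to assign a port manager before touching a compute manager. A sketch of the underlying singleton pattern used by the GNS3 managers (simplified stand-in, not the real class):

    class PortManager:
        _instance = None

        @classmethod
        def instance(cls):
            # Create the singleton on first use; reuse it afterwards.
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance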
@@ -518,3 +520,9 @@ class BaseManager:
             md5sum(path)
         except OSError as e:
             raise aiohttp.web.HTTPConflict(text="Could not write image: {} because {}".format(filename, e))
+
+    def reset(self):
+        """
+        Reset module for tests
+        """
+        self._nodes = {}
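reset() exists because the compute managers are singletons: nodes accumulated in _nodes during one test would otherwise still be visible to the duplicate checks added later in this commit. A hypothetical fixture making use of it (the import path mirrors the test files further down; treat the exact usage as an assumption):

    import pytest
    from gns3server.compute.qemu import Qemu

    @pytest.fixture(autouse=True)
    def clean_manager():
        yield
        # Drop any nodes the test registered so the next test starts clean.
        Qemu.instance().reset()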
@@ -49,13 +49,15 @@ class BaseNode:
     :param console: TCP console port
     :param aux: TCP aux console port
     :param allocate_aux: Boolean if true will allocate an aux console port
+    :param linked_clone: The node base image is duplicated as an overlay (each node's data is independent)
     """
 
-    def __init__(self, name, node_id, project, manager, console=None, console_type="telnet", aux=None, allocate_aux=False):
+    def __init__(self, name, node_id, project, manager, console=None, console_type="telnet", aux=None, allocate_aux=False, linked_clone=True):
 
         self._name = name
         self._usage = ""
         self._id = node_id
+        self._linked_clone = linked_clone
         self._project = project
         self._manager = manager
         self._console = console
@@ -104,6 +106,14 @@ class BaseNode:
         if os.path.exists(self._temporary_directory):
             shutil.rmtree(self._temporary_directory, ignore_errors=True)
 
+    @property
+    def linked_clone(self):
+        return self._linked_clone
+
+    @linked_clone.setter
+    def linked_clone(self, val):
+        self._linked_clone = val
+
     @property
     def status(self):
         """
@@ -69,7 +69,6 @@ class QemuVM(BaseNode):
         server_config = manager.config.get_section_config("Server")
         self._host = server_config.get("host", "127.0.0.1")
         self._monitor_host = server_config.get("monitor_host", "127.0.0.1")
-        self._linked_clone = linked_clone
         self._process = None
         self._cpulimit_process = None
         self._monitor = None
@@ -197,6 +196,24 @@ class QemuVM(BaseNode):
         else:
             self.qemu_path = "qemu-system-{}".format(platform)
+
+    def _disk_setter(self, variable, value):
+        """
+        Used by the disk image setters to check and apply modifications
+
+        :param variable: Variable name in the class
+        :param value: New disk value
+        """
+        value = self.manager.get_abs_image_path(value)
+        if not self.linked_clone:
+            for node in self.manager.nodes:
+                if node != self and getattr(node, variable) == value:
+                    raise QemuError("Sorry, a node without the linked base setting enabled can only be used once on your server. {} is already used by {}".format(value, node.name))
+        setattr(self, "_" + variable, value)
+        log.info('QEMU VM "{name}" [{id}] has set the QEMU {variable} path to {disk_image}'.format(name=self._name,
+                                                                                                   variable=variable,
+                                                                                                   id=self._id,
+                                                                                                   disk_image=value))
 
     @property
     def hda_disk_image(self):
         """
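_disk_setter folds together the path resolution, the duplicate-image check, and the log message that the four setters below used to repeat; passing the attribute name as a string lets getattr/setattr serve hda through hdd with one helper. An alternative seen in similar codebases is to generate the properties themselves from a factory (illustrative sketch only, not what this commit does):

    def disk_image_property(variable):
        def getter(self):
            return getattr(self, "_" + variable)

        def setter(self, value):
            self._disk_setter(variable, value)

        return property(getter, setter)

    class VM:
        hda_disk_image = disk_image_property("hda_disk_image")
        hdb_disk_image = disk_image_property("hdb_disk_image")

        def __init__(self):
            self._hda_disk_image = self._hdb_disk_image = ""

        def _disk_setter(self, variable, value):
            # Duplicate check elided; see the QemuVM hunk above.
            setattr(self, "_" + variable, value)

The commit keeps explicit setters instead, which preserves the per-disk docstrings shown in the next four hunks.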
@@ -214,10 +231,7 @@ class QemuVM(BaseNode):
 
         :param hda_disk_image: QEMU hda disk image path
         """
-        self._hda_disk_image = self.manager.get_abs_image_path(hda_disk_image)
-        log.info('QEMU VM "{name}" [{id}] has set the QEMU hda disk image path to {disk_image}'.format(name=self._name,
-                                                                                                       id=self._id,
-                                                                                                       disk_image=self._hda_disk_image))
+        self._disk_setter("hda_disk_image", hda_disk_image)
 
     @property
     def hdb_disk_image(self):

@@ -237,10 +251,7 @@ class QemuVM(BaseNode):
         :param hdb_disk_image: QEMU hdb disk image path
         """
 
-        self._hdb_disk_image = self.manager.get_abs_image_path(hdb_disk_image)
-        log.info('QEMU VM "{name}" [{id}] has set the QEMU hdb disk image path to {disk_image}'.format(name=self._name,
-                                                                                                       id=self._id,
-                                                                                                       disk_image=self._hdb_disk_image))
+        self._disk_setter("hdb_disk_image", hdb_disk_image)
 
     @property
     def hdc_disk_image(self):

@@ -260,10 +271,7 @@ class QemuVM(BaseNode):
         :param hdc_disk_image: QEMU hdc disk image path
         """
 
-        self._hdc_disk_image = self.manager.get_abs_image_path(hdc_disk_image)
-        log.info('QEMU VM "{name}" [{id}] has set the QEMU hdc disk image path to {disk_image}'.format(name=self._name,
-                                                                                                       id=self._id,
-                                                                                                       disk_image=self._hdc_disk_image))
+        self._disk_setter("hdc_disk_image", hdc_disk_image)
 
     @property
     def hdd_disk_image(self):

@@ -283,11 +291,7 @@ class QemuVM(BaseNode):
         :param hdd_disk_image: QEMU hdd disk image path
         """
 
-        self._hdd_disk_image = hdd_disk_image
-        self._hdd_disk_image = self.manager.get_abs_image_path(hdd_disk_image)
-        log.info('QEMU VM "{name}" [{id}] has set the QEMU hdd disk image path to {disk_image}'.format(name=self._name,
-                                                                                                       id=self._id,
-                                                                                                       disk_image=self._hdd_disk_image))
+        self._disk_setter("hdd_disk_image", hdd_disk_image)
 
     @property
     def hda_disk_interface(self):
@@ -1312,7 +1316,7 @@ class QemuVM(BaseNode):
                    raise QemuError("{} disk image '{}' linked to '{}' is not accessible".format(disk_name, disk_image, os.path.realpath(disk_image)))
                else:
                    raise QemuError("{} disk image '{}' is not accessible".format(disk_name, disk_image))
-            if self._linked_clone:
+            if self.linked_clone:
                disk = os.path.join(self.working_dir, "{}_disk.qcow2".format(disk_name))
                if not os.path.exists(disk):
                    # create the disk
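For context on the {}_disk.qcow2 branch: when linked_clone is enabled, the node does not boot the base image; it creates a per-node qcow2 overlay in its working directory whose backing file is the base image, which is exactly why linked clones may share a base while non-linked clones may not. Roughly what that creation amounts to (hedged sketch; the real code drives qemu-img asynchronously and the paths here are illustrative):

    import subprocess

    base_image = "/opt/gns3/images/QEMU/ios.qcow2"    # shared, read-only base
    overlay = "/opt/gns3/projects/p1/hda_disk.qcow2"  # per-node copy-on-write

    # Older qemu-img syntax; newer releases also want "-F qcow2" to name the
    # backing file's format explicitly.
    subprocess.check_call(["qemu-img", "create",
                           "-o", "backing_file={}".format(base_image),
                           "-f", "qcow2", overlay])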
@@ -50,12 +50,11 @@ class VirtualBoxVM(BaseNode):
     VirtualBox VM implementation.
     """
 
-    def __init__(self, name, node_id, project, manager, vmname, linked_clone, console=None, adapters=0):
+    def __init__(self, name, node_id, project, manager, vmname, linked_clone=False, console=None, adapters=0):
 
-        super().__init__(name, node_id, project, manager, console=console)
+        super().__init__(name, node_id, project, manager, console=console, linked_clone=linked_clone)
 
         self._maximum_adapters = 8
-        self._linked_clone = linked_clone
         self._system_properties = {}
         self._telnet_server_thread = None
         self._serial_pipe = None

@@ -67,6 +66,11 @@ class VirtualBoxVM(BaseNode):
         self._headless = False
         self._acpi_shutdown = False
         self._enable_remote_console = False
+        if not self.linked_clone:
+            for node in self.manager.nodes:
+                if node.vmname == vmname:
+                    raise VirtualBoxError("Sorry, a node without the linked clone setting enabled can only be used once on your server. {} is already used by {}".format(vmname, node.name))
+
         self._vmname = vmname
         self._use_any_adapter = False
         self._ram = 0

@@ -87,8 +91,8 @@ class VirtualBoxVM(BaseNode):
                 "ram": self.ram,
                 "status": self.status,
                 "use_any_adapter": self.use_any_adapter,
-                "linked_clone": self._linked_clone}
-        if self._linked_clone:
+                "linked_clone": self.linked_clone}
+        if self.linked_clone:
             json["node_directory"] = self.working_dir
         else:
             json["node_directory"] = None

@@ -156,7 +160,7 @@ class VirtualBoxVM(BaseNode):
             raise VirtualBoxError("The VirtualBox API version is lower than 4.3")
         log.info("VirtualBox VM '{name}' [{id}] created".format(name=self.name, id=self.id))
 
-        if self._linked_clone:
+        if self.linked_clone:
             if self.id and os.path.isdir(os.path.join(self.working_dir, self._vmname)):
                 vbox_file = os.path.join(self.working_dir, self._vmname, self._vmname + ".vbox")
                 yield from self.manager.execute("registervm", [vbox_file])

@@ -377,7 +381,7 @@ class VirtualBoxVM(BaseNode):
         """
 
        hdd_table = []
-        if self._linked_clone:
+        if self.linked_clone:
            if os.path.exists(self.working_dir):
                hdd_files = yield from self._get_all_hdd_files()
                vm_info = yield from self._get_vm_info()

@@ -446,7 +450,7 @@ class VirtualBoxVM(BaseNode):
            self.acpi_shutdown = False
            yield from self.stop()
 
-        if self._linked_clone:
+        if self.linked_clone:
            hdd_table = yield from self.save_linked_hdds_info()
            for hdd in hdd_table.copy():
                log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,

@@ -593,7 +597,7 @@ class VirtualBoxVM(BaseNode):
         :param vmname: VirtualBox VM name
         """
 
-        if self._linked_clone:
+        if self.linked_clone:
             yield from self._modify_vm('--name "{}"'.format(vmname))
 
         log.info("VirtualBox VM '{name}' [{id}] has set the VM name to '{vmname}'".format(name=self.name, id=self.id, vmname=vmname))
@@ -48,11 +48,10 @@ class VMwareVM(BaseNode):
     VMware VM implementation.
     """
 
-    def __init__(self, name, node_id, project, manager, vmx_path, linked_clone, console=None):
+    def __init__(self, name, node_id, project, manager, vmx_path, linked_clone=False, console=None):
 
-        super().__init__(name, node_id, project, manager, console=console)
+        super().__init__(name, node_id, project, manager, console=console, linked_clone=linked_clone)
 
-        self._linked_clone = linked_clone
         self._vmx_pairs = OrderedDict()
         self._telnet_server_thread = None
         self._serial_pipe = None

@@ -61,6 +60,11 @@ class VMwareVM(BaseNode):
         self._started = False
         self._closed = False
 
+        if not self.linked_clone:
+            for node in self.manager.nodes:
+                if node.vmx_path == vmx_path:
+                    raise VMwareError("Sorry, a node without the linked clone setting enabled can only be used once on your server. {} is already used by {}".format(vmx_path, node.name))
+
         # VMware VM settings
         self._headless = False
         self._vmx_path = vmx_path

@@ -89,7 +93,7 @@ class VMwareVM(BaseNode):
                 "use_any_adapter": self.use_any_adapter,
                 "status": self.status,
                 "node_directory": self.working_dir,
-                "linked_clone": self._linked_clone}
+                "linked_clone": self.linked_clone}
         return json
 
     @property

@@ -141,7 +145,7 @@ class VMwareVM(BaseNode):
         """
 
         yield from self.manager.check_vmrun_version()
-        if self._linked_clone and not os.path.exists(os.path.join(self.working_dir, os.path.basename(self._vmx_path))):
+        if self.linked_clone and not os.path.exists(os.path.join(self.working_dir, os.path.basename(self._vmx_path))):
             if self.manager.host_type == "player":
                 raise VMwareError("Linked clones are not supported by VMware Player")
             # create the base snapshot for linked clones

@@ -555,7 +559,7 @@ class VMwareVM(BaseNode):
         except VMwareError:
             pass
 
-        if self._linked_clone:
+        if self.linked_clone:
             yield from self.manager.remove_from_vmware_inventory(self._vmx_path)
 
     @property
@@ -56,7 +56,7 @@ class VirtualBoxHandler:
             request.match_info["project_id"],
             request.json.get("node_id"),
             request.json.pop("vmname"),
-            request.json.pop("linked_clone"),
+            linked_clone=request.json.pop("linked_clone"),
             console=request.json.get("console", None),
             adapters=request.json.get("adapters", 0))
 
@@ -56,7 +56,7 @@ class VMwareHandler:
             request.match_info["project_id"],
             request.json.get("node_id"),
             request.json.pop("vmx_path"),
-            request.json.pop("linked_clone"),
+            linked_clone=request.json.pop("linked_clone"),
             console=request.json.get("console", None))
 
         for name, value in request.json.items():
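The two handler hunks above switch linked_clone from a positional to a keyword argument. That matters because the constructors now declare it with a default (linked_clone=False): positional calls bind by position, so any future parameter inserted before linked_clone would silently misassign it. A toy example of the hazard keyword calls avoid (not GNS3 code):

    def make_vm(name, vmname, linked_clone=False, console=None):
        return (name, vmname, linked_clone, console)

    # Both calls agree today, but only the keyword form survives a
    # reordering of the defaulted parameters.
    assert make_vm("R1", "vm1", True) == make_vm("R1", "vm1", linked_clone=True)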
@@ -33,7 +33,7 @@ from unittest.mock import patch, MagicMock, PropertyMock, call
 from gns3server.config import Config
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture()
 def manager(port_manager):
     m = Docker.instance()
     m.port_manager = port_manager

@@ -28,7 +28,7 @@ from gns3server.compute.dynamips.dynamips_error import DynamipsError
 from unittest.mock import patch
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = Dynamips.instance()
     m.port_manager = port_manager

@@ -26,7 +26,7 @@ from gns3server.compute.dynamips import Dynamips
 from gns3server.config import Config
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = Dynamips.instance()
     m.port_manager = port_manager

@@ -39,7 +39,7 @@ if not sys.platform.startswith("win"):
 from gns3server.config import Config
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = IOU.instance()
     m.port_manager = port_manager

@@ -35,7 +35,7 @@ from gns3server.utils import force_unix_path, macaddress_to_int, int_to_macaddress
 from gns3server.compute.notification_manager import NotificationManager
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = Qemu.instance()
     m.port_manager = port_manager
@@ -601,6 +601,22 @@ def test_hda_disk_image(vm, images_dir):
     assert vm.hda_disk_image == force_unix_path(os.path.join(images_dir, "QEMU", "test2"))
 
 
+def test_hda_disk_image_non_linked_clone(vm, images_dir, project, manager, fake_qemu_binary, fake_qemu_img_binary):
+    """
+    Two non-linked-clone VMs can't use the same image at the same time
+    """
+
+    open(os.path.join(images_dir, "test1"), "w+").close()
+    vm.linked_clone = False
+    vm.hda_disk_image = os.path.join(images_dir, "test1")
+    vm.manager._nodes[vm.id] = vm
+
+    vm2 = QemuVM("test", "00010203-0405-0607-0809-0a0b0c0d0eaa", project, manager, qemu_path=fake_qemu_binary)
+    vm2.linked_clone = False
+    with pytest.raises(QemuError):
+        vm2.hda_disk_image = os.path.join(images_dir, "test1")
+
+
 def test_hda_disk_image_ova(vm, images_dir):
 
     os.makedirs(os.path.join(images_dir, "QEMU", "test.ovf"))
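A note on the new test: the vm fixture builds its QemuVM directly rather than through the manager, so the test registers it in manager._nodes by hand; that dict is what backs manager.nodes, which _disk_setter scans when the second VM's setter runs. This stands in for the registration that BaseManager.create_node() normally performs, roughly (simplified sketch, not the real coroutine):

    def register_node(manager_nodes, node):
        # BaseManager keeps nodes in a dict keyed by node id; the
        # duplicate checks iterate its values.
        manager_nodes[node.id] = node
        return node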
@@ -30,7 +30,7 @@ from gns3server.compute.error import NodeError
 from gns3server.compute.vpcs import VPCS
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture(scope="function")
 def manager(port_manager):
     m = VPCS.instance()
     m.port_manager = port_manager

@@ -33,7 +33,7 @@ from gns3server.compute.vpcs import VPCS, VPCSVM
 from gns3server.config import Config
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture(scope="function")
 def manager(port_manager):
     m = VPCS.instance()
     m.port_manager = port_manager

@@ -30,7 +30,7 @@ from gns3server.compute.virtualbox.virtualbox_error import VirtualBoxError
 from tests.utils import asyncio_patch
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = VirtualBox.instance()
     m.port_manager = port_manager

@@ -24,7 +24,7 @@ from gns3server.compute.virtualbox.virtualbox_error import VirtualBoxError
 from gns3server.compute.virtualbox import VirtualBox
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = VirtualBox.instance()
     m.port_manager = port_manager

@@ -29,7 +29,7 @@ from gns3server.compute.vmware import VMware
 from tests.utils import asyncio_patch
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = VMware.instance()
     m.port_manager = port_manager

@@ -24,7 +24,7 @@ from gns3server.compute.vmware.vmware_error import VMwareError
 from gns3server.compute.vmware import VMware
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = VMware.instance()
     m.port_manager = port_manager

@@ -31,7 +31,7 @@ from gns3server.compute.vpcs import VPCS
 from gns3server.compute.notification_manager import NotificationManager
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture
 def manager(port_manager):
     m = VPCS.instance()
     m.port_manager = port_manager
@@ -82,9 +82,6 @@ def http_server(request, loop, port_manager, monkeypatch, controller):
     app = web.Application()
     for method, route, handler in Route.get_routes():
         app.router.add_route(method, route, handler)
-    for module in MODULES:
-        instance = module.instance()
-        instance.port_manager = port_manager
 
     host = "localhost"
 
@@ -145,7 +142,7 @@ def project(tmpdir):
     return p
 
 
-@pytest.fixture(scope="session")
+@pytest.fixture(scope="function")
 def port_manager():
     """An instance of port manager"""
 
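The scope changes here and in the fixture hunks above follow from the new duplicate checks: the compute managers and the port manager are singletons, and scope="module" or scope="session" fixtures let nodes registered by one test leak into the next, tripping those checks. Function scope, combined with the module._instance = None reset in the next hunk, rebuilds them for every test. The resulting pattern (sketch; Qemu stands in for any compute manager):

    import pytest
    from gns3server.compute.qemu import Qemu

    @pytest.fixture  # the default scope is "function"
    def manager(port_manager):
        m = Qemu.instance()            # fresh singleton: run_around_tests
        m.port_manager = port_manager  # cleared _instance beforehand
        return m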
@@ -198,6 +195,9 @@ def run_around_tests(monkeypatch, port_manager, controller, config):
 
     tmppath = tempfile.mkdtemp()
 
+    for module in MODULES:
+        module._instance = None
+
     port_manager._instance = port_manager
     os.makedirs(os.path.join(tmppath, 'projects'))
     config.set("Server", "projects_path", os.path.join(tmppath, 'projects'))