Mirror of https://github.com/GNS3/gns3-server.git

Commit 03414993dc: Merge branch 'master' into 1.5
@@ -19,6 +19,7 @@ import aiohttp
 import asyncio
 import json
 import os
+import psutil

 from ...web.route import Route
 from ...schemas.project import PROJECT_OBJECT_SCHEMA, PROJECT_CREATE_SCHEMA, PROJECT_UPDATE_SCHEMA, PROJECT_FILE_LIST_SCHEMA, PROJECT_LIST_SCHEMA

@@ -205,7 +206,7 @@ class ProjectHandler:
         queue = project.get_listen_queue()
         ProjectHandler._notifications_listening.setdefault(project.id, 0)
         ProjectHandler._notifications_listening[project.id] += 1
-        response.write("{\"action\": \"ping\"}\n".encode("utf-8"))
+        response.write("{}\n".format(json.dumps(ProjectHandler._getPingMessage())).encode("utf-8"))
         while True:
             try:
                 (action, msg) = yield from asyncio.wait_for(queue.get(), 5)

@@ -218,11 +219,26 @@ class ProjectHandler:
             except asyncio.futures.CancelledError as e:
                 break
             except asyncio.futures.TimeoutError:
-                response.write("{\"action\": \"ping\"}\n".encode("utf-8"))
+                response.write("{}\n".format(json.dumps(ProjectHandler._getPingMessage())).encode("utf-8"))
         project.stop_listen_queue(queue)
         if project.id in ProjectHandler._notifications_listening:
             ProjectHandler._notifications_listening[project.id] -= 1

+    @classmethod
+    def _getPingMessage(cls):
+        """
+        The ping message is regulary send to the client to
+        keep the connection open. We send with it some informations
+        about server load.
+
+        :returns: hash
+        """
+        stats = {}
+        # Non blocking call in order to get cpu usage. First call will return 0
+        stats["cpu_usage_percent"] = psutil.cpu_percent(interval=None)
+        stats["memory_usage_percent"] = psutil.virtual_memory().percent
+        return {"action": "ping", "event": stats}
+
     @classmethod
     @Route.get(
         r"/projects/{project_id}/files",
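As a quick reference, here is a minimal standalone sketch of the enriched keep-alive payload the handler now streams (assumes psutil is installed; the function name and the print call are illustrative, not the handler's API):

    import json
    import psutil

    def ping_message():
        # Mirrors _getPingMessage(): cpu_percent(interval=None) is non-blocking,
        # so the very first sample returns 0.0
        stats = {"cpu_usage_percent": psutil.cpu_percent(interval=None),
                 "memory_usage_percent": psutil.virtual_memory().percent}
        return {"action": "ping", "event": stats}

    # The notification stream carries one JSON document per line, e.g.
    # {"action": "ping", "event": {"cpu_usage_percent": 7.5, "memory_usage_percent": 42.1}}
    print(json.dumps(ping_message()))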
@@ -458,9 +458,10 @@ class Dynamips(BaseManager):
             nio = NIOLinuxEthernet(node.hypervisor, ethernet_device)
         elif nio_settings["type"] == "nio_tap":
             tap_device = nio_settings["tap_device"]
-            if not is_interface_up(tap_device):
-                raise aiohttp.web.HTTPConflict(text="TAP interface {} is down".format(tap_device))
             nio = NIOTAP(node.hypervisor, tap_device)
+            if not is_interface_up(tap_device):
+                # test after the TAP interface has been created (if it doesn't exist yet)
+                raise aiohttp.web.HTTPConflict(text="TAP interface {} is down".format(tap_device))
         elif nio_settings["type"] == "nio_unix":
             local_file = nio_settings["local_file"]
             remote_file = nio_settings["remote_file"]
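A rough sketch of the reordered check, with stand-ins for the Dynamips NIO class and interface helper (all names below are hypothetical); the point is that the NIO is created first, because doing so may create the TAP interface that is then verified:

    class NIOTAPStub:
        # Stand-in: creating the TAP NIO may bring the interface into existence
        def __init__(self, tap_device):
            self.tap_device = tap_device

    def is_interface_up_stub(name):
        # Stand-in for the real interface check
        return name == "tap0"

    def attach_tap(tap_device):
        nio = NIOTAPStub(tap_device)              # create the NIO first
        if not is_interface_up_stub(tap_device):  # only then verify the interface is up
            raise RuntimeError("TAP interface {} is down".format(tap_device))
        return nio

    print(attach_tap("tap0").tap_device)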
@@ -49,21 +49,17 @@ class Qemu(BaseManager):
         """
         kvm = []

-        try:
-            process = yield from asyncio.create_subprocess_exec("kvm-ok")
-            yield from process.wait()
-        except OSError:
+        if not os.path.exists("/dev/kvm"):
             return kvm

-        if process.returncode == 0:
-            arch = platform.machine()
-            if arch == "x86_64":
-                kvm.append("x86_64")
-                kvm.append("i386")
-            elif arch == "i386":
-                kvm.append("i386")
-            else:
-                kvm.append(platform.machine())
+        arch = platform.machine()
+        if arch == "x86_64":
+            kvm.append("x86_64")
+            kvm.append("i386")
+        elif arch == "i386":
+            kvm.append("i386")
+        else:
+            kvm.append(platform.machine())
         return kvm

     @staticmethod
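The detection no longer shells out to kvm-ok; it simply checks for the /dev/kvm device node and maps the host architecture. A minimal standalone sketch of that logic (the function name is illustrative):

    import os
    import platform

    def detect_kvm_archs():
        # KVM is only usable if the kernel exposes /dev/kvm
        if not os.path.exists("/dev/kvm"):
            return []
        arch = platform.machine()
        if arch == "x86_64":
            return ["x86_64", "i386"]   # a 64-bit host can accelerate both
        return [arch]

    print(detect_kvm_archs())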
@@ -80,10 +80,12 @@ class QemuVM(BaseVM):
         try:
             self.qemu_path = qemu_path
         except QemuError as e:
+            # If the binary is not found for topologies 1.4 and later
+            # search via the platform otherwise use the binary name
             if platform:
                 self.platform = platform
             else:
-                raise e
+                self.qemu_path = os.path.basename(qemu_path)
         else:
             self.platform = platform

@@ -694,6 +696,8 @@ class QemuVM(BaseVM):
         log.info('QEMU VM "{name}" [{id}] has set the QEMU initrd path to {initrd}'.format(name=self._name,
                                                                                            id=self._id,
                                                                                            initrd=initrd))
+        if "asa" in initrd:
+            self.project.emit("log.warning", {"message": "Warning ASA 8 is not officialy supported by GNS3 and Cisco, we recommend to use ASAv. Depending of your hardware this could not work or you could be limited to one instance."})
         self._initrd = initrd

     @property

@@ -1498,7 +1502,6 @@ class QemuVM(BaseVM):
                 answer[field] = getattr(self, field)
             except AttributeError:
                 pass

         answer["hda_disk_image"] = self.manager.get_relative_image_path(self._hda_disk_image)
         answer["hda_disk_image_md5sum"] = md5sum(self._hda_disk_image)
         answer["hdb_disk_image"] = self.manager.get_relative_image_path(self._hdb_disk_image)
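A hedged sketch of the new constructor fallback when the configured QEMU binary cannot be resolved: prefer an explicit platform, otherwise keep only the binary name so it can be located later. The exception class and return shape below are simplified stand-ins, not the QemuVM API:

    import os

    class BinaryNotFound(Exception):
        # Stand-in for QemuError raised when qemu_path does not resolve to a known binary
        pass

    def fallback(qemu_path, platform=None):
        # Mirrors the new except-branch of QemuVM.__init__
        try:
            raise BinaryNotFound(qemu_path)   # pretend resolution failed
        except BinaryNotFound:
            if platform:
                return {"platform": platform}
            return {"qemu_path": os.path.basename(qemu_path)}

    print(fallback("/usr/fake/bin/qemu-system-x86_64"))        # {'qemu_path': 'qemu-system-x86_64'}
    print(fallback("/usr/fake/bin/qemu-system-64", "x86_64"))  # {'platform': 'x86_64'}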
@@ -48,6 +48,7 @@ class VMware(BaseManager):

         super().__init__()
         self._execute_lock = asyncio.Lock()
+        self._vmware_inventory_lock = asyncio.Lock()
         self._vmrun_path = None
         self._vmnets = []
         self._vmnet_start_range = 2

@@ -191,9 +192,11 @@ class VMware(BaseManager):
                 if int(version) < 6:
                     raise VMwareError("Using VMware Player requires version 6 or above")
             if version is None:
-                log.warning("Could not find VMware version")
+                log.warning("Could not find VMware version. Output of VMware: {}".format(output))
+                raise VMwareError("Could not find VMware version. Output of VMware: {}".format(output))
         except (OSError, subprocess.SubprocessError) as e:
             log.error("Error while looking for the VMware version: {}".format(e))
+            raise VMwareError("Error while looking for the VMware version: {}".format(e))

     @staticmethod
     def _get_vmnet_interfaces_registry():

@@ -355,6 +358,39 @@ class VMware(BaseManager):

         return stdout_data.decode("utf-8", errors="ignore").splitlines()

+    @asyncio.coroutine
+    def remove_from_vmware_inventory(self, vmx_path):
+        """
+        Removes a linked clone from the VMware inventory file.
+
+        :param vmx_path: path of the linked clone VMX file
+        """
+
+        with (yield from self._vmware_inventory_lock):
+            inventory_path = self.get_vmware_inventory_path()
+            if os.path.exists(inventory_path):
+                try:
+                    inventory_pairs = self.parse_vmware_file(inventory_path)
+                except OSError as e:
+                    log.warning('Could not read VMware inventory file "{}": {}'.format(inventory_path, e))
+                    return
+
+                vmlist_entry = None
+                for name, value in inventory_pairs.items():
+                    if value == vmx_path:
+                        vmlist_entry = name.split(".", 1)[0]
+                        break
+
+                if vmlist_entry is not None:
+                    for name in inventory_pairs.keys():
+                        if name.startswith(vmlist_entry):
+                            del inventory_pairs[name]
+
+                try:
+                    self.write_vmware_file(inventory_path, inventory_pairs)
+                except OSError as e:
+                    raise VMwareError('Could not write VMware inventory file "{}": {}'.format(inventory_path, e))
+
     @staticmethod
     def parse_vmware_file(path):
         """
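The new helper is a lock-serialized read-modify-write of the inventory file: parse it, drop every vmlistN.* entry whose vmlistN.config value points at the clone's VMX path, and write it back. Below is a minimal synchronous sketch of the pruning step on an already-parsed dict (the sample keys and paths are made up; file handling and the asyncio lock are omitted, and the key copy is just one safe way to delete while scanning):

    def prune_inventory(inventory_pairs, vmx_path):
        # Find the vmlistN entry whose path matches the clone being removed
        vmlist_entry = None
        for name, value in inventory_pairs.items():
            if value == vmx_path:
                vmlist_entry = name.split(".", 1)[0]
                break
        if vmlist_entry is not None:
            # Copy the keys so entries can be deleted while scanning
            for name in list(inventory_pairs):
                if name.startswith(vmlist_entry):
                    del inventory_pairs[name]
        return inventory_pairs

    inventory = {
        "vmlist1.config": "/vms/clone1/clone1.vmx",
        "vmlist1.DisplayName": "clone1",
        "vmlist2.config": "/vms/other/other.vmx",
    }
    print(prune_inventory(inventory, "/vms/clone1/clone1.vmx"))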
@@ -375,6 +375,8 @@ class VMwareVM(BaseVM):
         vnet = "ethernet{}.vnet".format(adapter_number)
         if vnet not in self._vmx_pairs:
             raise VMwareError("vnet {} not in VMX file".format(vnet))
+        if not self._ubridge_hypervisor:
+            raise VMwareError("Cannot start the packet capture: uBridge is not running")
         yield from self._ubridge_hypervisor.send('bridge start_capture {name} "{output_file}"'.format(name=vnet,
                                                                                                        output_file=output_file))

@@ -389,6 +391,8 @@ class VMwareVM(BaseVM):
         vnet = "ethernet{}.vnet".format(adapter_number)
         if vnet not in self._vmx_pairs:
             raise VMwareError("vnet {} not in VMX file".format(vnet))
+        if not self._ubridge_hypervisor:
+            raise VMwareError("Cannot stop the packet capture: uBridge is not running")
         yield from self._ubridge_hypervisor.send("bridge stop_capture {name}".format(name=vnet))

     def check_hw_virtualization(self):

@@ -560,31 +564,7 @@ class VMwareVM(BaseVM):
                 pass

         if self._linked_clone:
-            # clean the VMware inventory path from this linked clone
-            inventory_path = self.manager.get_vmware_inventory_path()
-            inventory_pairs = {}
-            if os.path.exists(inventory_path):
-                try:
-                    inventory_pairs = self.manager.parse_vmware_file(inventory_path)
-                except OSError as e:
-                    log.warning('Could not read VMware inventory file "{}": {}'.format(inventory_path, e))
-                    return
-
-            vmlist_entry = None
-            for name, value in inventory_pairs.items():
-                if value == self._vmx_path:
-                    vmlist_entry = name.split(".", 1)[0]
-                    break
-
-            if vmlist_entry is not None:
-                for name in inventory_pairs.keys():
-                    if name.startswith(vmlist_entry):
-                        del inventory_pairs[name]
-
-            try:
-                self.manager.write_vmware_file(inventory_path, inventory_pairs)
-            except OSError as e:
-                raise VMwareError('Could not write VMware inventory file "{}": {}'.format(inventory_path, e))
+            yield from self.manager.remove_from_vmware_inventory(self._vmx_path)

         log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
         self._closed = True
@@ -147,7 +147,9 @@ def run_around_tests(monkeypatch, port_manager):
     port_manager._instance = port_manager
     config = Config.instance()
     config.clear()
-    config.set("Server", "project_directory", tmppath)
+    os.makedirs(os.path.join(tmppath, 'projects'))
+    config.set("Server", "project_directory", os.path.join(tmppath, 'projects'))
+    config.set("Server", "images_path", os.path.join(tmppath, 'images'))
     config.set("Server", "auth", False)

     # Prevent executions of the VM if we forgot to mock something

@@ -158,7 +160,7 @@ def run_around_tests(monkeypatch, port_manager):
     # Force turn off KVM because it's not available on CI
     config.set("Qemu", "enable_kvm", False)

-    monkeypatch.setattr("gns3server.modules.project.Project._get_default_project_directory", lambda *args: tmppath)
+    monkeypatch.setattr("gns3server.modules.project.Project._get_default_project_directory", lambda *args: os.path.join(tmppath, 'projects'))

     # Force sys.platform to the original value. Because it seem not be restore correctly at each tests
     sys.platform = sys.original_platform
@@ -207,9 +207,9 @@ def test_notification(server, project, loop):
     @asyncio.coroutine
     def go(future):
         response = yield from aiohttp.request("GET", server.get_url("/projects/{project_id}/notifications".format(project_id=project.id), 1))
-        response.body = yield from response.content.read(19)
+        response.body = yield from response.content.read(200)
         project.emit("vm.created", {"a": "b"})
-        response.body += yield from response.content.read(47)
+        response.body += yield from response.content.read(50)
         response.close()
         future.set_result(response)

@@ -217,7 +217,9 @@ def test_notification(server, project, loop):
     asyncio.async(go(future))
     response = loop.run_until_complete(future)
     assert response.status == 200
-    assert response.body == b'{"action": "ping"}\n{"action": "vm.created", "event": {"a": "b"}}\n'
+    assert b'"action": "ping"' in response.body
+    assert b'"cpu_usage_percent"' in response.body
+    assert b'{"action": "vm.created", "event": {"a": "b"}}\n' in response.body


 def test_notification_invalid_id(server):
@@ -23,7 +23,6 @@ from tests.utils import asyncio_patch
 from unittest.mock import patch
 from gns3server.config import Config
-


 @pytest.fixture
 def fake_qemu_bin():

@@ -40,7 +39,10 @@ def fake_qemu_bin():
 @pytest.fixture
 def fake_qemu_vm(tmpdir):

-    bin_path = os.path.join(str(tmpdir / "linux.img"))
+    img_dir = Config.instance().get_section_config("Server").get("images_path")
+    img_dir = os.path.join(img_dir, "QEMU")
+    os.makedirs(img_dir)
+    bin_path = os.path.join(img_dir, "linux载.img")
     with open(bin_path, "w+") as f:
         f.write("1")
     os.chmod(bin_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

@@ -86,15 +88,17 @@ def test_qemu_create_platform(server, project, base_params, fake_qemu_bin):
 def test_qemu_create_with_params(server, project, base_params, fake_qemu_vm):
     params = base_params
     params["ram"] = 1024
-    params["hda_disk_image"] = "linux.img"
+    params["hda_disk_image"] = "linux载.img"

     response = server.post("/projects/{project_id}/qemu/vms".format(project_id=project.id), params, example=True)

     assert response.status == 201
     assert response.route == "/projects/{project_id}/qemu/vms"
     assert response.json["name"] == "PC TEST 1"
     assert response.json["project_id"] == project.id
     assert response.json["ram"] == 1024
-    assert response.json["hda_disk_image"] == "linux.img"
+    assert response.json["hda_disk_image"] == "linux载.img"
+    assert response.json["hda_disk_image_md5sum"] == "c4ca4238a0b923820dcc509a6f75849b"
+

 def test_qemu_get(server, project, vm):

@@ -220,10 +224,9 @@ def test_qemu_list_binaries_filter(server, vm):

 def test_vms(server, tmpdir, fake_qemu_vm):

-    with patch("gns3server.modules.Qemu.get_images_directory", return_value=str(tmpdir), example=True):
-        response = server.get("/qemu/vms")
+    response = server.get("/qemu/vms")
     assert response.status == 200
-    assert response.json == [{"filename": "linux.img", "path": "linux.img"}]
+    assert response.json == [{"filename": "linux载.img", "path": "linux载.img"}]


 def test_upload_vm(server, tmpdir):
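The new hda_disk_image_md5sum assertion is simply the MD5 of the one-character body ("1") that the fake_qemu_vm fixture writes into the image file; a quick standalone check:

    import hashlib

    # The fake_qemu_vm fixture writes the single character "1" into linux载.img
    assert hashlib.md5(b"1").hexdigest() == "c4ca4238a0b923820dcc509a6f75849b"
    print("ok")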
@@ -177,19 +177,15 @@ def test_create_image_exist(loop, tmpdir, fake_qemu_img_binary):
     assert not process.called


-def test_get_kvm_archs_no_kvm(loop):
-    with asyncio_patch("asyncio.create_subprocess_exec", side_effect=FileNotFoundError('kvm-ok')):
-        archs = loop.run_until_complete(asyncio.async(Qemu.get_kvm_archs()))
-        assert archs == []
-
-
 def test_get_kvm_archs_kvm_ok(loop):

-    process = MagicMock()
-    with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
-        process.returncode = 0
+    with patch("os.path.exists", return_value=True):
         archs = loop.run_until_complete(asyncio.async(Qemu.get_kvm_archs()))
         if platform.machine() == 'x86_64':
             assert archs == ['x86_64', 'i386']
         else:
             assert archs == platform.machine()
+
+    with patch("os.path.exists", return_value=False):
+        archs = loop.run_until_complete(asyncio.async(Qemu.get_kvm_archs()))
+        assert archs == []
@@ -85,6 +85,22 @@ def test_vm(project, manager, fake_qemu_binary):
     assert vm.id == "00010203-0405-0607-0809-0a0b0c0d0e0f"


+def test_vm_invalid_qemu_with_platform(project, manager, fake_qemu_binary):
+
+    vm = QemuVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager, qemu_path="/usr/fake/bin/qemu-system-64", platform="x86_64")
+
+    assert vm.qemu_path == fake_qemu_binary
+    assert vm.platform == "x86_64"
+
+
+def test_vm_invalid_qemu_without_platform(project, manager, fake_qemu_binary):
+
+    vm = QemuVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager, qemu_path="/usr/fake/bin/qemu-system-x86_64")
+
+    assert vm.qemu_path == fake_qemu_binary
+    assert vm.platform == "x86_64"
+
+
 def test_is_running(vm, running_subprocess_mock):

     vm._process = None

@@ -459,6 +475,30 @@ def test_hdd_disk_image(vm, tmpdir):
     assert vm.hdd_disk_image == force_unix_path(str(tmpdir / "QEMU" / "test"))


+def test_initrd(vm, tmpdir):
+
+    vm.manager.config.set("Server", "images_path", str(tmpdir))
+
+    with patch("gns3server.modules.project.Project.emit") as mock:
+        vm.initrd = str(tmpdir / "test")
+        assert vm.initrd == force_unix_path(str(tmpdir / "test"))
+        vm.initrd = "test"
+        assert vm.initrd == force_unix_path(str(tmpdir / "QEMU" / "test"))
+        assert not mock.called
+
+
+def test_initrd_asa(vm, tmpdir):
+
+    vm.manager.config.set("Server", "images_path", str(tmpdir))
+
+    with patch("gns3server.modules.project.Project.emit") as mock:
+        vm.initrd = str(tmpdir / "asa842-initrd.gz")
+        assert vm.initrd == force_unix_path(str(tmpdir / "asa842-initrd.gz"))
+        vm.initrd = "asa842-initrd.gz"
+        assert vm.initrd == force_unix_path(str(tmpdir / "QEMU" / "asa842-initrd.gz"))
+        assert mock.called
+
+
 def test_options(linux_platform, vm):
     vm.kvm = False
     vm.options = "-usb"
@@ -21,13 +21,13 @@ from gns3server.utils.images import md5sum, remove_checksum


 def test_md5sum(tmpdir):
-    fake_img = str(tmpdir / 'hello')
+    fake_img = str(tmpdir / 'hello载')

     with open(fake_img, 'w+') as f:
         f.write('hello')

     assert md5sum(fake_img) == '5d41402abc4b2a76b9719d911017c592'
-    with open(str(tmpdir / 'hello.md5sum')) as f:
+    with open(str(tmpdir / 'hello载.md5sum')) as f:
         assert f.read() == '5d41402abc4b2a76b9719d911017c592'