Mirror of https://github.com/GNS3/gns3-server.git (synced 2024-12-21 05:43:12 +00:00)

Merge branch '2.1' into 2.2

# Conflicts:
#   gns3server/compute/builtin/nodes/cloud.py
#   gns3server/compute/docker/docker_vm.py
#   gns3server/compute/dynamips/nodes/ethernet_switch.py
#   gns3server/compute/iou/iou_vm.py
#   gns3server/compute/qemu/qemu_vm.py
#   gns3server/compute/virtualbox/virtualbox_vm.py
#   gns3server/compute/vmware/vmware_vm.py
#   gns3server/controller/__init__.py
#   gns3server/version.py
#   gns3server/web/web_server.py
commit 9de13c570c

CHANGELOG | 28
@@ -1,5 +1,31 @@
 # Change Log
 
+## 2.1.11 28/09/2018
+
+* Catch some exceptions.
+
+## 2.1.10 15/09/2018
+
+* Include locale information and GNS3 VM version in crash reports.
+* Fix small errors like unhandled exceptions etc.
+* Import encodings.idna to avoid LookupError when standard library is in a zip file.
+* Catch exceptions in various locations to fix small issues reported by Sentry.
+* Check if serial pipe can be opened for VMware and VirtualBox VMs.
+* Improve the invalid port format detection. Fixes https://github.com/GNS3/gns3-gui/issues/2580
+* Update aiohttp version requirement in order to support Python 3.7. Fixes https://github.com/GNS3/gns3-gui/issues/2566
+* Update setup.py and fix minor issues.
+* Catch asyncio.CancelledError when shutting down the server.
+* Report GNS3 VM errors to the GUI server summary. Ref #1359.
+* Replace vboxnet0 (if it does not exist) by the first available vboxnet interface on Windows. Fixes https://github.com/GNS3/gns3-vm/issues/102
+* Check if the VirtualBox host-only network exists when starting a GNS3 VM running on VirtualBox. Ref https://github.com/GNS3/gns3-vm/issues/102
+* Change file timestamps if necessary because ZIP does not support timestamps before 1980. Fixes #1360.
+* Add missing coroutine decorator. Ref https://github.com/GNS3/gns3-gui/issues/2566
+* Refactor asyncio locking system for Python 3.7 support. Ref https://github.com/GNS3/gns3-gui/issues/2566 Ref https://github.com/GNS3/gns3-gui/issues/2568
+* Use asyncio.ensure_future() instead of asyncio.async() with conservative approach to support Python < 3.4.4. Fixes https://github.com/GNS3/gns3-gui/issues/2566
+* Forbid controller and compute servers to be different versions. Report last compute server error to clients and display in the server summary.
+* Fix exception with short names for Dynamips interfaces. Fixes #1386.
+* Add missing Qemu boot priority values. Fixes https://github.com/GNS3/gns3-server/issues/1385
+
 ## 2.1.9 13/08/2018
 
 * Fix some more problems with interface short names. Fixes https://github.com/GNS3/gns3-gui/issues/2562
@@ -655,7 +681,7 @@
 * Fix naming of IOU serial interfaces
 * Improve timeout management
 * When exporting debug information export GNS3 VM vmx content
-* /debug for exporting debug informations
+* /debug for exporting debug information
 * Raise error if using a non linked clone VM twice
 * Fix a possible deadlock at exit
 * Fix import of some old dynamips topologies
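The asyncio.ensure_future() entry above is the kind of change that is usually handled with a small compatibility wrapper. The helper name matches the gns3server.utils.asyncio import that appears in the hunks below, but the body here is a minimal sketch of the usual pattern, not the project's exact code:

import asyncio

def asyncio_ensure_future(*args, **kwargs):
    """Schedule a coroutine, preferring the modern API.

    asyncio.ensure_future() only exists since Python 3.4.4; on older
    interpreters fall back to the deprecated asyncio.async(), which must be
    looked up with getattr() because "async" is a keyword in newer Python.
    """
    try:
        return asyncio.ensure_future(*args, **kwargs)
    except AttributeError:
        return getattr(asyncio, "async")(*args, **kwargs)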
@@ -25,6 +25,13 @@
         "kvm": "require"
     },
     "images": [
+        {
+            "filename": "nxosv-final.9.2.1.qcow2",
+            "version": "9.2.1",
+            "md5sum": "1d7fa4654602d7ffbf62544edfe71986",
+            "filesize": 1330315264,
+            "download_url": "https://software.cisco.com/download/home/286312239/type/282088129/release/9.2%25281%2529"
+        },
         {
             "filename": "nxosv-final.7.0.3.I7.4.qcow2",
             "version": "7.0.3.I7.4",
@@ -85,6 +92,13 @@
         }
     ],
     "versions": [
+        {
+            "name": "9.2.1",
+            "images": {
+                "bios_image": "OVMF-20160813.fd",
+                "hda_disk_image": "nxosv-final.9.2.1.qcow2"
+            }
+        },
         {
             "name": "7.0.3.I7.4",
             "images": {
@@ -29,6 +29,13 @@
         "options": "-smp 2 -cpu host"
     },
     "images": [
+        {
+            "filename": "BIG-IQ-5.4.0.2.24.7467.qcow2",
+            "version": "5.4.0.2",
+            "md5sum": "e3e6389438ba1e1676f507658f767e95",
+            "filesize": 3480748032,
+            "download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-iq/big-iq_cm/5.4.0/english/virtual-edition_base-plus-hf2/&sw=BIG-IQ&pro=big-iq_CM&ver=5.4.0&container=Virtual-Edition_Base-Plus-HF2&file=BIG-IQ-5.4.0.2.24.7467.qcow2.zip"
+        },
         {
             "filename": "BIG-IQ-5.4.0.0.0.7437.qcow2",
             "version": "5.4.0",
@@ -81,6 +88,13 @@
         }
     ],
     "versions": [
+        {
+            "name": "5.4.0.2",
+            "images": {
+                "hda_disk_image": "BIG-IQ-5.4.0.2.24.7467.qcow2",
+                "hdb_disk_image": "empty100G.qcow2"
+            }
+        },
         {
             "name": "5.4.0",
             "images": {
@@ -20,6 +20,14 @@
         "kvm": "require"
     },
     "images": [
+        {
+            "filename": "kali-linux-2018.3-amd64.iso",
+            "version": "2018.3",
+            "md5sum": "6dc3e57177249f73492b9edb95d082d1",
+            "filesize": 3188391936,
+            "download_url": "https://www.kali.org/downloads/",
+            "direct_download_url": "http://cdimage.kali.org/kali-2018.3/kali-linux-2018.3-amd64.iso"
+        },
         {
             "filename": "kali-linux-2018.1-amd64.iso",
             "version": "2018.1",
@@ -79,7 +87,12 @@
     ],
     "versions": [
         {
-            "name": "2018.8",
+            "name": "2018.3",
+            "images": {
+                "cdrom_image": "kali-linux-2018.3-amd64.iso"
+            }
+        }, {
+            "name": "2018.1",
             "images": {
                 "cdrom_image": "kali-linux-2018.1-amd64.iso"
             }
@@ -17,7 +17,6 @@
         "ram": 2048,
         "arch": "x86_64",
         "console_type": "telnet",
-        "boot_priority": "dc",
         "kvm": "allow",
         "process_priority": "normal"
     },
gns3server/appliances/tacacs-gui.gns3a | 43 (new file)

@@ -0,0 +1,43 @@
+{
+    "name": "TacacsGUI",
+    "category": "guest",
+    "description": "TacacsGUI Free Access Control Server for Your Network Devices. GUI for powerful daemon. The project of Alexey Mochalin, based on tacacs daemon by Marc Huber",
+    "vendor_name": "TacacsGUI",
+    "vendor_url": "https://tacacsgui.com/",
+    "documentation_url": "https://tacacsgui.com/documentation/",
+    "product_name": "TacacsGUI",
+    "product_url": "https://drive.google.com/open?id=1U8tbj14NqEyCmarayhZm54qTyjgsJm4B",
+    "registry_version": 3,
+    "status": "stable",
+    "maintainer": "GNS3 Team",
+    "maintainer_email": "developers@gns3.net",
+    "usage": "Credentials: SSH ---> username: root ---> password: 1234 MySQL DB: ---> username: root --> password: tacacs Web interface: ---> username: tacgui ---> password: abc123",
+    "port_name_format": "Port{port1}",
+    "qemu": {
+        "adapter_type": "e1000",
+        "adapters": 1,
+        "ram": 1024,
+        "hda_disk_interface": "ide",
+        "arch": "x86_64",
+        "console_type": "telnet",
+        "boot_priority": "c",
+        "kvm": "allow"
+    },
+    "images": [
+        {
+            "filename": "tac_plus.qcow2",
+            "version": "201710201114",
+            "md5sum": "6b5e66590051124dae586b8640b2eb11",
+            "filesize": 160301056,
+            "download_url": "https://drive.google.com/open?id=1U8tbj14NqEyCmarayhZm54qTyjgsJm4B"
+        }
+    ],
+    "versions": [
+        {
+            "name": "201710201114",
+            "images": {
+                "hda_disk_image": "tac_plus.qcow2"
+            }
+        }
+    ]
+}
@@ -259,7 +259,7 @@ class Cloud(BaseNode):
 
         try:
             output = yield from gns3server.utils.asyncio.subprocess_check_output("networksetup", "-listallhardwareports")
-        except (FileNotFoundError, subprocess.SubprocessError) as e:
+        except (OSError, subprocess.SubprocessError) as e:
             log.warning("Could not execute networksetup: {}".format(e))
             return False
 
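Several hunks in this merge (here and in the IOU, Qemu, node and symbol-handler changes below) widen FileNotFoundError or PermissionError catches to OSError. A quick check of the exception hierarchy shows why that widening is safe; this snippet is illustrative only and is not part of the diff:

import subprocess

# FileNotFoundError and PermissionError are subclasses of OSError in
# Python 3.3+, so `except OSError` still catches everything the old code
# caught, plus other errno-based failures (e.g. "Exec format error").
assert issubclass(FileNotFoundError, OSError)
assert issubclass(PermissionError, OSError)
# SubprocessError is unrelated, which is why it stays listed separately.
assert not issubclass(subprocess.SubprocessError, OSError)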
@@ -30,6 +30,7 @@ from gns3server.utils.asyncio.telnet_server import AsyncioTelnetServer
 from gns3server.utils.asyncio.raw_command_server import AsyncioRawCommandServer
 from gns3server.utils.asyncio import wait_for_file_creation
 from gns3server.utils.asyncio import asyncio_ensure_future
+from gns3server.utils.asyncio import monitor_process
 from gns3server.utils.get_resource import get_resource
 
 from gns3server.ubridge.ubridge_error import UbridgeError, UbridgeNamespaceError
@@ -231,7 +232,10 @@ class DockerVM(BaseNode):
         binds = ["{}:/gns3:ro".format(resources)]
 
         # We mount our own etc/network
-        network_config = self._create_network_config()
+        try:
+            network_config = self._create_network_config()
+        except OSError as e:
+            raise DockerError("Could not create network config in the container: {}".format(e))
         binds.append("{}:/gns3volumes/etc/network:rw".format(network_config))
 
         self._volumes = ["/etc/network"]
@@ -290,11 +294,12 @@ class DockerVM(BaseNode):
         try:
             image_infos = yield from self._get_image_information()
         except DockerHttp404Error:
-            log.info("Image %s is missing pulling it from docker hub", self._image)
+            log.info("Image '{}' is missing, pulling it from Docker hub...".format(self._image))
             yield from self.pull_image(self._image)
             image_infos = yield from self._get_image_information()
-        if image_infos is None:
-            raise DockerError("Can't get image informations, please try again.")
+
+        if image_infos is None:
+            raise DockerError("Cannot get information for image '{}', please try again.".format(self._image))
 
         params = {
             "Hostname": self._name,
@@ -318,7 +323,10 @@ class DockerVM(BaseNode):
         if params["Entrypoint"] is None:
             params["Entrypoint"] = []
         if self._start_command:
-            params["Cmd"] = shlex.split(self._start_command)
+            try:
+                params["Cmd"] = shlex.split(self._start_command)
+            except ValueError as e:
+                raise DockerError("Invalid start command '{}': {}".format(self._start_command, e))
         if len(params["Cmd"]) == 0:
             params["Cmd"] = image_infos.get("Config", {"Cmd": []})["Cmd"]
         if params["Cmd"] is None:
@@ -360,8 +368,7 @@ class DockerVM(BaseNode):
 
         result = yield from self.manager.query("POST", "containers/create", data=params)
         self._cid = result['Id']
-        log.info("Docker container '{name}' [{id}] created".format(
-            name=self._name, id=self._id))
+        log.info("Docker container '{name}' [{id}] created".format(name=self._name, id=self._id))
         return True
 
     def _format_env(self, variables, env):
|
|||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def _start_aux(self):
|
def _start_aux(self):
|
||||||
"""
|
"""
|
||||||
Starts an auxiliary console
|
Start an auxiliary console
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# We can not use the API because docker doesn't expose a websocket api for exec
|
# We can not use the API because docker doesn't expose a websocket api for exec
|
||||||
# https://github.com/GNS3/gns3-gui/issues/1039
|
# https://github.com/GNS3/gns3-gui/issues/1039
|
||||||
process = yield from asyncio.subprocess.create_subprocess_exec(
|
try:
|
||||||
"docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do TERM=vt100 /gns3/bin/busybox sh; done", "/dev/null",
|
process = yield from asyncio.subprocess.create_subprocess_exec(
|
||||||
stdout=asyncio.subprocess.PIPE,
|
"docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do TERM=vt100 /gns3/bin/busybox sh; done", "/dev/null",
|
||||||
stderr=asyncio.subprocess.STDOUT,
|
stdout=asyncio.subprocess.PIPE,
|
||||||
stdin=asyncio.subprocess.PIPE)
|
stderr=asyncio.subprocess.STDOUT,
|
||||||
|
stdin=asyncio.subprocess.PIPE)
|
||||||
|
except OSError as e:
|
||||||
|
raise DockerError("Could not start auxiliary console process: {}".format(e))
|
||||||
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=True, echo=True)
|
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=True, echo=True)
|
||||||
try:
|
try:
|
||||||
self._telnet_servers.append((yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux)))
|
self._telnet_servers.append((yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux)))
|
||||||
@@ -489,21 +499,25 @@ class DockerVM(BaseNode):
         for volume in self._volumes:
             log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(
                 name=self._name, image=self._image, path=volume))
-            process = yield from asyncio.subprocess.create_subprocess_exec(
-                "docker",
-                "exec",
-                self._cid,
-                "/gns3/bin/busybox",
-                "sh",
-                "-c",
-                "("
-                "/gns3/bin/busybox find \"{path}\" -depth -print0"
-                " | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n' > \"{path}/.gns3_perms\""
-                ")"
-                " && /gns3/bin/busybox chmod -R u+rX \"{path}\""
-                " && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\""
-                .format(uid=os.getuid(), gid=os.getgid(), path=volume),
-            )
+
+            try:
+                process = yield from asyncio.subprocess.create_subprocess_exec(
+                    "docker",
+                    "exec",
+                    self._cid,
+                    "/gns3/bin/busybox",
+                    "sh",
+                    "-c",
+                    "("
+                    "/gns3/bin/busybox find \"{path}\" -depth -print0"
+                    " | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n' > \"{path}/.gns3_perms\""
+                    ")"
+                    " && /gns3/bin/busybox chmod -R u+rX \"{path}\""
+                    " && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\""
+                    .format(uid=os.getuid(), gid=os.getgid(), path=volume),
+                )
+            except OSError as e:
+                raise DockerError("Could not fix permissions for {}: {}".format(volume, e))
             yield from process.wait()
 
     @asyncio.coroutine
@@ -514,7 +528,7 @@ class DockerVM(BaseNode):
 
         self._display = self._get_free_display_port()
         if shutil.which("Xvfb") is None or shutil.which("x11vnc") is None:
-            raise DockerError("Please install Xvfb and x11vnc before using the VNC support")
+            raise DockerError("Please install Xvfb and x11vnc before using VNC support")
         self._xvfb_process = yield from asyncio.create_subprocess_exec("Xvfb", "-nolisten", "tcp", ":{}".format(self._display), "-screen", "0", self._console_resolution + "x16")
         # We pass a port for TCPV6 due to a crash in X11VNC if not here: https://github.com/GNS3/gns3-server/issues/569
         self._x11vnc_process = yield from asyncio.create_subprocess_exec("x11vnc", "-forever", "-nopw", "-shared", "-geometry", self._console_resolution, "-display", "WAIT:{}".format(self._display), "-rfbport", str(self.console), "-rfbportv6", str(self.console), "-noncache", "-listen", self._manager.port_manager.console_host)
@@ -522,6 +536,29 @@ class DockerVM(BaseNode):
         x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display))
         yield from wait_for_file_creation(x11_socket)
 
+        #monitor_process(self._xvfb_process, self._xvfb_callback)
+        #monitor_process(self._x11vnc_process, self._x11vnc_callback)
+
+    def _xvfb_callback(self, returncode):
+        """
+        Called when the process has stopped.
+
+        :param returncode: Process returncode
+        """
+
+        if returncode != 0:
+            self.project.emit("log.error", {"message": "The Xvfb process has stopped, return code: {}.".format(returncode)})
+
+    def _x11vnc_callback(self, returncode):
+        """
+        Called when the process has stopped.
+
+        :param returncode: Process returncode
+        """
+
+        if returncode != 0:
+            self.project.emit("log.error", {"message": "The x11vnc process has stopped, return code: {}.".format(returncode)})
+
     @asyncio.coroutine
     def _start_http(self):
         """
@@ -87,6 +87,7 @@ class EthernetSwitch(Device):
         self._mappings = {}
         self._telnet_console = None
         self._telnet_shell = None
+        self._telnet_server = None
         self._console = console
         self._console_type = console_type
 
@@ -233,7 +234,8 @@ class EthernetSwitch(Device):
         """
 
         yield from self._telnet.close()
-        self._telnet_server.close()
+        if self._telnet_server:
+            self._telnet_server.close()
 
         for nio in self._nios.values():
             if nio:
@@ -369,7 +369,7 @@ class IOUVM(BaseNode):
 
         try:
             output = yield from gns3server.utils.asyncio.subprocess_check_output("ldd", self._path)
-        except (FileNotFoundError, subprocess.SubprocessError) as e:
+        except (OSError, subprocess.SubprocessError) as e:
             log.warning("Could not determine the shared library dependencies for {}: {}".format(self._path, e))
             return
 
@@ -422,7 +422,7 @@ class IOUVM(BaseNode):
             hostid = (yield from gns3server.utils.asyncio.subprocess_check_output("hostid")).strip()
         except FileNotFoundError as e:
             raise IOUError("Could not find hostid: {}".format(e))
-        except subprocess.SubprocessError as e:
+        except (OSError, subprocess.SubprocessError) as e:
             raise IOUError("Could not execute hostid: {}".format(e))
 
         try:
@@ -23,7 +23,7 @@ import logging
 log = logging.getLogger(__name__)
 
 
-# This ports are disallowed by Chrome and Firefox to avoid issues, we skip them as well
+# These ports are disallowed by Chrome and Firefox to avoid issues, we skip them as well
 BANNED_PORTS = set((1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87, 95, 101, 102, 103,
                     104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139, 143, 179, 389, 465, 512, 513, 514, 515, 526,
                     530, 531, 532, 540, 556, 563, 587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667,
@@ -194,7 +194,7 @@ class Qemu(BaseManager):
                     return version
                 else:
                     raise QemuError("Could not determine the Qemu version for {}".format(qemu_path))
-        except subprocess.SubprocessError as e:
+        except (OSError, subprocess.SubprocessError) as e:
             raise QemuError("Error while looking for the Qemu version: {}".format(e))
 
     @staticmethod
@@ -214,7 +214,7 @@ class Qemu(BaseManager):
                 return version
             else:
                 raise QemuError("Could not determine the Qemu-img version for {}".format(qemu_img_path))
-        except subprocess.SubprocessError as e:
+        except (OSError, subprocess.SubprocessError) as e:
             raise QemuError("Error while looking for the Qemu-img version: {}".format(e))
 
     @staticmethod
@@ -1101,7 +1101,7 @@ class QemuVM(BaseNode):
                     if expect in line:
                         result = line.decode("utf-8").strip()
                         break
-        except EOFError as e:
+        except (ConnectionError, EOFError) as e:
             log.warning("Could not read from QEMU monitor: {}".format(e))
         writer.close()
         return result
@@ -217,6 +217,8 @@ class VirtualBoxVM(BaseNode):
         except ET.ParseError:
             raise VirtualBoxError("Cannot modify VirtualBox linked nodes file. "
                                   "File {} is corrupted.".format(self._linked_vbox_file()))
+        except OSError as e:
+            raise VirtualBoxError("Cannot modify VirtualBox linked nodes file '{}': {}".format(self._linked_vbox_file(), e))
 
         machine = tree.getroot().find("{http://www.virtualbox.org/}Machine")
         if machine is not None and machine.get("uuid") != "{" + self.id + "}":
@@ -245,6 +247,7 @@ class VirtualBoxVM(BaseNode):
             return True
         return False
 
+    @locking
     @asyncio.coroutine
     def start(self):
         """
@@ -949,7 +952,11 @@ class VirtualBoxVM(BaseNode):
         """
 
         if self.console and self.console_type == "telnet":
-            self._remote_pipe = yield from asyncio_open_serial(self._get_pipe_name())
+            pipe_name = self._get_pipe_name()
+            try:
+                self._remote_pipe = yield from asyncio_open_serial(pipe_name)
+            except OSError as e:
+                raise VirtualBoxError("Could not open serial pipe '{}': {}".format(pipe_name, e))
             server = AsyncioTelnetServer(reader=self._remote_pipe,
                                          writer=self._remote_pipe,
                                          binary=True,
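The @locking decorator added to VirtualBoxVM.start() above serializes concurrent calls on the same node; it is part of the "Refactor asyncio locking system for Python 3.7 support" changelog entry. The real helper lives in gns3server.utils.asyncio; the sketch below is only an assumption of its general shape, written in the same old-style (yield from) coroutine idiom as the surrounding code:

import asyncio
import functools

def locking(f):
    """Allow only one invocation of the decorated coroutine method at a time
    per instance, by lazily attaching a per-method asyncio.Lock."""
    @functools.wraps(f)
    @asyncio.coroutine
    def wrapper(self, *args, **kwargs):
        lock_name = "__" + f.__name__ + "_lock"
        if not hasattr(self, lock_name):
            setattr(self, lock_name, asyncio.Lock())
        with (yield from getattr(self, lock_name)):
            return (yield from f(self, *args, **kwargs))
    return wrapper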
@@ -150,6 +150,7 @@ class VMware(BaseManager):
         VIX 1.13 was the release for Player 6.
         VIX 1.14 was the release for Player 7.
         VIX 1.15 was the release for Workstation Player 12.
+        VIX 1.17 was the release for Workstation Player 14.
 
         :param player_version: VMware Player major version.
         """
@@ -163,6 +164,8 @@ class VMware(BaseManager):
             yield from self.check_vmrun_version(minimum_required_version="1.14.0")
         elif player_version >= 12:
             yield from self.check_vmrun_version(minimum_required_version="1.15.0")
+        elif player_version >= 14:
+            yield from self.check_vmrun_version(minimum_required_version="1.17.0")
         self._host_type = "player"
 
     @asyncio.coroutine
@@ -173,6 +176,7 @@ class VMware(BaseManager):
         VIX 1.13 was the release for Workstation 10.
         VIX 1.14 was the release for Workstation 11.
         VIX 1.15 was the release for Workstation Pro 12.
+        VIX 1.17 was the release for Workstation Pro 14.
 
         :param ws_version: VMware Workstation major version.
         """
@@ -186,6 +190,8 @@ class VMware(BaseManager):
             yield from self.check_vmrun_version(minimum_required_version="1.14.0")
         elif ws_version >= 12:
             yield from self.check_vmrun_version(minimum_required_version="1.15.0")
+        elif ws_version >= 14:
+            yield from self.check_vmrun_version(minimum_required_version="1.17.0")
         self._host_type = "ws"
 
     @asyncio.coroutine
@@ -849,7 +849,11 @@ class VMwareVM(BaseNode):
         """
 
         if self.console and self.console_type == "telnet":
-            self._remote_pipe = yield from asyncio_open_serial(self._get_pipe_name())
+            pipe_name = self._get_pipe_name()
+            try:
+                self._remote_pipe = yield from asyncio_open_serial(self._get_pipe_name())
+            except OSError as e:
+                raise VMwareError("Could not open serial pipe '{}': {}".format(pipe_name, e))
             server = AsyncioTelnetServer(reader=self._remote_pipe,
                                          writer=self._remote_pipe,
                                          binary=True,
@@ -316,7 +316,7 @@ class VPCSVM(BaseNode):
         if sys.platform.startswith("win32"):
             try:
                 self._process.send_signal(signal.CTRL_BREAK_EVENT)
-            except OSError:
+            except (SystemError, OSError):
                 pass
         else:
             try:
@@ -117,7 +117,7 @@ class Controller:
 
         self._appliance_templates = {}
         for directory, builtin in ((get_resource('appliances'), True,), (self.appliances_path(), False,)):
-            if os.path.isdir(directory):
+            if directory and os.path.isdir(directory):
                 for file in os.listdir(directory):
                     if not file.endswith('.gns3a') and not file.endswith('.gns3appliance'):
                         continue
@@ -208,8 +208,10 @@ class Controller:
         builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "ethernet_hub"), {"node_type": "ethernet_hub", "name": "Ethernet hub", "category": 1, "symbol": ":/symbols/hub.svg"}, builtin=True))
         builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "frame_relay_switch"), {"node_type": "frame_relay_switch", "name": "Frame Relay switch", "category": 1, "symbol": ":/symbols/frame_relay_switch.svg"}, builtin=True))
         builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "atm_switch"), {"node_type": "atm_switch", "name": "ATM switch", "category": 1, "symbol": ":/symbols/atm_switch.svg"}, builtin=True))
-        if sys.platform.startswith("win"):
-            builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "traceng"), {"node_type": "traceng", "name": "TraceNG", "default_name_format": "TraceNG-{0}", "category": 2, "symbol": ":/symbols/traceng.svg", "properties": {}}, builtin=True))
+        #FIXME: disable TraceNG
+        #if sys.platform.startswith("win"):
+        #    builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "traceng"), {"node_type": "traceng", "name": "TraceNG", "default_name_format": "TraceNG-{0}", "category": 2, "symbol": ":/symbols/traceng.svg", "properties": {}}, builtin=True))
         for b in builtins:
             self._appliances[b.id] = b
 
@@ -250,7 +252,7 @@ class Controller:
         for c in computes:
             try:
                 yield from self.add_compute(**c)
-            except (aiohttp.web.HTTPConflict, KeyError):
+            except (aiohttp.web.HTTPError, KeyError):
                 pass  # Skip not available servers at loading
         yield from self.load_projects()
         try:
@@ -450,9 +450,13 @@ class Compute:
         self._capabilities = response.json
 
         if response.json["version"].split("-")[0] != __version__.split("-")[0]:
-            msg = "GNS3 controller version {} is not the same as compute server {} version {}".format(__version__,
-                                                                                                       self._name,
-                                                                                                       response.json["version"])
+            if self._name.startswith("GNS3 VM"):
+                msg = "GNS3 version {} is not the same as the GNS3 VM version {}. Please upgrade the GNS3 VM.".format(__version__,
+                                                                                                                      response.json["version"])
+            else:
+                msg = "GNS3 controller version {} is not the same as compute server {} version {}".format(__version__,
+                                                                                                           self._name,
+                                                                                                           response.json["version"])
             if __version_info__[3] == 0:
                 # Stable release
                 log.error(msg)
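The version guard above compares only the part of each version string before the first "-", so pre-release suffixes alone do not count as a mismatch. A small illustration with made-up version strings, not part of the diff:

controller_version = "2.1.11"       # hypothetical __version__ on the controller
compute_version = "2.1.11-dev1"     # hypothetical version reported by a compute

# Only the release part before the first "-" is compared, so a "-dev"/"-rc"
# suffix by itself does not trigger the mismatch message.
mismatch = compute_version.split("-")[0] != controller_version.split("-")[0]
print(mismatch)  # False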
@@ -94,12 +94,12 @@ class Node:
 
         # Update node properties with additional elements
         for prop in kwargs:
-            if prop not in ignore_properties:
+            if prop and prop not in ignore_properties:
                 if hasattr(self, prop):
                     try:
                         setattr(self, prop, kwargs[prop])
                     except AttributeError as e:
-                        log.critical("Can't set attribute %s", prop)
+                        log.critical("Cannot set attribute '%s'".format(prop))
                         raise e
                 else:
                     if prop not in self.CONTROLLER_ONLY_PROPERTIES and kwargs[prop] is not None and kwargs[prop] != "":
@@ -186,9 +186,9 @@ class Node:
         if not os.path.isabs(path):
             path = os.path.join(self.project.controller.configs_path(), path)
         try:
-            with open(path) as f:
+            with open(path, encoding="utf-8") as f:
                 return f.read()
-        except (PermissionError, OSError):
+        except OSError:
             return None
 
     @property
@@ -80,7 +80,7 @@ class StandardPortFactory:
                                            segment_number,
                                            adapter=adapter_number,
                                            **cls._generate_replacement(interface_number, segment_number))
-            except (ValueError, KeyError) as e:
+            except (IndexError, ValueError, KeyError) as e:
                 raise aiohttp.web.HTTPConflict(text="Invalid port name format {}: {}".format(port_name_format, str(e)))
 
             port_name = custom_adapter_settings.get("port_name", port_name)
@@ -875,7 +875,7 @@ class Project:
             link = yield from self.add_link(link_id=link_data["link_id"])
             if "filters" in link_data:
                 yield from link.update_filters(link_data["filters"])
-            for node_link in link_data["nodes"]:
+            for node_link in link_data.get("nodes", []):
                 node = self.get_node(node_link["node_id"])
                 port = node.get_port(node_link["adapter_number"], node_link["port_number"])
                 if port is None:
@@ -903,7 +903,7 @@ class Project:
         try:
             if os.path.exists(path + ".backup"):
                 shutil.copy(path + ".backup", path)
-        except (PermissionError, OSError):
+        except OSError:
             pass
         self._status = "closed"
         self._loading = False
@@ -968,7 +968,7 @@ class Project:
             with open(project_path, "rb") as f:
                 project = yield from import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
         except (ValueError, OSError, UnicodeEncodeError) as e:
-            raise aiohttp.web.HTTPConflict(text="Can not duplicate project: {}".format(str(e)))
+            raise aiohttp.web.HTTPConflict(text="Cannot duplicate project: {}".format(str(e)))
 
         if previous_status == "closed":
             yield from self.close()
@@ -22,6 +22,9 @@ from ..utils.get_resource import get_resource
 from ..utils.picture import get_size
 from ..config import Config
 
+import logging
+log = logging.getLogger(__name__)
+
 
 class Symbols:
     """
@@ -73,19 +76,25 @@ class Symbols:
     def symbols_path(self):
         directory = os.path.expanduser(Config.instance().get_section_config("Server").get("symbols_path", "~/GNS3/symbols"))
         if directory:
-            os.makedirs(directory, exist_ok=True)
+            try:
+                os.makedirs(directory, exist_ok=True)
+            except OSError as e:
+                log.error("Could not create symbol directory '{}': {}".format(directory, e))
+                return None
         return directory
 
     def get_path(self, symbol_id):
         try:
             return self._symbols_path[symbol_id]
-        # Symbol not found refresh cache
+        # Symbol not found, let's refresh the cache
         except KeyError:
-            self.list()
             try:
+                self.list()
                 return self._symbols_path[symbol_id]
-            except KeyError:
-                return self._symbols_path[":/symbols/computer.svg"]
+            except (OSError, KeyError):
+                log.warning("Could not retrieve symbol '{}'".format(symbol_id))
+                symbols_path = self._symbols_path
+                return symbols_path[":/symbols/computer.svg"]
 
     def get_size(self, symbol_id):
         try:
@@ -436,7 +436,7 @@ def _convert_1_3_later(topo, topo_path):
             symbol = old_node.get("symbol", ":/symbols/computer.svg")
             old_node["ports"] = _create_cloud(node, old_node, symbol)
         else:
-            raise NotImplementedError("Conversion of {} is not supported".format(old_node["type"]))
+            raise aiohttp.web.HTTPConflict(text="Conversion of {} is not supported".format(old_node["type"]))
 
         for prop in old_node.get("properties", {}):
             if prop not in ["console", "name", "console_type", "console_host", "use_ubridge"]:
@@ -635,13 +635,13 @@ def _create_cloud(node, old_node, icon):
         elif old_port["name"].startswith("nio_nat"):
             continue
         else:
-            raise NotImplementedError("The conversion of cloud with {} is not supported".format(old_port["name"]))
+            raise aiohttp.web.HTTPConflict(text="The conversion of cloud with {} is not supported".format(old_port["name"]))
 
         if port_type == "udp":
             try:
                 _, lport, rhost, rport = old_port["name"].split(":")
             except ValueError:
-                raise NotImplementedError("UDP tunnel using IPV6 is not supported in cloud")
+                raise aiohttp.web.HTTPConflict(text="UDP tunnel using IPV6 is not supported in cloud")
             port = {
                 "name": "UDP tunnel {}".format(len(ports) + 1),
                 "port_number": len(ports) + 1,
@@ -672,7 +672,7 @@ def _convert_snapshots(topo_dir):
     old_snapshots_dir = os.path.join(topo_dir, "project-files", "snapshots")
     if os.path.exists(old_snapshots_dir):
         new_snapshots_dir = os.path.join(topo_dir, "snapshots")
-        os.makedirs(new_snapshots_dir)
+        os.makedirs(new_snapshots_dir, exist_ok=True)
 
         for snapshot in os.listdir(old_snapshots_dir):
             snapshot_dir = os.path.join(old_snapshots_dir, snapshot)
@@ -20,7 +20,7 @@ import sys
 import struct
 import aiohttp
 import platform
-
+import locale
 
 try:
     import raven
@@ -57,7 +57,7 @@ class CrashReport:
     Report crash to a third party service
     """
 
-    DSN = "sync+https://56af21e241ed4c1894ebe17bf06b1cd1:6075f91067954267b51e90b9638a6fad@sentry.io/38482"
+    DSN = "https://8a4a7325dfcf4661a0b04d92b0a7d32e:14f83f7a65e54df88e5f06abad85b152@sentry.io/38482"
     if hasattr(sys, "frozen"):
         cacert = get_resource("cacert.pem")
         if cacert is not None and os.path.isfile(cacert):
@@ -92,7 +92,8 @@ class CrashReport:
                 "url": request.path,
                 "data": request.json,
             })
-        self._client.tags_context({
+
+        context = {
             "os:name": platform.system(),
             "os:release": platform.release(),
             "os:win_32": " ".join(platform.win32_ver()),
@@ -105,7 +106,28 @@ class CrashReport:
             "python:bit": struct.calcsize("P") * 8,
             "python:encoding": sys.getdefaultencoding(),
             "python:frozen": "{}".format(hasattr(sys, "frozen"))
-        })
+        }
+
+        if sys.platform.startswith("linux") and not hasattr(sys, "frozen"):
+            # add locale information
+            try:
+                language, encoding = locale.getlocale()
+                context["locale:language"] = language
+                context["locale:encoding"] = encoding
+            except ValueError:
+                pass
+
+        # add GNS3 VM version if it exists
+        home = os.path.expanduser("~")
+        gns3vm_version = os.path.join(home, ".config", "GNS3", "gns3vm_version")
+        if os.path.isfile(gns3vm_version):
+            try:
+                with open(gns3vm_version) as fd:
+                    context["gns3vm:version"] = fd.readline().strip()
+            except OSError:
+                pass
+
+        self._client.tags_context(context)
         try:
             report = self._client.captureException()
         except Exception as e:
@@ -59,7 +59,7 @@ class ComputeHandler:
 
     @Route.put(
         r"/computes/{compute_id}",
-        description="Get a compute server information",
+        description="Update a compute server",
         status_codes={
             200: "Compute server updated",
             400: "Invalid request",
@@ -302,9 +302,9 @@ class ProjectHandler:
 
         try:
             with tempfile.TemporaryDirectory() as tmp_dir:
-                datas = yield from export_project(
-                    project, tmp_dir,
+                stream = yield from export_project(project,
+                                                   tmp_dir,
                                                    include_images=bool(int(request.query.get("include_images", "0"))))
                 # We need to do that now because export could failed and raise an HTTP error
                 # that why response start need to be the later possible
                 response.content_type = 'application/gns3project'
@@ -312,7 +312,7 @@ class ProjectHandler:
                 response.enable_chunked_encoding()
                 yield from response.prepare(request)
 
-                for data in datas:
+                for data in stream:
                     response.write(data)
                     yield from response.drain()
 
@@ -140,7 +140,7 @@ class ServerHandler:
         r"/debug",
         description="Dump debug information to disk (debug directory in config directory). Work only for local server",
         status_codes={
-            201: "Writed"
+            201: "Written"
         })
     def debug(request, response):
 
@@ -157,7 +157,7 @@ class ServerHandler:
                 f.write(ServerHandler._getDebugData())
         except Exception as e:
             # If something is wrong we log the info to the log and we hope the log will be include correctly to the debug export
-            log.error("Could not export debug informations {}".format(e), exc_info=1)
+            log.error("Could not export debug information {}".format(e), exc_info=1)
 
         try:
             if Controller.instance().gns3vm.engine == "vmware":
@@ -52,7 +52,8 @@ class SymbolHandler:
         controller = Controller.instance()
         try:
             yield from response.file(controller.symbols.get_path(request.match_info["symbol_id"]))
-        except (KeyError, FileNotFoundError, PermissionError):
+        except (KeyError, OSError) as e:
+            log.warning("Could not get symbol file: {}".format(e))
             response.set_status(404)
 
     @Route.post(
@@ -66,7 +67,7 @@ class SymbolHandler:
         controller = Controller.instance()
         path = os.path.join(controller.symbols.symbols_path(), os.path.basename(request.match_info["symbol_id"]))
         try:
-            with open(path, 'wb') as f:
+            with open(path, "wb") as f:
                 while True:
                     try:
                         chunk = yield from request.content.read(1024)
@@ -75,7 +76,7 @@ class SymbolHandler:
                     if not chunk:
                         break
                     f.write(chunk)
-        except OSError as e:
+        except (UnicodeEncodeError, OSError) as e:
             raise aiohttp.web.HTTPConflict(text="Could not write symbol file '{}': {}".format(path, e))
         # Reset the symbol list
         controller.symbols.list()
@@ -178,7 +178,7 @@ class Hypervisor(UBridgeHypervisor):
                                                                                env=env)
 
             log.info("ubridge started PID={}".format(self._process.pid))
-        except (OSError, PermissionError, subprocess.SubprocessError) as e:
+        except (OSError, subprocess.SubprocessError) as e:
             ubridge_stdout = self.read_stdout()
             log.error("Could not start ubridge: {}\n{}".format(e, ubridge_stdout))
             raise UbridgeError("Could not start ubridge: {}\n{}".format(e, ubridge_stdout))
@@ -197,11 +197,9 @@ class AsyncioTelnetServer:
             yield from self._write_intro(network_writer, echo=self._echo, binary=self._binary, naws=self._naws)
             yield from connection.connected()
             yield from self._process(network_reader, network_writer, connection)
-        except ConnectionResetError:
+        except ConnectionError:
             with (yield from self._lock):
-
                 network_writer.close()
-
                 if self._reader_process == network_reader:
                     self._reader_process = None
                     # Cancel current read from this reader
@@ -217,7 +215,7 @@ class AsyncioTelnetServer:
             try:
                 writer.write_eof()
                 yield from writer.drain()
-            except ConnectionResetError:
+            except (AttributeError, ConnectionError):
                 continue
 
     @asyncio.coroutine
@@ -43,8 +43,16 @@ class PingStats:
                 cur_time > cls._last_measurement + 1.9:
             cls._last_measurement = cur_time
             # Non blocking call to get cpu usage. First call will return 0
-            cls._last_cpu_percent = psutil.cpu_percent(interval=None)
-            cls._last_mem_percent = psutil.virtual_memory().percent
+            try:
+                cls._last_cpu_percent = psutil.cpu_percent(interval=None)
+                cls._last_mem_percent = psutil.virtual_memory().percent
+            except RuntimeError:
+                # ignore the following error:
+                # RuntimeError: host_statistics(HOST_CPU_LOAD_INFO) syscall failed: (ipc/send) invalid reply port
+                pass
+            except PermissionError:
+                # [Errno 13] Permission denied: '/proc/stat'
+                pass
         stats["cpu_usage_percent"] = cls._last_cpu_percent
         stats["memory_usage_percent"] = cls._last_mem_percent
         return stats
@@ -225,6 +225,11 @@ class Route(object):
                     response = Response(request=request, route=route)
                     response.set_status(408)
                     response.json({"message": "Client error", "status": 408})
+                except MemoryError:
+                    log.error("Memory error detected, server has run out of memory!", exc_info=1)
+                    response = Response(request=request, route=route)
+                    response.set_status(500)
+                    response.json({"message": "Memory error", "status": 500})
                 except Exception as e:
                     log.error("Uncaught exception detected: {type}".format(type=type(e)), exc_info=1)
                     response = Response(request=request, route=route)
@@ -29,7 +29,10 @@ import functools
 import time
 import atexit
 
-from gns3server.utils.static import get_static_dir
+# Import encoding now, to avoid implicit import later.
+# Implicit import within threads may cause LookupError when standard library is in a ZIP
+import encodings.idna
+
 from .route import Route
 from ..config import Config
 from ..compute import MODULES
@@ -38,6 +41,8 @@ from ..compute.qemu import Qemu
 from ..controller import Controller
 from ..utils.asyncio import asyncio_ensure_future
 
+from gns3server.utils.static import get_static_dir
+
 # do not delete this import
 import gns3server.handlers
 
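The explicit import of encodings.idna above pre-registers the IDNA codec at startup; otherwise the first thread that encodes an internationalized hostname triggers a lazy codec import, which can fail with LookupError when the standard library ships inside a ZIP (frozen builds). A small illustration, not part of the diff:

# Importing the module up-front registers the "idna" codec with the codecs
# machinery; the encode() call below is what would otherwise perform a lazy
# (and potentially failing) import from a worker thread.
import encodings.idna  # noqa: F401  (imported for its side effect)

print("example.com".encode("idna"))  # b'example.com'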
@@ -1,5 +1,5 @@
 jsonschema>=2.4.0
-aiohttp>=2.2.0,<2.4.0 # pyup: ignore
+aiohttp>=2.3.3,<2.4.0 # pyup: ignore
 aiohttp-cors>=0.5.3,<0.6.0 # pyup: ignore
 yarl>=0.11
 Jinja2>=2.7.3
setup.py | 8

@@ -62,17 +62,21 @@ setup(
     zip_safe=False,
     platforms="any",
     classifiers=[
-        "Development Status :: 4 - Beta",
+        "Development Status :: 5 - Production/Stable",
         "Environment :: Console",
         "Intended Audience :: Information Technology",
         "Topic :: System :: Networking",
         "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Natural Language :: English",
-        "Operating System :: OS Independent",
+        "Operating System :: POSIX",
+        "Operating System :: MacOS :: MacOS X",
+        "Operating System :: Microsoft :: Windows",
         "Programming Language :: Python",
         "Programming Language :: Python :: 3",
         "Programming Language :: Python :: 3.4",
         "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: Implementation :: CPython",
     ],
 )