Compare commits

..

67 Commits

Author SHA1 Message Date
78e1321bae 2.0.2 2017-05-30 08:57:07 +02:00
c46a9da769 Update sphinx from 1.6.1 to 1.6.2 (#1049) 2017-05-29 09:01:12 +02:00
ef5c013561 Merge branch 'master' into 2.0 2017-05-26 10:47:57 +02:00
8bbe7e8041 Set correct permission on ubridge when doing a remote installation 2017-05-26 10:47:25 +02:00
777361c23d Merge branch 'master' into 2.0 2017-05-26 10:32:26 +02:00
c3d5abd104 Remote install script should be totally non-interactive 2017-05-26 10:29:33 +02:00
39879ca92b Duplicate project on remote server uses a UUID
Ref https://github.com/GNS3/gns3-server/issues/1040
2017-05-23 17:20:51 +02:00
5a86277a9f Test that import erases the auto_open and auto_start settings
Ref #1041
2017-05-23 16:08:30 +02:00
b87692acce Fix import of some old topologies from 1.3 2017-05-23 14:05:47 +02:00
d765d02f06 Fix error in logging of errors while starting the GNS3 VM 2017-05-23 13:39:41 +02:00
5bb6c51698 Fix an error when logging that a Docker container fails to start 2017-05-23 13:31:04 +02:00
59488e8064 Fix tests on python 3.4 (#1046) 2017-05-23 12:15:04 +02:00
54e80d68f8 Fix test failure on Python 3.4
Fix #1045
2017-05-23 11:11:13 +02:00
2a8911d19e Keep DOCKER_MINIMUM_API_VERSION and DOCKER_MINIMUM_VERSION consistent 2017-05-23 11:00:15 +02:00
8b7209ea78 Use docker version in error message of outdated docker installation 2017-05-23 10:41:42 +02:00
7d48eba01b Use versioned docker API
Since docker v1.13 the unversioned docker API is deprecated.
Backport of gns3-server 2.0, commit effbe59
2017-05-23 10:37:47 +02:00
1e598d1890 Test for support of docker commit images
Ref #1039
2017-05-23 10:23:12 +02:00
2191c487b6 Support images created by "docker commit". Fixes #1039 2017-05-23 10:23:11 +02:00
3ed49bbdd5 Update pytest from 3.0.7 to 3.1.0 (#1044) 2017-05-23 08:29:36 +02:00
7d30da5d4e Do not wait for auto start to finish in order to complete project opening
Fix https://github.com/GNS3/gns3-gui/issues/2074
2017-05-22 20:18:07 +02:00
4dc55a5af4 Improve logging when the remote server connection is lost
Ref https://github.com/GNS3/gns3-gui/issues/2074
2017-05-22 13:57:32 +02:00
51d6152a5c Fix support for some old topologies
Fix #1042
2017-05-19 18:27:20 +02:00
bdcadcc335 Fix support for some old topologies
Fix #1038
2017-05-19 17:49:39 +02:00
812ff6b5d8 Update sphinx from 1.5.6 to 1.6.1 (#1035) 2017-05-17 08:30:16 +02:00
e24bfd5913 2.0.2dev1 2017-05-16 09:16:35 +02:00
f2d5eb0b27 2.0.1 2017-05-16 08:47:35 +02:00
39419462db Handle HTTP 504 errors from compute node
Fix #1032
2017-05-16 08:37:54 +02:00
ddc5e8169e Merge pull request #1031 from GNS3/async_images
When connecting to a compute node, ask for the qemu image list
2017-05-15 21:20:28 +02:00
3dc478385b When connecting to a compute node, ask for the qemu image list
The server starts computing the qemu image list in the background
because first-time discovery can be slow due to the time needed
to compute the md5sums.

Fix #996
2017-05-15 15:35:48 +02:00
851d6e1db8 When importing a portable project, the NAT node is loaded on the GNS3 VM
Fix #1030
2017-05-15 10:21:37 +02:00
facdf9c617 Update sphinx from 1.5.5 to 1.5.6 (#1029) 2017-05-15 09:41:18 +02:00
70d34befb9 Merge remote-tracking branch 'origin/2.0' into 2.0 2017-05-12 21:04:39 +08:00
8696a464ab Merge pull request #1027 from GNS3/fix_wic_numbering
Fix wic numbering
2017-05-12 21:03:05 +08:00
5721dd3f30 Fix port numbering for Wic slots 1 & 2 2017-05-12 14:12:24 +02:00
bf38a8515e Fix typo. 2017-05-12 18:54:54 +08:00
942cc09eda Fixes issue with connections when loading an ATM switch. 2017-05-12 18:00:58 +08:00
9baaf5685d Merge pull request #1028 from GNS3/filter_logs
Filter /settings from log
2017-05-12 17:31:42 +08:00
961dbb5162 Filter /settings from log
Fix #1026
2017-05-12 10:48:43 +02:00
19a6157f64 Fix wic numbering
Fix #1011
2017-05-12 09:48:05 +02:00
18cfcaa081 Fixes ATM mapping. 2017-05-12 13:09:26 +08:00
87d42f4eb9 Fixes Frame-relay VC mapping. 2017-05-12 12:39:01 +08:00
766456014b Fix project export looking into the wrong directory
Fix #1010
2017-05-11 17:59:57 +02:00
f31e46e8ed Fix a race condition in logging when ubridge exits quickly
Fix #1025
2017-05-11 17:26:18 +02:00
fb6a95f7bb Fix conversion issue with old topologies using the C3640
Fix #1019
2017-05-10 13:50:19 +02:00
93fe66a253 Fix error when you have a directory in your symbols directory
Fix #1006
2017-05-10 13:40:49 +02:00
e870b0cfa0 Catch VMware errors when getting the status after GNS3 VM start
Fix #1008
2017-05-09 19:16:47 +02:00
b4467d83bf Make sure upstart LANG is utf8
Fix #1017
2017-05-09 18:54:01 +02:00
ac5509f383 Always install typing module (compat with python 3.4 on ubuntu)
Fix #1021
2017-05-09 15:09:55 +02:00
050f6876ce Fix KeyError: 'filename'
Fix #1018
2017-05-09 12:25:15 +02:00
2172c25df6 Run tests into a tmp directory prefixed by unicode character 2017-05-09 09:43:47 +02:00
48dae085aa Catch missing function listxattr on some Linux hosts. 2017-05-08 21:02:23 +07:00
bfee08459d Test image sorting
Ref https://github.com/GNS3/gns3-gui/issues/2030
2017-05-05 20:09:51 +02:00
8a6584d90f Sort image list
Fix https://github.com/GNS3/gns3-gui/issues/2030
2017-05-05 19:52:45 +02:00
6870749e50 Handle a race condition at project closing
Fix #1015
2017-05-05 19:39:06 +02:00
3070a68273 Fix unicode error when reading ios configuration
Fix #1013
2017-05-05 19:27:04 +02:00
bda36081a9 Fix AttributeError: 'NoneType' object has no attribute 'send'
Fix #1014
2017-05-05 17:16:07 +02:00
930e7089db Fix a conversion issue with some 1.3 topologies 2017-05-04 12:09:56 +02:00
04220846b3 Fix an error with ethernetswitch when ethertype is null
Fix #1007
2017-05-04 10:33:02 +02:00
694a4e8068 Raise an error if we can't create the dynamips configuration directory
Fix #992
2017-05-03 17:55:13 +02:00
7ba351c888 Catch timeout error when loading virtualbox VM
Fix #999
2017-05-03 17:44:55 +02:00
ae3e25b03b Handle broken compute at server startup
Fix #1001
2017-05-03 17:40:58 +02:00
fa1450cb24 Catch error when we can't back up the topology
Fix #998
2017-05-03 17:35:10 +02:00
8e5f7d0838 Catch error when writing the topology file on a read-only device
Fix #1003
2017-05-03 17:28:47 +02:00
31d1696215 Catch a race condition in VirtualBox when editing and linking
Fix #993
2017-05-03 16:14:42 +02:00
078a6d8d34 Fix a race condition when editing a qemu vm and connecting it
Fix #997
2017-05-03 16:01:51 +02:00
ecba758c58 Docker aux console is a VT100 terminal
Without it, programs like nano can't work.

Signed-off-by: Julien Duponchelle <julien@gns3.net>
2017-05-03 10:30:43 +02:00
6190b9c189 2.0.1dev1 2017-05-03 10:18:26 +02:00
45 changed files with 467 additions and 96 deletions

View File

@ -9,7 +9,8 @@ install:
- python setup.py install
- pip install -rdev-requirements.txt
script:
- py.test -v -s tests
- mkdir ‡
- py.test -v -s tests --basetemp=‡
deploy:
provider: pypi
user: noplay

View File

@ -1,5 +1,51 @@
# Change Log
## 2.0.2 30/05/2017
* Set correct permission on ubridge when doing a remote installation
* Remote install script should be totally non-interactive
* Duplicate project on remote server uses a UUID
* Fix import of some old topologies from 1.3
* Fix error in logging of errors while starting the GNS3 VM
* Fix an error when logging that a Docker container fails to start
* Use docker version in error message of outdated docker installation
* Support images created by "docker commit". Fixes #1039
* Do not wait for auto start to finish in order to complete project opening
* Improve logging when the remote server connection is lost
## 2.0.1 16/05/2017
* Handle HTTP 504 errors from compute node
* When connecting to a compute node, ask for the qemu image list
* When importing a portable project, the NAT node is loaded on the GNS3 VM
* Fix port numbering for Wic slots 1 & 2
* Fixes issue with connections when loading an ATM switch.
* Fixes ATM mapping.
* Fixes Frame-relay VC mapping.
* Fix project export looking into the wrong directory
* Fix a race condition in logging when ubridge exits quickly
* Fix conversion issue with old topologies using the C3640
* Fix error when you have a directory in your symbols directory
* Catch VMware errors when getting the status after GNS3 VM start
* Make sure upstart LANG is utf8
* Always install typing module (compat with python 3.4 on ubuntu)
* Fix KeyError: 'filename'
* Catch missing function listxattr on some Linux hosts.
* Sort image list
* Handle a race condition at project closing
* Fix unicode error when reading ios configuration
* Fix AttributeError: 'NoneType' object has no attribute 'send'
* Fix a conversion issue with some 1.3 topologies
* Fix an error with ethernetswitch when ethertype is null
* Raise an error if we can't create the dynamips configuration directory
* Catch timeout error when loading virtualbox VM
* Handle broken compute at server startup
* Catch error when we can't back up the topology
* Catch error when writing the topology file on a read-only device
* Catch a race condition in VirtualBox when editing and linking
* Fix a race condition when editing a qemu vm and connecting it
* Docker aux console is a VT100 terminal
## 2.0.0 02/05/2017
* Fix connection to websocket with last docker release

View File

@ -1,7 +1,7 @@
-rrequirements.txt
sphinx==1.5.5
pytest==3.0.7
sphinx==1.6.2
pytest==3.1.0
pep8==1.7.0
pytest-catchlog==1.2.2
pytest-timeout==1.2.0

View File

@ -344,7 +344,7 @@ class BaseManager:
# test the 2nd byte and check if the 13th bit (CAP_NET_RAW) is set
if struct.unpack("<IIIII", caps)[1] & 1 << 13:
return True
except OSError as e:
except (AttributeError, OSError) as e:
log.error("could not determine if CAP_NET_RAW capability is set for {}: {}".format(executable, e))
return False
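For context, this check reads the executable's security.capability extended attribute and tests bit 13 (CAP_NET_RAW) of the permitted set; the new except clause also catches AttributeError for platforms where os.getxattr does not exist. A minimal standalone sketch of the same check, assuming the v2 20-byte xattr layout used above:

```python
import os
import struct

CAP_NET_RAW = 13  # capability number; bit 13 of the permitted set

def has_cap_net_raw(executable):
    """Return True if the file's permitted capability set includes CAP_NET_RAW (sketch)."""
    try:
        caps = os.getxattr(executable, "security.capability")
        # v2 layout: magic, permitted[0], inheritable[0], permitted[1], inheritable[1]
        return bool(struct.unpack("<IIIII", caps)[1] & (1 << CAP_NET_RAW))
    except (AttributeError, OSError):
        # os.getxattr missing (non-Linux platform) or no capability xattr on the file
        return False

print(has_cap_net_raw("/usr/local/bin/ubridge"))  # path is illustrative
```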

View File

@ -541,8 +541,9 @@ class BaseNode:
self._ubridge_hypervisor = Hypervisor(self._project, self.ubridge_path, self.working_dir, server_host)
log.info("Starting new uBridge hypervisor {}:{}".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
yield from self._ubridge_hypervisor.start()
log.info("Hypervisor {}:{} has successfully started".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
yield from self._ubridge_hypervisor.connect()
if self._ubridge_hypervisor:
log.info("Hypervisor {}:{} has successfully started".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
yield from self._ubridge_hypervisor.connect()
@asyncio.coroutine
def _stop_ubridge(self):

View File

@ -33,7 +33,9 @@ from gns3server.compute.docker.docker_error import DockerError, DockerHttp304Err
log = logging.getLogger(__name__)
# Be careful to keep it consistent
DOCKER_MINIMUM_API_VERSION = "1.25"
DOCKER_MINIMUM_VERSION = "1.13"
class Docker(BaseManager):
@ -60,7 +62,7 @@ class Docker(BaseManager):
self._connected = False
raise DockerError("Can't connect to docker daemon")
if parse_version(version["ApiVersion"]) < parse_version(DOCKER_MINIMUM_API_VERSION):
raise DockerError("Docker API version is {}. GNS3 requires a minimum API version of {}".format(version["ApiVersion"], DOCKER_MINIMUM_API_VERSION))
raise DockerError("Docker version is {}. GNS3 requires a minimum version of {}".format(version["Version"], DOCKER_MINIMUM_VERSION))
def connector(self):
if self._connector is None or self._connector.closed:
@ -113,11 +115,13 @@ class Docker(BaseManager):
:returns: HTTP response
"""
data = json.dumps(data)
url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
if timeout is None:
timeout = 60 * 60 * 24 * 31 # One month timeout
if path == 'version':
url = "http://docker/v1.12/" + path # API of docker v1.0
else:
url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
try:
if path != "version": # version is used by the connection check
yield from self._check_connection()
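The request URL is now pinned to the minimum supported API version, except for the version endpoint, which keeps a v1.12 prefix so that even pre-1.13 daemons can answer and be reported as too old. A small sketch of the URL selection, reusing the constant defined above:

```python
DOCKER_MINIMUM_API_VERSION = "1.25"

def docker_url(path):
    """Build the Docker API URL the way the query handler above does (sketch only)."""
    if path == "version":
        # the version probe must still work against very old daemons
        return "http://docker/v1.12/" + path
    return "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path

print(docker_url("version"))          # http://docker/v1.12/version
print(docker_url("containers/json"))  # http://docker/v1.25/containers/json
```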

View File

@ -213,7 +213,7 @@ class DockerVM(BaseNode):
self._volumes = ["/etc/network"]
volumes = image_infos.get("ContainerConfig", {}).get("Volumes")
volumes = image_infos.get("Config", {}).get("Volumes")
if volumes is None:
return binds
for volume in volumes.keys():
@ -361,7 +361,7 @@ class DockerVM(BaseNode):
try:
yield from self._add_ubridge_connection(nio, adapter_number)
except UbridgeNamespaceError:
log.error("Container {} failed to start", self.name)
log.error("Container %s failed to start", self.name)
yield from self.stop()
# The container can crash soon after the start, this means we can not move the interface to the container namespace
@ -393,7 +393,7 @@ class DockerVM(BaseNode):
# We can not use the API because docker doesn't expose a websocket api for exec
# https://github.com/GNS3/gns3-gui/issues/1039
process = yield from asyncio.subprocess.create_subprocess_exec(
"docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do /gns3/bin/busybox sh; done", "/dev/null",
"docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do TERM=vt100 /gns3/bin/busybox sh; done", "/dev/null",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE)
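As an aside on the log-call fix above: the standard logging module formats messages lazily with the %-operator, so a "{}" placeholder passed with extra arguments is never substituted and the formatting step fails. A short illustration, not tied to the GNS3 code:

```python
import logging

logging.basicConfig()
log = logging.getLogger("demo")
name = "my-container"

log.error("Container %s failed to start", name)          # ok: logging applies %-substitution lazily
log.error("Container {} failed to start".format(name))   # ok: format eagerly, pass a plain string
log.error("Container {} failed to start", name)          # broken: %-formatting raises and the message is lost
```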

View File

@ -188,22 +188,33 @@ class ATMSwitch(Device):
raise DynamipsError("Port {} is not allocated".format(port_number))
# remove VCs mapped with the port
pvc_entry = re.compile(r"""^([0-9]*):([0-9]*):([0-9]*)$""")
for source, destination in self._active_mappings.copy().items():
match_source_pvc = pvc_entry.search(source)
match_destination_pvc = pvc_entry.search(destination)
if match_source_pvc and match_destination_pvc:
if len(source) == 3 and len(destination) == 3:
# remove the virtual channels mapped with this port/nio
source_port, source_vpi, source_vci = map(int, match_source_pvc.group(1, 2, 3))
destination_port, destination_vpi, destination_vci = map(int, match_destination_pvc.group(1, 2, 3))
source_port, source_vpi, source_vci = source
destination_port, destination_vpi, destination_vci = destination
if port_number == source_port:
log.info('ATM switch "{name}" [{id}]: unmapping VCC between port {source_port} VPI {source_vpi} VCI {source_vci} and port {destination_port} VPI {destination_vpi} VCI {destination_vci}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_vpi=source_vpi,
source_vci=source_vci,
destination_port=destination_port,
destination_vpi=destination_vpi,
destination_vci=destination_vci))
yield from self.unmap_pvc(source_port, source_vpi, source_vci, destination_port, destination_vpi, destination_vci)
yield from self.unmap_pvc(destination_port, destination_vpi, destination_vci, source_port, source_vpi, source_vci)
else:
# remove the virtual paths mapped with this port/nio
source_port, source_vpi = map(int, source.split(':'))
destination_port, destination_vpi = map(int, destination.split(':'))
source_port, source_vpi = source
destination_port, destination_vpi = destination
if port_number == source_port:
log.info('ATM switch "{name}" [{id}]: unmapping VPC between port {source_port} VPI {source_vpi} and port {destination_port} VPI {destination_vpi}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_vpi=source_vpi,
destination_port=destination_port,
destination_vpi=destination_vpi))
yield from self.unmap_vp(source_port, source_vpi, destination_port, destination_vpi)
yield from self.unmap_vp(destination_port, destination_vpi, source_port, source_vpi)
@ -239,6 +250,14 @@ class ATMSwitch(Device):
if self.has_port(destination_port):
if (source_port, source_vpi, source_vci) not in self._active_mappings and \
(destination_port, destination_vpi, destination_vci) not in self._active_mappings:
log.info('ATM switch "{name}" [{id}]: mapping VCC between port {source_port} VPI {source_vpi} VCI {source_vci} and port {destination_port} VPI {destination_vpi} VCI {destination_vci}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_vpi=source_vpi,
source_vci=source_vci,
destination_port=destination_port,
destination_vpi=destination_vpi,
destination_vci=destination_vci))
yield from self.map_pvc(source_port, source_vpi, source_vci, destination_port, destination_vpi, destination_vci)
yield from self.map_pvc(destination_port, destination_vpi, destination_vci, source_port, source_vpi, source_vci)
else:
@ -247,6 +266,12 @@ class ATMSwitch(Device):
destination_port, destination_vpi = map(int, destination.split(':'))
if self.has_port(destination_port):
if (source_port, source_vpi) not in self._active_mappings and (destination_port, destination_vpi) not in self._active_mappings:
log.info('ATM switch "{name}" [{id}]: mapping VPC between port {source_port} VPI {source_vpi} and port {destination_port} VPI {destination_vpi}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_vpi=source_vpi,
destination_port=destination_port,
destination_vpi=destination_vpi))
yield from self.map_vp(source_port, source_vpi, destination_port, destination_vpi)
yield from self.map_vp(destination_port, destination_vpi, source_port, source_vpi)
@ -262,10 +287,10 @@ class ATMSwitch(Device):
"""
if port1 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port1))
return
if port2 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port2))
return
nio1 = self._nios[port1]
nio2 = self._nios[port2]
@ -283,7 +308,7 @@ class ATMSwitch(Device):
port2=port2,
vpi2=vpi2))
self._active_mappings["{}:{}".format(port1, vpi1)] = "{}:{}".format(port2, vpi2)
self._active_mappings[(port1, vpi1)] = (port2, vpi2)
@asyncio.coroutine
def unmap_vp(self, port1, vpi1, port2, vpi2):
@ -297,10 +322,10 @@ class ATMSwitch(Device):
"""
if port1 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port1))
return
if port2 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port2))
return
nio1 = self._nios[port1]
nio2 = self._nios[port2]
@ -318,7 +343,7 @@ class ATMSwitch(Device):
port2=port2,
vpi2=vpi2))
del self._active_mappings["{}:{}".format(port1, vpi1)]
del self._active_mappings[(port1, vpi1)]
@asyncio.coroutine
def map_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2):
@ -334,10 +359,10 @@ class ATMSwitch(Device):
"""
if port1 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port1))
return
if port2 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port2))
return
nio1 = self._nios[port1]
nio2 = self._nios[port2]
@ -359,7 +384,7 @@ class ATMSwitch(Device):
vpi2=vpi2,
vci2=vci2))
self._active_mappings["{}:{}:{}".format(port1, vpi1, vci1)] = "{}:{}:{}".format(port2, vpi2, vci2)
self._active_mappings[(port1, vpi1, vci1)] = (port2, vpi2, vci2)
@asyncio.coroutine
def unmap_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2):
@ -375,10 +400,10 @@ class ATMSwitch(Device):
"""
if port1 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port1))
return
if port2 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port2))
return
nio1 = self._nios[port1]
nio2 = self._nios[port2]
@ -399,7 +424,7 @@ class ATMSwitch(Device):
port2=port2,
vpi2=vpi2,
vci2=vci2))
del self._active_mappings["{}:{}:{}".format(port1, vpi1, vci1)]
del self._active_mappings[(port1, vpi1, vci1)]
@asyncio.coroutine
def start_capture(self, port_number, output_file, data_link_type="DLT_ATM_RFC1483"):
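The mapping bookkeeping switches from colon-joined strings, which had to be re-parsed with a regex or split() on every lookup, to plain tuples of integers. A rough before/after illustration with made-up port/VPI/VCI values:

```python
# Before: keys and values were strings that needed re-parsing on every lookup
old_mappings = {"1:100:200": "2:100:200"}
src_port, src_vpi, src_vci = map(int, "1:100:200".split(":"))

# After: tuples keep the integers as-is and unpack directly
new_mappings = {(1, 100, 200): (2, 100, 200)}
for (src_port, src_vpi, src_vci), (dst_port, dst_vpi, dst_vci) in new_mappings.items():
    print(src_port, src_vpi, src_vci, "->", dst_port, dst_vpi, dst_vci)
```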

View File

@ -103,8 +103,8 @@ class Bridge(Device):
:param nio: NIO instance to remove
"""
yield from self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio))
if self._hypervisor:
yield from self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio))
self._nios.remove(nio)
@property

View File

@ -246,7 +246,7 @@ class EthernetSwitch(Device):
elif settings["type"] == "dot1q":
yield from self.set_dot1q_port(port_number, settings["vlan"])
elif settings["type"] == "qinq":
yield from self.set_qinq_port(port_number, settings["vlan"], settings["ethertype"])
yield from self.set_qinq_port(port_number, settings["vlan"], settings.get("ethertype"))
@asyncio.coroutine
def set_access_port(self, port_number, vlan_id):

View File

@ -191,9 +191,15 @@ class FrameRelaySwitch(Device):
# remove VCs mapped with the port
for source, destination in self._active_mappings.copy().items():
source_port, source_dlci = map(int, source.split(':'))
destination_port, destination_dlci = map(int, destination.split(':'))
source_port, source_dlci = source
destination_port, destination_dlci = destination
if port_number == source_port:
log.info('Frame Relay switch "{name}" [{id}]: unmapping VC between port {source_port} DLCI {source_dlci} and port {destination_port} DLCI {destination_dlci}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_dlci=source_dlci,
destination_port=destination_port,
destination_dlci=destination_dlci))
yield from self.unmap_vc(source_port, source_dlci, destination_port, destination_dlci)
yield from self.unmap_vc(destination_port, destination_dlci, source_port, source_dlci)
@ -224,6 +230,13 @@ class FrameRelaySwitch(Device):
destination_port, destination_dlci = map(int, destination.split(':'))
if self.has_port(destination_port):
if (source_port, source_dlci) not in self._active_mappings and (destination_port, destination_dlci) not in self._active_mappings:
log.info('Frame Relay switch "{name}" [{id}]: mapping VC between port {source_port} DLCI {source_dlci} and port {destination_port} DLCI {destination_dlci}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_dlci=source_dlci,
destination_port=destination_port,
destination_dlci=destination_dlci))
yield from self.map_vc(source_port, source_dlci, destination_port, destination_dlci)
yield from self.map_vc(destination_port, destination_dlci, source_port, source_dlci)
@ -260,7 +273,7 @@ class FrameRelaySwitch(Device):
port2=port2,
dlci2=dlci2))
self._active_mappings["{}:{}".format(port1, dlci1)] = "{}:{}".format(port2, dlci2)
self._active_mappings[(port1, dlci1)] = (port2, dlci2)
@asyncio.coroutine
def unmap_vc(self, port1, dlci1, port2, dlci2):
@ -294,7 +307,7 @@ class FrameRelaySwitch(Device):
dlci1=dlci1,
port2=port2,
dlci2=dlci2))
del self._active_mappings["{}:{}".format(port1, dlci1)]
del self._active_mappings[(port1, dlci1)]
@asyncio.coroutine
def start_capture(self, port_number, output_file, data_link_type="DLT_FRELAY"):

View File

@ -71,7 +71,10 @@ class Router(BaseNode):
super().__init__(name, node_id, project, manager, console=console, aux=aux, allocate_aux=aux)
self._working_directory = os.path.join(self.project.module_working_directory(self.manager.module_name.lower()), self.id)
os.makedirs(os.path.join(self._working_directory, "configs"), exist_ok=True)
try:
os.makedirs(os.path.join(self._working_directory, "configs"), exist_ok=True)
except OSError as e:
raise DynamipsError("Can't create the dynamips config directory: {}".format(str(e)))
if dynamips_id:
self._convert_before_2_0_0_b3(dynamips_id)
@ -1553,7 +1556,7 @@ class Router(BaseNode):
try:
startup_config_path = os.path.join(self._working_directory, startup_config)
if os.path.exists(startup_config_path):
with open(startup_config_path) as f:
with open(startup_config_path, encoding="utf-8") as f:
self._startup_config_content = f.read()
else:
self._startup_config_content = ''

View File

@ -1131,9 +1131,13 @@ class QemuVM(BaseNode):
adapter_number=adapter_number))
if self.ubridge:
yield from self._add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
try:
yield from self._add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
except IndexError:
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
elif self.is_running():
raise QemuError("Sorry, adding a link to a started Qemu VM is not supported without using uBridge.")

View File

@ -94,7 +94,7 @@ class VirtualBox(BaseManager):
if not vboxmanage_path:
vboxmanage_path = self.find_vboxmanage()
if not vboxmanage_path:
raise VirtualBoxError("Could not found VBoxManage")
raise VirtualBoxError("Could not find VBoxManage")
command = [vboxmanage_path, "--nologo", subcommand]
command.extend(args)

View File

@ -968,9 +968,13 @@ class VirtualBoxVM(BaseNode):
adapter_number=adapter_number))
if self.ubridge:
yield from self._add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
try:
yield from self._add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
except KeyError:
raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
adapter_number=adapter_number))
yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
else:
vm_state = yield from self._get_vm_state()

View File

@ -87,7 +87,7 @@ class Controller:
for c in computes:
try:
yield from self.add_compute(**c)
except (aiohttp.web_exceptions.HTTPConflict):
except (aiohttp.web_exceptions.HTTPConflict, KeyError):
pass # Skip not available servers at loading
yield from self.load_projects()
try:

View File

@ -23,6 +23,7 @@ import json
import uuid
import sys
import io
from operator import itemgetter
from ..utils import parse_version
from ..utils.images import list_images
@ -528,7 +529,7 @@ class Compute:
raise aiohttp.web.HTTPForbidden(text=msg)
elif response.status == 404:
raise aiohttp.web.HTTPNotFound(text="{} {} not found".format(method, path))
elif response.status == 408:
elif response.status == 408 or response.status == 504:
raise aiohttp.web.HTTPRequestTimeout(text="{} {} request timeout".format(method, path))
elif response.status == 409:
try:
@ -579,8 +580,10 @@ class Compute:
Forward a call to the emulator on compute
"""
try:
res = yield from self.http_query(method, "/{}/{}".format(type, path), data=data, timeout=None)
action = "/{}/{}".format(type, path)
res = yield from self.http_query(method, action, data=data, timeout=None)
except aiohttp.errors.DisconnectedError:
log.error("Connection lost to %s during %s %s", self._id, method, action)
raise aiohttp.web.HTTPGatewayTimeout()
return res.json
@ -600,6 +603,9 @@ class Compute:
for local_image in list_images(type):
if local_image['filename'] not in [i['filename'] for i in images]:
images.append(local_image)
images = sorted(images, key=itemgetter('filename'))
else:
images = sorted(images, key=itemgetter('image'))
except OSError as e:
raise ComputeError("Can't list images: {}".format(str(e)))
return images
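Image lists returned by a compute are now sorted before being handed back, keyed on "filename" (or "image" for the other list format). A tiny illustration of the operator.itemgetter sort:

```python
from operator import itemgetter

images = [
    {"filename": "linux.qcow2", "filesize": 0},
    {"filename": "asa.qcow2", "filesize": 0},
]
images = sorted(images, key=itemgetter("filename"))
print([i["filename"] for i in images])  # ['asa.qcow2', 'linux.qcow2'], matching the updated test below
```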

View File

@ -190,6 +190,5 @@ def _export_images(project, image, z):
if os.path.exists(path):
arcname = os.path.join("images", directory, os.path.basename(image))
z.write(path, arcname)
break
else:
raise aiohttp.web.HTTPConflict(text="Topology could not be exported because the image {} is not available. If you use multiple servers, we need a copy of the image on the main server.".format(path))
return
raise aiohttp.web.HTTPConflict(text="Topology could not be exported because the image {} is not available. If you use multiple servers, we need a copy of the image on the main server.".format(image))

View File

@ -250,7 +250,7 @@ class GNS3VM:
force=True)
except aiohttp.web.HTTPConflict:
pass
log.error("Can't start the GNS3 VM: {}", str(e))
log.error("Can't start the GNS3 VM: %s", str(e))
@asyncio.coroutine
def exit_vm(self):
@ -287,7 +287,7 @@ class GNS3VM:
yield from engine.start()
except Exception as e:
yield from self._controller.delete_compute("vm")
log.error("Can't start the GNS3 VM: {}", str(e))
log.error("Can't start the GNS3 VM: {}".format(str(e)))
yield from compute.update(name="GNS3 VM ({})".format(engine.vmname))
raise e
yield from compute.update(name="GNS3 VM ({})".format(engine.vmname),

View File

@ -228,7 +228,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
try:
resp = None
resp = yield from session.get('http://127.0.0.1:{}/v2/compute/network/interfaces'.format(api_port))
except (OSError, aiohttp.errors.ClientHttpProcessingError, TimeoutError):
except (OSError, aiohttp.errors.ClientHttpProcessingError, TimeoutError, asyncio.TimeoutError):
pass
if resp:

View File

@ -118,7 +118,10 @@ class VMwareGNS3VM(BaseGNS3VM):
if vmware_tools_state not in ("installed", "running"):
raise GNS3VMError("VMware tools are not installed in {}".format(self.vmname))
running = yield from self._is_running()
try:
running = yield from self._is_running()
except VMwareError as e:
raise GNS3VMError("Could not list VMware VMs: {}".format(str(e)))
if not running:
log.info("Update GNS3 VM settings")
# set the number of vCPUs and amount of RAM

View File

@ -24,7 +24,6 @@ import asyncio
import zipfile
import aiohttp
from ..config import Config
from .topology import load_topology
@ -74,7 +73,7 @@ def import_project(controller, project_id, stream, location=None, name=None, kee
path = location
else:
projects_path = controller.projects_directory()
path = os.path.join(projects_path, project_name)
path = os.path.join(projects_path, project_id)
try:
os.makedirs(path, exist_ok=True)
except UnicodeEncodeError as e:
@ -114,7 +113,7 @@ def import_project(controller, project_id, stream, location=None, name=None, kee
# unless it's a linux host without GNS3 VM
if not sys.platform.startswith("linux") or controller.has_compute("vm"):
for node in topology["topology"]["nodes"]:
if node["node_type"] in ("docker", "qemu", "iou"):
if node["node_type"] in ("docker", "qemu", "iou", "nat"):
node["compute_id"] = "vm"
else:
for node in topology["topology"]["nodes"]:

View File

@ -70,6 +70,10 @@ class Port:
return self._name
return self.short_name_type + "{}/{}".format(self._interface_number, self._port_number)
@short_name.setter
def short_name(self, val):
self._short_name = val
def __json__(self):
return {
"name": self._name,

View File

@ -181,7 +181,9 @@ class DynamipsPortFactory:
ports = []
adapter_number = 0
wic_port_number = 16
wic_slot = 1
wic_port_number = wic_slot * 16
display_wic_port_number = 0
for name in sorted(properties.keys()):
if name.startswith("slot") and properties[name]:
port_class = cls.ADAPTER_MATRIX[properties[name]]["port"]
@ -194,7 +196,13 @@ class DynamipsPortFactory:
port_class = cls.WIC_MATRIX[properties[name]]["port"]
if port_class:
for port_number in range(0, cls.WIC_MATRIX[properties[name]]["nb_ports"]):
name = "{}{}/{}".format(port_class.long_name_type(), 0, wic_port_number)
ports.append(port_class(name, 0, 0, wic_port_number))
name = "{}{}/{}".format(port_class.long_name_type(), 0, display_wic_port_number)
port = port_class(name, 0, 0, wic_port_number)
port.short_name = "{}{}/{}".format(port.short_name_type, 0, display_wic_port_number)
ports.append(port)
display_wic_port_number += 1
wic_port_number += 1
wic_slot += 1
wic_port_number = wic_slot * 16
return ports
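With the fix, each WIC slot gets its own block of Dynamips-internal port numbers (slot * 16) while the displayed names keep counting up from 0, which is what the updated test expects (Serial0/2 and Serial0/3 now map to ports 32 and 33). A rough illustration assuming two 2-port serial WICs:

```python
display = 0
for wic_slot in (1, 2):
    internal_base = wic_slot * 16      # Dynamips-internal port block for this slot
    for port in range(2):              # assume a 2-port WIC in each slot
        print("Serial0/{} -> internal port {}".format(display, internal_base + port))
        display += 1
# Serial0/0 -> 16, Serial0/1 -> 17, Serial0/2 -> 32, Serial0/3 -> 33
```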

View File

@ -675,7 +675,10 @@ class Project:
self._loading = False
# Should we start the nodes when project is open
if self._auto_start:
yield from self.start_all()
# Start all in the background without waiting for completion
# we ignore errors because we want to let the user open
# their project and fix it
asyncio.async(self.start_all())
@asyncio.coroutine
def wait_loaded(self):
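Opening a project no longer blocks until every node has started: start_all() is scheduled with asyncio.async() (today's asyncio.ensure_future()) and errors are ignored so the user can still open and repair the project. A rough sketch of the fire-and-forget pattern, written with current asyncio syntax rather than the 2.0-era @asyncio.coroutine style:

```python
import asyncio

async def start_all():
    # stand-in for Project.start_all(): pretend the nodes take a moment to boot
    await asyncio.sleep(0.1)
    print("all nodes started")

async def open_project():
    # schedule in the background and return immediately; the caller is not blocked
    asyncio.ensure_future(start_all())
    print("project opened")

async def main():
    await open_project()
    await asyncio.sleep(0.2)  # in this demo, give the background task time to finish

asyncio.run(main())
```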

View File

@ -55,6 +55,8 @@ class Symbols:
for file in os.listdir(directory):
if file.startswith('.'):
continue
if not os.path.isfile(os.path.join(directory, file)):
continue
symbol_id = file
symbols.append({
'symbol_id': symbol_id,

View File

@ -125,7 +125,10 @@ def load_topology(path):
if "revision" not in topo or topo["revision"] < GNS3_FILE_FORMAT_REVISION:
# If it's an old GNS3 file we need to convert it
# first we backup the file
shutil.copy(path, path + ".backup{}".format(topo.get("revision", 0)))
try:
shutil.copy(path, path + ".backup{}".format(topo.get("revision", 0)))
except (OSError) as e:
raise aiohttp.web.HTTPConflict(text="Can't write backup of the topology {}: {}".format(path, str(e)))
changed = True
if "revision" not in topo or topo["revision"] < 5:
@ -146,8 +149,11 @@ def load_topology(path):
raise e
if changed:
with open(path, "w+", encoding="utf-8") as f:
json.dump(topo, f, indent=4, sort_keys=True)
try:
with open(path, "w+", encoding="utf-8") as f:
json.dump(topo, f, indent=4, sort_keys=True)
except (OSError) as e:
raise aiohttp.web.HTTPConflict(text="Can't write the topology {}: {}".format(path, str(e)))
return topo
@ -283,6 +289,10 @@ def _convert_1_3_later(topo, topo_path):
node["properties"] = {}
# Some old dynamips node don't have type
if "type" not in old_node:
old_node["type"] = old_node["properties"]["platform"].upper()
if old_node["type"] == "VPCSDevice":
node["node_type"] = "vpcs"
elif old_node["type"] == "QemuVM":
@ -300,7 +310,7 @@ def _convert_1_3_later(topo, topo_path):
node["console_type"] = None
node["symbol"] = ":/symbols/hub.svg"
node["properties"]["ports_mapping"] = []
for port in old_node["ports"]:
for port in old_node.get("ports", []):
node["properties"]["ports_mapping"].append({
"name": "Ethernet{}".format(port["port_number"] - 1),
"port_number": port["port_number"] - 1
@ -310,7 +320,7 @@ def _convert_1_3_later(topo, topo_path):
node["symbol"] = ":/symbols/ethernet_switch.svg"
node["console_type"] = None
node["properties"]["ports_mapping"] = []
for port in old_node["ports"]:
for port in old_node.get("ports", []):
node["properties"]["ports_mapping"].append({
"name": "Ethernet{}".format(port["port_number"] - 1),
"port_number": port["port_number"] - 1,
@ -321,13 +331,15 @@ def _convert_1_3_later(topo, topo_path):
node["node_type"] = "frame_relay_switch"
node["symbol"] = ":/symbols/frame_relay_switch.svg"
node["console_type"] = None
elif old_node["type"] in ["C1700", "C2600", "C2691", "C3600", "C3725", "C3745", "C7200", "EtherSwitchRouter"]:
elif old_node["type"].upper() in ["C1700", "C2600", "C2691", "C3600", "C3620", "C3640", "C3660", "C3725", "C3745", "C7200", "EtherSwitchRouter"]:
if node["symbol"] is None:
node["symbol"] = ":/symbols/router.svg"
node["node_type"] = "dynamips"
node["properties"]["dynamips_id"] = old_node.get("dynamips_id")
if "platform" not in node["properties"] and old_node["type"].startswith("C"):
if "platform" not in node["properties"] and old_node["type"].upper().startswith("C"):
node["properties"]["platform"] = old_node["type"].lower()
if node["properties"]["platform"].startswith("c36"):
node["properties"]["platform"] = "c3600"
if "ram" not in node["properties"] and old_node["type"].startswith("C"):
node["properties"]["ram"] = PLATFORMS_DEFAULT_RAM[old_node["type"].lower()]
elif old_node["type"] == "VMwareVM":

View File

@ -54,7 +54,7 @@ class CrashReport:
Report crash to a third party service
"""
DSN = "sync+https://fd4397dee2e145da9227af29df24ded1:61a0e2c9b9f64204bb8ef7ac17b98e3e@sentry.io/38482"
DSN = "sync+https://67b93949a78d4ef5978388cc4b8906f9:271ee1dd01db4a39b919097f452cb6c5@sentry.io/38482"
if hasattr(sys, "frozen"):
cacert = get_resource("cacert.pem")
if cacert is not None and os.path.isfile(cacert):

View File

@ -112,7 +112,10 @@ class ProjectHandler:
if ProjectHandler._notifications_listening.setdefault(project.id, 0) <= 1:
yield from project.close()
pm.remove_project(project.id)
del ProjectHandler._notifications_listening[project.id]
try:
del ProjectHandler._notifications_listening[project.id]
except KeyError:
pass
else:
log.warning("Skip project closing, another client is listening for project notifications")
response.set_status(204)

View File

@ -23,7 +23,6 @@ import tempfile
from gns3server.web.route import Route
from gns3server.controller import Controller
from gns3server.controller.project import Project
from gns3server.controller.import_project import import_project
from gns3server.controller.export_project import export_project
from gns3server.config import Config

View File

@ -33,7 +33,7 @@ VM_CREATE_SCHEMA = {
},
"dynamips_id": {
"description": "Dynamips ID",
"type": "integer"
"type": ["integer", "null"]
},
"name": {
"description": "Dynamips VM instance name",

View File

@ -23,7 +23,7 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
__version__ = "2.0.0"
__version__ = "2.0.2"
# If it's a git checkout try to add the commit
if "dev" in __version__:
@ -36,4 +36,4 @@ if "dev" in __version__:
except Exception as e:
print(e)
__version_info__ = (2, 0, 0, 0)
__version_info__ = (2, 0, 2, 0)

View File

@ -98,6 +98,16 @@ class WinStreamHandler(logging.StreamHandler):
self.handleError(record)
class LogFilter:
"""
This filters some noise from the logs
"""
def filter(record):
if record.name == "aiohttp.access" and "/settings" in record.msg and "200" in record.msg:
return 0
return 1
def init_logger(level, logfile=None, quiet=False):
if logfile and len(logfile) > 0:
stream_handler = logging.FileHandler(logfile)
@ -111,5 +121,7 @@ def init_logger(level, logfile=None, quiet=False):
if quiet:
stream_handler.addFilter(logging.Filter(name="user_facing"))
logging.getLogger('user_facing').propagate = False
if level > logging.DEBUG:
stream_handler.addFilter(LogFilter)
logging.basicConfig(level=level, handlers=[stream_handler])
return logging.getLogger('user_facing')
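Note that the filter is registered as the class itself (stream_handler.addFilter(LogFilter)), which works because logging simply calls filter(record) on whatever object it is given. A self-contained sketch of the same idea, using a hypothetical SettingsFilter:

```python
import logging

class SettingsFilter:
    """Drop successful access-log lines for /settings (sketch of the LogFilter above)."""
    def filter(record):  # no self: the class itself is registered, so only the record is passed
        if record.name == "aiohttp.access" and "/settings" in record.msg and "200" in record.msg:
            return 0
        return 1

handler = logging.StreamHandler()
handler.addFilter(SettingsFilter)  # register the class, as init_logger() does with LogFilter
logging.basicConfig(level=logging.INFO, handlers=[handler])

logging.getLogger("aiohttp.access").info("GET /settings HTTP/1.1 200")  # filtered out
logging.getLogger("aiohttp.access").info("GET /version HTTP/1.1 200")   # still logged
```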

View File

@ -33,6 +33,7 @@ from .route import Route
from ..config import Config
from ..compute import MODULES
from ..compute.port_manager import PortManager
from ..compute.qemu import Qemu
from ..controller import Controller
@ -192,6 +193,10 @@ class WebServer:
Called when the HTTP server start
"""
yield from Controller.instance().start()
# Because md5sum computation for a large image collection
# can be slow when the sums are not already cached,
# we start the computation at server start
asyncio.async(Qemu.instance().list_images())
def run(self):
"""

View File

@ -1,5 +1,6 @@
description "GNS3 server"
author "GNS3 Team"
env LANG=en_US.UTF-8
start on filesystem or runlevel [2345]
stop on shutdown

View File

@ -6,3 +6,4 @@ Jinja2>=2.7.3
raven>=5.23.0
psutil>=3.0.0
zipstream>=1.1.4
typing>=3.5.3.0 # Otherwise yarl fail with python 3.4

View File

@ -152,7 +152,7 @@ log "Update system packages"
apt-get update
log "Upgrade packages"
apt-get upgrade -y
apt-get upgrade --yes --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
log " Install GNS3 packages"
apt-get install -y gns3-server
@ -163,6 +163,10 @@ then
useradd -d /opt/gns3/ -m gns3
fi
log "Add GNS3 to the ubridge group"
usermod -aG ubridge gns3
log "Install docker"
if [ ! -f "/usr/bin/docker" ]
then

View File

@ -40,9 +40,6 @@ class PyTest(TestCommand):
dependencies = open("requirements.txt", "r").read().splitlines()
if sys.version_info <= (3, 4):
dependencies.append('typing>=3.5.3.0 # Otherwise yarl fail with python 3.4')
setup(
name="gns3-server",
version=__import__("gns3server").__version__,

View File

@ -840,7 +840,7 @@ def test_get_image_informations(project, manager, loop):
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
def test_mount_binds(vm, tmpdir):
image_infos = {
"ContainerConfig": {
"Config": {
"Volumes": {
"/test/experimental": {}
}
@ -879,7 +879,7 @@ def test_start_aux(vm, loop):
with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=MagicMock()) as mock_exec:
loop.run_until_complete(asyncio.async(vm._start_aux()))
mock_exec.assert_called_with('docker', 'exec', '-i', 'e90e34656842', '/gns3/bin/busybox', 'script', '-qfc', 'while true; do /gns3/bin/busybox sh; done', '/dev/null', stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)
mock_exec.assert_called_with('docker', 'exec', '-i', 'e90e34656842', '/gns3/bin/busybox', 'script', '-qfc', 'while true; do TERM=vt100 /gns3/bin/busybox sh; done', '/dev/null', stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)
def test_create_network_interfaces(vm):

View File

@ -360,8 +360,10 @@ def test_images(compute, async_run, images_dir):
images = async_run(compute.images("qemu"))
mock.assert_called_with("GET", "https://example.com:84/v2/compute/qemu/images", auth=None, data=None, headers={'content-type': 'application/json'}, chunked=False, timeout=None)
assert images == [{"filename": "linux.qcow2", "path": "linux.qcow2", "md5sum": "d41d8cd98f00b204e9800998ecf8427e", "filesize": 0},
{"filename": "asa.qcow2", "path": "asa.qcow2", "md5sum": "d41d8cd98f00b204e9800998ecf8427e", "filesize": 0}]
assert images == [
{"filename": "asa.qcow2", "path": "asa.qcow2", "md5sum": "d41d8cd98f00b204e9800998ecf8427e", "filesize": 0},
{"filename": "linux.qcow2", "path": "linux.qcow2", "md5sum": "d41d8cd98f00b204e9800998ecf8427e", "filesize": 0}
]
def test_list_files(project, async_run, compute):

View File

@ -35,6 +35,8 @@ def test_import_project(async_run, tmpdir, controller):
topology = {
"project_id": str(uuid.uuid4()),
"name": "test",
"auto_open": True,
"auto_start": True,
"topology": {
},
"version": "2.0.0"
@ -67,6 +69,8 @@ def test_import_project(async_run, tmpdir, controller):
# A new project name is generated when you import twice the same name
with open(zip_path, "rb") as f:
project = async_run(import_project(controller, str(uuid.uuid4()), f))
assert project.auto_open is False
assert project.auto_start is False
assert project.name != "test"
@ -252,6 +256,50 @@ def test_import_iou_linux_with_vm(linux_platform, async_run, tmpdir, controller)
assert topo["topology"]["nodes"][0]["compute_id"] == "vm"
def test_import_nat_non_linux(windows_platform, async_run, tmpdir, controller):
"""
On non linux host NAT should be moved to the GNS3 VM
"""
project_id = str(uuid.uuid4())
controller._computes["vm"] = AsyncioMagicMock()
topology = {
"project_id": str(uuid.uuid4()),
"name": "test",
"type": "topology",
"topology": {
"nodes": [
{
"compute_id": "local",
"node_id": "0fd3dd4d-dc93-4a04-a9b9-7396a9e22e8b",
"node_type": "nat",
"name": "test",
"properties": {}
}
],
"links": [],
"computes": [],
"drawings": []
},
"revision": 5,
"version": "2.0.0"
}
with open(str(tmpdir / "project.gns3"), 'w+') as f:
json.dump(topology, f)
zip_path = str(tmpdir / "project.zip")
with zipfile.ZipFile(zip_path, 'w') as myzip:
myzip.write(str(tmpdir / "project.gns3"), "project.gns3")
with open(zip_path, "rb") as f:
project = async_run(import_project(controller, project_id, f))
with open(os.path.join(project.path, "test.gns3")) as f:
topo = json.load(f)
assert topo["topology"]["nodes"][0]["compute_id"] == "vm"
def test_import_iou_non_linux(windows_platform, async_run, tmpdir, controller):
"""
On non linux host IOU should be moved to the GNS3 VM

View File

@ -548,8 +548,8 @@ def test_list_ports_dynamips(project, compute):
"link_type": "ethernet"
},
{
"name": "Serial0/16",
"short_name": "s0/16",
"name": "Serial0/0",
"short_name": "s0/0",
"data_link_types": {
"Cisco HDLC": "DLT_C_HDLC",
"Cisco PPP": "DLT_PPP_SERIAL",
@ -559,8 +559,8 @@ def test_list_ports_dynamips(project, compute):
"link_type": "serial"
},
{
"name": "Serial0/17",
"short_name": "s0/17",
"name": "Serial0/1",
"short_name": "s0/1",
"data_link_types": {
"Cisco HDLC": "DLT_C_HDLC",
"Cisco PPP": "DLT_PPP_SERIAL",
@ -570,24 +570,24 @@ def test_list_ports_dynamips(project, compute):
"link_type": "serial"
},
{
"name": "Serial0/18",
"short_name": "s0/18",
"name": "Serial0/2",
"short_name": "s0/2",
"data_link_types": {
"Cisco HDLC": "DLT_C_HDLC",
"Cisco PPP": "DLT_PPP_SERIAL",
"Frame Relay": "DLT_FRELAY"},
"port_number": 18,
"port_number": 32,
"adapter_number": 0,
"link_type": "serial"
},
{
"name": "Serial0/19",
"short_name": "s0/19",
"name": "Serial0/3",
"short_name": "s0/3",
"data_link_types": {
"Cisco HDLC": "DLT_C_HDLC",
"Cisco PPP": "DLT_PPP_SERIAL",
"Frame Relay": "DLT_FRELAY"},
"port_number": 19,
"port_number": 33,
"adapter_number": 0,
"link_type": "serial"
}

View File

@ -0,0 +1,77 @@
{
"auto_start": false,
"name": "1_3_dynamips",
"project_id": "ba5790e1-2f51-443e-a3cc-1a2eee132888",
"revision": 6,
"topology": {
"computes": [
{
"compute_id": "local",
"host": "127.0.0.1",
"name": "Local",
"port": 8000,
"protocol": "http"
}
],
"drawings": [],
"links": [],
"nodes": [
{
"symbol": ":/symbols/iosv_virl.svg",
"compute_id": "local",
"console": 2001,
"console_type": "telnet",
"label": {
"rotation": 0,
"style": "font-family: TypeWriter;font-size: 10;font-weight: bold;fill: #000000;fill-opacity: 1.0;",
"text": "R1",
"x": 22,
"y": -25
},
"name": "R1",
"node_id": "0bce6ad5-c688-4d4d-a425-f21aaf3927e2",
"node_type": "dynamips",
"port_name_format": "Ethernet{0}",
"port_segment_size": 0,
"first_port_name": null,
"properties": {
"dynamips_id": 1,
"auto_delete_disks": true,
"clock_divisor": 4,
"disk0": 0,
"disk1": 0,
"exec_area": 64,
"idlemax": 500,
"idlesleep": 30,
"image": "c7200-adventerprisek9-mz.124-24.T8.image",
"mac_addr": "ca01.2f39.0000",
"midplane": "vxr",
"mmap": true,
"npe": "npe-400",
"nvram": 512,
"platform": "c7200",
"power_supplies": [
1,
1
],
"ram": 512,
"sensors": [
22,
22,
22,
22
],
"slot0": "C7200-IO-FE",
"sparsemem": true,
"startup_config": "configs/i1_startup-config.cfg",
"system_id": "FTX0945W0MY"
},
"x": -112,
"y": -100,
"z": 1
}
]
},
"type": "topology",
"version": "ANYSTR"
}

View File

@ -0,0 +1,80 @@
{
"auto_start": false,
"name": "1_3_dynamips",
"project_id": "ba5790e1-2f51-443e-a3cc-1a2eee132888",
"revision": 3,
"topology": {
"nodes": [
{
"default_symbol": ":/symbols/iosv_virl.normal.svg",
"description": "Router c7200",
"dynamips_id": 1,
"hover_symbol": ":/symbols/iosv_virl.selected.svg",
"id": 1,
"label": {
"color": "#000000",
"font": "TypeWriter,10,-1,5,75,0,0,0,0,0",
"text": "R1",
"x": 22.6171875,
"y": -25.0
},
"ports": [
{
"adapter_number": 0,
"id": 1,
"name": "FastEthernet0/0",
"port_number": 0
}
],
"properties": {
"auto_delete_disks": true,
"clock_divisor": 4,
"console": 2001,
"disk0": 0,
"disk1": 0,
"exec_area": 64,
"idlemax": 500,
"idlesleep": 30,
"image": "c7200-adventerprisek9-mz.124-24.T8.image",
"mac_addr": "ca01.2f39.0000",
"midplane": "vxr",
"mmap": true,
"name": "R1",
"npe": "npe-400",
"nvram": 512,
"platform": "c7200",
"power_supplies": [
1,
1
],
"ram": 512,
"sensors": [
22,
22,
22,
22
],
"slot0": "C7200-IO-FE",
"sparsemem": true,
"startup_config": "configs/i1_startup-config.cfg",
"system_id": "FTX0945W0MY"
},
"server_id": 1,
"vm_id": "0bce6ad5-c688-4d4d-a425-f21aaf3927e2",
"x": -112.0,
"y": -100.0
}
],
"servers": [
{
"cloud": false,
"host": "127.0.0.1",
"id": 1,
"local": true,
"port": 8000
}
]
},
"type": "topology",
"version": "1.3.13"
}

View File

@ -16,6 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import io
import types
import asyncio
import unittest.mock
@ -69,6 +70,10 @@ class AsyncioMagicMock(unittest.mock.MagicMock):
"""
Magic mock returning coroutine
"""
try:
__class__ = types.CoroutineType
except AttributeError: # Not supported with Python 3.4
__class__ = types.GeneratorType
def __init__(self, return_value=None, return_values=None, **kwargs):
"""