mirror of
https://github.com/GNS3/gns3-server.git
synced 2025-06-25 01:59:14 +00:00
Compare commits
327 Commits
Author | SHA1 | Date | |
---|---|---|---|
19c4ec1867 | |||
8019374ed0 | |||
af530be346 | |||
9c3cfc4f4e | |||
c7d878ed9e | |||
49f1ee2e32 | |||
bd4de862c8 | |||
f038735595 | |||
a4f8675c93 | |||
da71f29208 | |||
b53b34d485 | |||
e63da227d0 | |||
c7d9af121f | |||
15babb137d | |||
eccee6b629 | |||
ef95ba1ed8 | |||
2bbdbeaa82 | |||
de2dad20d5 | |||
84c0a17572 | |||
f0edf799b7 | |||
a7be4681d5 | |||
07b982d4db | |||
da1cd9a3e7 | |||
0eafb6f06c | |||
042a69eecf | |||
1885fe62a6 | |||
e481ffa94c | |||
937bbf0131 | |||
d58a6ccda9 | |||
84fb108abb | |||
4455499e00 | |||
763f258465 | |||
d447a04c6a | |||
f358cb45a2 | |||
6b8e93f847 | |||
db95cb5c46 | |||
d6f63d3b7d | |||
7d90a73ed2 | |||
b1b2bbd581 | |||
da7074ea74 | |||
44307b43b9 | |||
febf0f7839 | |||
26d49f19c1 | |||
e1a7efad60 | |||
08956e438c | |||
e175650fb0 | |||
5f88db93ff | |||
e0a2553be4 | |||
b905760635 | |||
f9bc745ddb | |||
ead8a6caa2 | |||
5fab48ba75 | |||
2739483751 | |||
1f0fe6d304 | |||
5898b86dfc | |||
fa769cd41d | |||
379c216109 | |||
7422d06c1d | |||
5daff17911 | |||
8296621320 | |||
42ff398f05 | |||
d1de665939 | |||
f151181409 | |||
07395c9cf2 | |||
ae076c7ca9 | |||
6982e118c0 | |||
38a41a1cfd | |||
8fbfae6a98 | |||
5ca9becdf9 | |||
bb0db7846f | |||
e82862c69f | |||
d98a7aac19 | |||
43a3584a5c | |||
049c348709 | |||
343f2b574d | |||
016c3e515b | |||
4f03c3011e | |||
e183e7da27 | |||
313226786f | |||
df7d26e418 | |||
c829a250a9 | |||
775ee4259e | |||
e335015520 | |||
469187c584 | |||
ad7ecb8850 | |||
9b99a03cbf | |||
3aed651062 | |||
435dc82873 | |||
ae687346f1 | |||
d3436756b2 | |||
6f7b06e66f | |||
009ffcb4ef | |||
3f61fbeb0e | |||
e65648398b | |||
94a709cb42 | |||
ea7754f1c8 | |||
914fe7e750 | |||
8e3e3c08f8 | |||
e497e98ca1 | |||
3fd0a6d638 | |||
851ba074e7 | |||
cdd54b951a | |||
1ddb16eca0 | |||
7bc0570735 | |||
1ae17b74df | |||
cb6df28f59 | |||
3e89543ab9 | |||
a2ebbaa322 | |||
67e346ba92 | |||
acdc1e5ebb | |||
2bb062c61e | |||
a741662636 | |||
871fea33e0 | |||
8cb5cedb5d | |||
bc33683e47 | |||
fa140e991c | |||
f4b79af45f | |||
a1bf9cdfd3 | |||
f414ce459a | |||
366e9620dc | |||
ab13f628f7 | |||
e108a9e136 | |||
9e2043bfa8 | |||
28f7c2a1cd | |||
c7d58eb3b3 | |||
41f02dad54 | |||
76cc1d2750 | |||
2cdfd6c0d7 | |||
57cf57b408 | |||
16b5bb8810 | |||
56c153af79 | |||
9beca700a5 | |||
8e429ee4c1 | |||
f3095d94c1 | |||
5fd385159c | |||
dcbaa62df8 | |||
e9fb7f4981 | |||
d952718f30 | |||
978ec1b5be | |||
d5a7561bc4 | |||
c17e00204f | |||
936faaba5a | |||
5c1522f24a | |||
b92e64e507 | |||
848120c354 | |||
8a19afd618 | |||
815ef6066d | |||
85f571ee8b | |||
cf1b7ebe1f | |||
4fc5364ab5 | |||
4a91d8a6a5 | |||
b408f29726 | |||
1d08d4a5fa | |||
6d6e8196d2 | |||
75196b8a55 | |||
7ffdd0fab1 | |||
637a7326ec | |||
4afbf816ab | |||
40aec61701 | |||
7f77c66888 | |||
b7a859fa30 | |||
cd393491d5 | |||
f6d0971f15 | |||
ae5468a2d3 | |||
ec6e5a6118 | |||
f1737a6e43 | |||
e3b95fe9ce | |||
2109fd4f4d | |||
dff74352f7 | |||
977ccabf98 | |||
ad6fb664a2 | |||
6bc54b17c7 | |||
bb63f51f80 | |||
b335daf857 | |||
b93c85c390 | |||
cb197627b4 | |||
9b9eddb30c | |||
e564c84529 | |||
5531a576d1 | |||
a5b5f6c811 | |||
9ed15e55af | |||
f8ffd078a8 | |||
2651a48102 | |||
57394dfebf | |||
7422b31b2c | |||
06b9e46cd2 | |||
dc236eba09 | |||
36dbcfa7f6 | |||
6eca621b19 | |||
a046a4c980 | |||
34459f6a88 | |||
e097a0e728 | |||
7b25ce04e8 | |||
3b04f556b3 | |||
bd71f0cf4c | |||
879591eaf5 | |||
f39af9deb7 | |||
ec044f9210 | |||
498d006225 | |||
c1193c23c1 | |||
ba006f105f | |||
fa58b1f81c | |||
e167a81080 | |||
9f05b80d01 | |||
58a360e535 | |||
d243cb7d02 | |||
c563dbba8a | |||
5345fe8490 | |||
2a20333877 | |||
2e455037e1 | |||
23c48f5e3d | |||
e6ddce86b3 | |||
268c61ce80 | |||
d2c32bb570 | |||
3fe8016938 | |||
01dbaea2e4 | |||
427cb05c2a | |||
d36a9f3403 | |||
39c41336db | |||
fa22e19321 | |||
d472f47829 | |||
dcc4ddf11b | |||
e35ab1e8c0 | |||
975e1c8fa7 | |||
19b46a4014 | |||
7cdf23f497 | |||
06da40cdcd | |||
174e7cccea | |||
75212cda3f | |||
a018c5b0f5 | |||
93e2d4096d | |||
c077f3b990 | |||
60e9241857 | |||
ee4b094160 | |||
1dd437ecdc | |||
84eb8356e8 | |||
c833a20a8c | |||
eae6f33e29 | |||
59c1e125d3 | |||
7469f65fa0 | |||
392fa187c2 | |||
dab1b26569 | |||
03ffce0a75 | |||
9d28f4c0c3 | |||
04b02171bb | |||
e91e3525c4 | |||
c333e9451f | |||
49f1931e95 | |||
ebb8c0d0b0 | |||
2d42f32d71 | |||
fd18458588 | |||
5f15667ad1 | |||
bbdbc971c0 | |||
057b637961 | |||
87eef545e1 | |||
38815b3eaf | |||
4c4613b29b | |||
9d8cb4521d | |||
9dd09ccde1 | |||
86ebb9b840 | |||
569da0ce3a | |||
f73b288a28 | |||
e34c266ee1 | |||
80e15c86dc | |||
f35c742b07 | |||
f4f9e6eba6 | |||
90109e04aa | |||
c36de3393d | |||
976154fd0b | |||
7aedfc92fa | |||
9b1d513b03 | |||
836023a1bc | |||
93020a940f | |||
c2b78400f2 | |||
1123047404 | |||
01e2fcf225 | |||
85b9620953 | |||
1532b3ed9b | |||
a581eeba54 | |||
9b0088728f | |||
dea68bcb28 | |||
1fb4d191c9 | |||
167a0b8435 | |||
5842487cd0 | |||
1995adf838 | |||
5a8408cdb9 | |||
a7ec224b6d | |||
89e86b7778 | |||
56658756e2 | |||
942d07c8ce | |||
f6fb0623be | |||
3fc64bced2 | |||
373113545f | |||
5bee927481 | |||
4f61443b20 | |||
4673424da7 | |||
40261ec99c | |||
3e4423f663 | |||
88e8c36d1c | |||
537122daba | |||
fd22cd8361 | |||
71c3bda0a5 | |||
53d60bc71a | |||
4a3322b822 | |||
98537a242a | |||
03414993dc | |||
179372936e | |||
5deb584a03 | |||
c280fd5fb8 | |||
5db1f6b2f7 | |||
f92ac6e52e | |||
758fb2a0e7 | |||
5a28b9409a | |||
1f756c0d4f | |||
439a0c80d6 | |||
fd0fb97204 | |||
884bfa4724 | |||
794a7e880a | |||
de0df70933 | |||
0f868ef107 | |||
061b459abe | |||
8cf55166cb | |||
8de90d8236 | |||
7e196cabc1 | |||
16b4d60151 | |||
33edbefa3b | |||
6a808927d2 |
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
gns3server/version.py merge=ours
|
||||||
|
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -26,6 +26,7 @@ pip-log.txt
|
|||||||
|
|
||||||
# Unit test / coverage reports
|
# Unit test / coverage reports
|
||||||
.coverage
|
.coverage
|
||||||
|
.coverage*
|
||||||
.tox
|
.tox
|
||||||
.cache
|
.cache
|
||||||
nosetests.xml
|
nosetests.xml
|
||||||
|
@ -1,6 +1,5 @@
|
|||||||
language: python
|
language: python
|
||||||
python:
|
python:
|
||||||
- '3.4'
|
|
||||||
- '3.5'
|
- '3.5'
|
||||||
sudo: false
|
sudo: false
|
||||||
cache: pip
|
cache: pip
|
||||||
|
184
CHANGELOG
184
CHANGELOG
@ -1,5 +1,157 @@
|
|||||||
# Change Log
|
# Change Log
|
||||||
|
|
||||||
|
## 1.5.3 12/01/2016
|
||||||
|
|
||||||
|
* Fix sporadically systemd is unable to start gns3-server
|
||||||
|
|
||||||
|
## 1.5.3 rc1 20/12/2016
|
||||||
|
|
||||||
|
* Support aiohttp 1.2 (but not compatible with previous versions)
|
||||||
|
* Explain that segfault on IOU is a issue with the image
|
||||||
|
* Fix an issue with finding vmrun and vboxmanage
|
||||||
|
* Support named remote servers for VPCS
|
||||||
|
* When checking for a free port check if the host and 0.0.0.0 are available
|
||||||
|
* smm=off is only for 64bits
|
||||||
|
* Fix set hostname on remote server
|
||||||
|
* Fix sending smm option to qemu
|
||||||
|
* Workaround a bug with KVM, Qemu >= 2.4 and Intel CPU
|
||||||
|
* Renable sleep at Vbox exit bug seem to be back
|
||||||
|
* Support large project (> 2GB) during export
|
||||||
|
* Fix Deleting running telnet docker VM shows error in log
|
||||||
|
* Fix when closing a container using VNC, root permission are not reset
|
||||||
|
* Use $PATH also for dynamips and cleanup some $PATH usages
|
||||||
|
* Fix a lock issue with some virtualbox vm
|
||||||
|
* Raise proper error when you try to load an empty qcow2 file
|
||||||
|
* Fix upload form crash
|
||||||
|
* Search bin from the $PATH for sample configuration file
|
||||||
|
* Updated systemd unit file and added sample configuration file
|
||||||
|
|
||||||
|
## 1.5.2 18/08/2016
|
||||||
|
|
||||||
|
* Move utils.vmnet to gns3 namespace
|
||||||
|
* Fix Exporting portable projects with QEMU includes base images even when selecting no.
|
||||||
|
* Catch error when md5sum file is corrupted
|
||||||
|
* requirements.txt : added support for newer aiohttp version
|
||||||
|
* Improve compaction of .gns3project
|
||||||
|
* Fix crash when winpcap is not installed
|
||||||
|
|
||||||
|
## 1.5.1 07/07/2016
|
||||||
|
|
||||||
|
* Increase the number of interface for docker
|
||||||
|
* Add the method in the bad request answer
|
||||||
|
* Fix a rare crash in IOU
|
||||||
|
* Fix a crash when docker is used but not installed
|
||||||
|
* Backport Docker node hot linking
|
||||||
|
* Allows hot-linking for Docker containers. Ref #267.
|
||||||
|
|
||||||
|
## 1.5.0 27/06/2016
|
||||||
|
|
||||||
|
* Fix import of project with no disk
|
||||||
|
* Allow for (a lot) more docker container ports. Fixes #593.
|
||||||
|
* Raise an error if you try to use Docker on non Linux host
|
||||||
|
* Fix a crash in Docker if daemon stop to respond
|
||||||
|
* Fix a crash if Dynamips router has no initial configuration
|
||||||
|
* Kill ghosts process at startup (Dynamips, VPCS, Ubridge)
|
||||||
|
|
||||||
|
## 1.5.0rc2 15/06/2016
|
||||||
|
|
||||||
|
* Fix black screen with Qt app in Docker container
|
||||||
|
* Detect when command in the container exit
|
||||||
|
* Docker when the aux console exit and restart it
|
||||||
|
* Pass by default the environment variable container=docker
|
||||||
|
* Fix busybox binary location
|
||||||
|
* Avoid loosing console port for Docker
|
||||||
|
* Workaround a crash in x11vnc
|
||||||
|
* Delete volume when dropping the container
|
||||||
|
* Catch connection reset in ioucon
|
||||||
|
* Delete vlan.dat for L2IOL during config import. Fixes #1285.
|
||||||
|
* Copy original ressources from VOLUMES
|
||||||
|
|
||||||
|
## 1.5.0rc1 01/06/2016
|
||||||
|
|
||||||
|
* Save an restore docker permission
|
||||||
|
* Export the list of volumes to a env variable accessible in the container
|
||||||
|
* Fix a crash when docker start command is None
|
||||||
|
* Ubridge 0.9.4 is require
|
||||||
|
* Generate a MAC address using the project + node UUID. Ref #522.
|
||||||
|
* Catch extra args in windows signal handler
|
||||||
|
* Allow to block network traffic originating from the host OS for vmnet interfaces (Windows only).
|
||||||
|
* Fix an import error when you have no GNS3 VM
|
||||||
|
* Warn if you can not export a file due to permission issue
|
||||||
|
* Do not delete adapters when stopping a VMware VM. Ref #1066.
|
||||||
|
* Allocate a new vmnet interface if vmnet 0 1 or 8 is set to a custom adapter. Set adapter type to all adapters regardless if already configured or added by GNS3.
|
||||||
|
* Set default VMware VM adapter type to e1000.
|
||||||
|
|
||||||
|
## 1.5.0b1 23/05/2016
|
||||||
|
|
||||||
|
* Allow an IOS router to stop even the Dynamips hypervisor command fail to be sent. Ref #488.
|
||||||
|
* Extract private-config only when necessary (content is different than the default). Fixes #520.
|
||||||
|
* Fixes disabling the VPCS relay feature. Fixes #521.
|
||||||
|
* Fixes wrong exception in Docker VM implementation.
|
||||||
|
* Force Npcap DLL to be used first for Dynamips and uBridge (instead of the one from Winpcap if installed).
|
||||||
|
* Fixed startup-config is lost if you change any IOS router settings. Fixes #1233.
|
||||||
|
* Fixes check for NPF service and add check for NPCAP service on Windows.
|
||||||
|
* Fix ProcessLookupError X11VNC
|
||||||
|
* Force tag latest for docker image if no tag is specified
|
||||||
|
* Cleanup unbreakable space
|
||||||
|
* Do not raise error if vmrun.exe is named vmrun.EXE
|
||||||
|
* Load docker api only for Linux
|
||||||
|
|
||||||
|
## 1.5.0a2 10/05/2016
|
||||||
|
|
||||||
|
* Fix distribution on PyPi
|
||||||
|
|
||||||
|
## 1.5.0a1 10/05/2016
|
||||||
|
|
||||||
|
* Rebase Qcow2 disks when starting a VM if needed
|
||||||
|
* Docker support
|
||||||
|
* import / export portable projects (.gns3project)
|
||||||
|
|
||||||
|
## 1.4.6 28/04/2016
|
||||||
|
|
||||||
|
* More robust save/restore for VirtualBox linked clone VM hard disks.
|
||||||
|
* Prevent non linked cloned hard disks to be detached when using VirtualBox linked cloned VMs. Fixes #1184.
|
||||||
|
* Stricter checks to match VMware version to the right vmrun (VIX library) version. Also checks the VIX library version when only using the GNS3 VM running in VMware.
|
||||||
|
* Allow only .pcap to be downloaded from remote stream API
|
||||||
|
* Fix incrementation of qemu mac address
|
||||||
|
* Clear warnings about using linked clones with VMware Player.
|
||||||
|
* Alternative method to find the Documents folder on Windows.
|
||||||
|
* Add IOU support and install config in /etc
|
||||||
|
|
||||||
|
## 1.4.5 23/03/2016
|
||||||
|
|
||||||
|
* Stop the VMware VM if there is an error while setting up the network connections or console.
|
||||||
|
* Remote install on 14.04 ubuntu
|
||||||
|
* Include VMware VMs paths found preferences.ini
|
||||||
|
* Allow to stop a VMware VM from GNS3 even if halted within the VM. Fixes #1118.
|
||||||
|
* Keep Dynamips stdout log file in the project directory.
|
||||||
|
* Get MAC addresses for host interfaces to use for filtering frames from vmnet interfaces.
|
||||||
|
* Dynamips uuid hypervisor command is no longer supported.
|
||||||
|
* Restart NPF service after adding vmnet adapters on Windows.
|
||||||
|
* Support /etc/gns3/gns3_server.conf for the config
|
||||||
|
* Improve warning if fusion is not installed or in non standard location
|
||||||
|
|
||||||
|
## 1.4.4 23/02/2016
|
||||||
|
* Check if VMware Fusion is correctly installed when retrieving the VM list.
|
||||||
|
|
||||||
|
## 1.4.3 19/02/2016
|
||||||
|
* Nothing! (changes made in the GUI only).
|
||||||
|
|
||||||
|
## 1.4.2 17/02/2016
|
||||||
|
* Fix missing format in IOU export
|
||||||
|
* Fix number of arguments to the UDP errors on VBOX
|
||||||
|
* Add verification when UDP tunnel is created in a VirtualBox VM. Ref #899.
|
||||||
|
* Fixes VMware linked clone cleanup bug. Fixes #420.
|
||||||
|
* Removed docker support from 1.4 (drop unused code)
|
||||||
|
* Fix a crash if you create a file named IOS in the image dir
|
||||||
|
* Disallow creating project with " in the path
|
||||||
|
* Always look at the registry to find vmrun.exe on Windows.
|
||||||
|
* Check for VMware VIX library version. Fixes #413.
|
||||||
|
* Fixes VDE not working #345.
|
||||||
|
* Do not list qemu binary with -spice in the name
|
||||||
|
* Send command line used to start the VM to client
|
||||||
|
* Fix crash if you have a { in your user name
|
||||||
|
|
||||||
## 1.4.1 01/02/2016
|
## 1.4.1 01/02/2016
|
||||||
* VMware raise error if version is not found
|
* VMware raise error if version is not found
|
||||||
* For topologies before 1.4 manage qemu missing
|
* For topologies before 1.4 manage qemu missing
|
||||||
@ -12,7 +164,7 @@
|
|||||||
* Fix error when setting Qemu VM boot to 'cd' (HDD or CD/DVD-ROM)
|
* Fix error when setting Qemu VM boot to 'cd' (HDD or CD/DVD-ROM)
|
||||||
* Fixed the VMware default VM location on Windows, so that it doesn't assume the "Documents" folder is within the %USERPROFILE% folder, and also support Windows Server's folder (which is "My Virtual Machines" instead of "Virtual Machines").
|
* Fixed the VMware default VM location on Windows, so that it doesn't assume the "Documents" folder is within the %USERPROFILE% folder, and also support Windows Server's folder (which is "My Virtual Machines" instead of "Virtual Machines").
|
||||||
* Improve dynamips startup_config dump
|
* Improve dynamips startup_config dump
|
||||||
* Dump environnement to server debug log
|
* Dump environment to server debug log
|
||||||
* Fix usage of qemu 0.10 on Windows
|
* Fix usage of qemu 0.10 on Windows
|
||||||
* Show hostname when the hostname is missing in the iourc.txt
|
* Show hostname when the hostname is missing in the iourc.txt
|
||||||
|
|
||||||
@ -64,7 +216,7 @@
|
|||||||
* Support VM usage for qemu
|
* Support VM usage for qemu
|
||||||
* Raise an error if psutil version is invalid
|
* Raise an error if psutil version is invalid
|
||||||
|
|
||||||
## 1.4.0rc1 12/11/2015
|
## 1.4.0rc1 12/11/2015
|
||||||
|
|
||||||
* Raise error if server received windows path
|
* Raise error if server received windows path
|
||||||
* Update sentry key
|
* Update sentry key
|
||||||
@ -204,7 +356,7 @@
|
|||||||
* Send an error when vmware executable cannot be found on Linux. Fixes #288.
|
* Send an error when vmware executable cannot be found on Linux. Fixes #288.
|
||||||
* Support for CPUs setting for Qemu VMs.
|
* Support for CPUs setting for Qemu VMs.
|
||||||
|
|
||||||
## 1.4.0alpha4 04/08/2015
|
## 1.4.0alpha4 04/08/2015
|
||||||
|
|
||||||
* Quote command in qemu debug logs so you can copy/paste them
|
* Quote command in qemu debug logs so you can copy/paste them
|
||||||
* Support for Qemu disk interfaces, cd/dvd-rom image and boot priority. Fixes #278.
|
* Support for Qemu disk interfaces, cd/dvd-rom image and boot priority. Fixes #278.
|
||||||
@ -212,11 +364,11 @@
|
|||||||
* Catch GeneratorExit exception when trying to create a Ghost IOS image.
|
* Catch GeneratorExit exception when trying to create a Ghost IOS image.
|
||||||
* Backport: removes code that deletes IOS router instance files.
|
* Backport: removes code that deletes IOS router instance files.
|
||||||
|
|
||||||
## 1.3.9 03/08/2015
|
## 1.3.9 03/08/2015
|
||||||
|
|
||||||
* Backport: removes code that deletes IOS router instance files.
|
* Backport: removes code that deletes IOS router instance files.
|
||||||
|
|
||||||
## 1.4.0alpha3 28/07/2015
|
## 1.4.0alpha3 28/07/2015
|
||||||
|
|
||||||
* Raise error if qemu image already exist when creating disk
|
* Raise error if qemu image already exist when creating disk
|
||||||
* Prevent user to create a qemu to a different directory on non local server
|
* Prevent user to create a qemu to a different directory on non local server
|
||||||
@ -236,7 +388,7 @@
|
|||||||
* Update documentation
|
* Update documentation
|
||||||
* API for listing current projects
|
* API for listing current projects
|
||||||
|
|
||||||
## 1.3.8 27/07/2015
|
## 1.3.8 27/07/2015
|
||||||
|
|
||||||
* Catch ProcessLookupError when updating iouyap config. Fixes #255.
|
* Catch ProcessLookupError when updating iouyap config. Fixes #255.
|
||||||
* Fixes IOS adapters and WICS cannot be removed. Fixes #282.
|
* Fixes IOS adapters and WICS cannot be removed. Fixes #282.
|
||||||
@ -253,7 +405,7 @@
|
|||||||
* Backport from 1.4: Fixes RuntimeError: Event loop is closed.
|
* Backport from 1.4: Fixes RuntimeError: Event loop is closed.
|
||||||
* Backport from 1.4: Bind host on 0.0.0.0 when checking for a free UDP port.
|
* Backport from 1.4: Bind host on 0.0.0.0 when checking for a free UDP port.
|
||||||
|
|
||||||
## 1.4.0alpha2 22/07/2015
|
## 1.4.0alpha2 22/07/2015
|
||||||
|
|
||||||
* Deactivate uBridge process monitoring (process returns 1 on Windows when stopping).
|
* Deactivate uBridge process monitoring (process returns 1 on Windows when stopping).
|
||||||
* Prevent using different hypervisors that leverage hardware virtualization. - Implemented for Qemu when a VMware or VirtualBox VM with hardware virtualization is already running. - Implemented for VirtualBox only when a Qemu VM with KVM is already running.
|
* Prevent using different hypervisors that leverage hardware virtualization. - Implemented for Qemu when a VMware or VirtualBox VM with hardware virtualization is already running. - Implemented for VirtualBox only when a Qemu VM with KVM is already running.
|
||||||
@ -308,15 +460,15 @@
|
|||||||
* A notification stream with process monitoring
|
* A notification stream with process monitoring
|
||||||
* VMware support
|
* VMware support
|
||||||
|
|
||||||
## 1.3.7 22/06/2015
|
## 1.3.7 22/06/2015
|
||||||
|
|
||||||
* Prevent install on Python 2
|
* Prevent install on Python 2
|
||||||
|
|
||||||
## 1.3.6 16/06/2015
|
## 1.3.6 16/06/2015
|
||||||
|
|
||||||
* Fix an issue with 1.4dev compatibility
|
* Fix an issue with 1.4dev compatibility
|
||||||
|
|
||||||
## 1.3.5 16/06/15
|
## 1.3.5 16/06/15
|
||||||
|
|
||||||
* Ignore invalid characters when reading the output of a process
|
* Ignore invalid characters when reading the output of a process
|
||||||
* Turn on / off authentication
|
* Turn on / off authentication
|
||||||
@ -476,7 +628,7 @@
|
|||||||
* Initialize chassis when creating an IOS router. Fixes #107.
|
* Initialize chassis when creating an IOS router. Fixes #107.
|
||||||
* Lock the dynamips reader an writer
|
* Lock the dynamips reader an writer
|
||||||
|
|
||||||
## 1.3.0rc1 19/03/2015
|
## 1.3.0rc1 19/03/2015
|
||||||
|
|
||||||
* Save IOS router config when saving the project
|
* Save IOS router config when saving the project
|
||||||
* Look in legacy IOU images directory
|
* Look in legacy IOU images directory
|
||||||
@ -485,7 +637,7 @@
|
|||||||
* Support all QEMU status
|
* Support all QEMU status
|
||||||
* Bind tunnel UDP to the correct source index
|
* Bind tunnel UDP to the correct source index
|
||||||
|
|
||||||
## 1.3.0beta2 13/03/2015
|
## 1.3.0beta2 13/03/2015
|
||||||
|
|
||||||
* Fixed issue when VBoxManage returns an error.
|
* Fixed issue when VBoxManage returns an error.
|
||||||
* Server handler to shutdown a local server.
|
* Server handler to shutdown a local server.
|
||||||
@ -495,7 +647,7 @@
|
|||||||
* Alternative local server shutdown (intended for Windows).
|
* Alternative local server shutdown (intended for Windows).
|
||||||
* Request user permission to kill the local server if it cannot be stopped.
|
* Request user permission to kill the local server if it cannot be stopped.
|
||||||
|
|
||||||
## 1.3.0beta1 11/03/2015
|
## 1.3.0beta1 11/03/2015
|
||||||
|
|
||||||
* Optional IOU license key check.
|
* Optional IOU license key check.
|
||||||
* Relative path support of IOU, IOS and Qemu images.
|
* Relative path support of IOU, IOS and Qemu images.
|
||||||
@ -518,7 +670,7 @@
|
|||||||
* Fixed Telnet server initialization issue in VirtualBox.
|
* Fixed Telnet server initialization issue in VirtualBox.
|
||||||
* Disconnect network cable if adapter is not attached in VirtualBox vNIC.
|
* Disconnect network cable if adapter is not attached in VirtualBox vNIC.
|
||||||
|
|
||||||
## 1.3.0alpha1 03/03/2015
|
## 1.3.0alpha1 03/03/2015
|
||||||
|
|
||||||
* HTTP Rest API instead of WebSocket
|
* HTTP Rest API instead of WebSocket
|
||||||
* API documentation
|
* API documentation
|
||||||
@ -533,7 +685,7 @@
|
|||||||
|
|
||||||
## 1.2.2 2015/01/16
|
## 1.2.2 2015/01/16
|
||||||
|
|
||||||
### Small improvements / new features
|
### Small improvements / new features
|
||||||
|
|
||||||
* Auxiliary console support for IOS routers.
|
* Auxiliary console support for IOS routers.
|
||||||
* Suspend / resume support for Qemu.
|
* Suspend / resume support for Qemu.
|
||||||
@ -565,7 +717,7 @@
|
|||||||
* VirtualBox linked clones support (experimental, still some problems with temporary projects).
|
* VirtualBox linked clones support (experimental, still some problems with temporary projects).
|
||||||
|
|
||||||
|
|
||||||
## 1.1 2014/10/23
|
## 1.1 2014/10/23
|
||||||
|
|
||||||
* Serial console for local VirtualBox.
|
* Serial console for local VirtualBox.
|
||||||
|
|
||||||
|
@ -18,13 +18,17 @@ it on https://github.com/GNS3/gns3-gui we will take care of the triage.
|
|||||||
|
|
||||||
For bugs specific to the GNS3 VM, please report on https://github.com/GNS3/gns3-vm
|
For bugs specific to the GNS3 VM, please report on https://github.com/GNS3/gns3-vm
|
||||||
|
|
||||||
## Asking for new features
|
## Security issues
|
||||||
|
|
||||||
|
For security issues please keep it private and send an email to developers@gns3.net
|
||||||
|
|
||||||
|
## Asking for new features
|
||||||
|
|
||||||
The best is to start a discussion on the community website in order to get feedback
|
The best is to start a discussion on the community website in order to get feedback
|
||||||
from the whole community.
|
from the whole community.
|
||||||
|
|
||||||
|
|
||||||
## Contributing code
|
## Contributing code
|
||||||
|
|
||||||
We welcome code contribution from everyone including beginners.
|
We welcome code contribution from everyone including beginners.
|
||||||
Don't be afraid to submit a half finished or mediocre contribution and we will help you.
|
Don't be afraid to submit a half finished or mediocre contribution and we will help you.
|
||||||
@ -45,6 +49,6 @@ The reason we do this is to ensure, to the extent possible, that we don’t “t
|
|||||||
|
|
||||||
More information there: https://github.com/GNS3/cla
|
More information there: https://github.com/GNS3/cla
|
||||||
|
|
||||||
### Pull requests
|
### Pull requests
|
||||||
|
|
||||||
Creating a pull request is the easiest way to contribute code. Do not hesitate to create one early when contributing for new feature in order to get our feedback.
|
Creating a pull request is the easiest way to contribute code. Do not hesitate to create one early when contributing for new feature in order to get our feedback.
|
||||||
|
@ -4,6 +4,7 @@ include INSTALL
|
|||||||
include LICENSE
|
include LICENSE
|
||||||
include MANIFEST.in
|
include MANIFEST.in
|
||||||
include tox.ini
|
include tox.ini
|
||||||
|
include requirements.txt
|
||||||
recursive-include tests *
|
recursive-include tests *
|
||||||
recursive-exclude docs *
|
recursive-exclude docs *
|
||||||
recursive-include gns3server *
|
recursive-include gns3server *
|
||||||
|
@ -205,4 +205,4 @@ If you want test coverage:
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
py.test --cov=gns3server
|
py.test --cov-report term-missing --cov=gns3server
|
||||||
|
61
conf/gns3_server.conf
Normal file
61
conf/gns3_server.conf
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
[Server]
|
||||||
|
; IP where the server listen for connections
|
||||||
|
host = 0.0.0.0
|
||||||
|
; HTTP port for controlling the servers
|
||||||
|
port = 3080
|
||||||
|
|
||||||
|
; Option to enable SSL encryption
|
||||||
|
ssl = False
|
||||||
|
certfile=/home/gns3/.config/GNS3/ssl/server.cert
|
||||||
|
certkey=/home/gns3/.config/GNS3/ssl/server.key
|
||||||
|
|
||||||
|
; Path where devices images are stored
|
||||||
|
images_path = /home/gns3/GNS3/images
|
||||||
|
; Path where user projects are stored
|
||||||
|
projects_path = /home/gns3/GNS3/projects
|
||||||
|
|
||||||
|
; Option to automatically send crash reports to the GNS3 team
|
||||||
|
report_errors = True
|
||||||
|
|
||||||
|
; First console port of the range allocated to devices
|
||||||
|
console_start_port_range = 5000
|
||||||
|
; Last console port of the range allocated to devices
|
||||||
|
console_end_port_range = 10000
|
||||||
|
; First port of the range allocated for inter-device communication. Two ports are allocated per link.
|
||||||
|
udp_start_port_range = 10000
|
||||||
|
; Last port of the range allocated for inter-device communication. Two ports are allocated per link
|
||||||
|
udp_start_end_range = 20000
|
||||||
|
; uBridge executable location, default: search in PATH
|
||||||
|
;ubridge_path = ubridge
|
||||||
|
|
||||||
|
; Option to enable HTTP authentication.
|
||||||
|
auth = False
|
||||||
|
; Username for HTTP authentication.
|
||||||
|
user = gns3
|
||||||
|
; Password for HTTP authentication.
|
||||||
|
password = gns3
|
||||||
|
|
||||||
|
[VPCS]
|
||||||
|
; VPCS executable location, default: search in PATH
|
||||||
|
;vpcs_path = vpcs
|
||||||
|
|
||||||
|
[Dynamips]
|
||||||
|
; Enable auxiliary console ports on IOS routers
|
||||||
|
allocate_aux_console_ports = False
|
||||||
|
mmap_support = True
|
||||||
|
; Dynamips executable path, default: search in PATH
|
||||||
|
;dynamips_path = dynamips
|
||||||
|
sparse_memory_support = True
|
||||||
|
ghost_ios_support = True
|
||||||
|
|
||||||
|
[IOU]
|
||||||
|
; iouyap executable path, default: search in PATH
|
||||||
|
;iouyap_path = iouyap
|
||||||
|
; Path of your .iourc file. If not provided, the file is searched in $HOME/.iourc
|
||||||
|
iourc_path = /home/gns3/.iourc
|
||||||
|
; Validate if the iourc license file is correct. If you turn this off and your licence is invalid IOU will not start and no errors will be shown.
|
||||||
|
license_check = True
|
||||||
|
|
||||||
|
[Qemu]
|
||||||
|
; !! Remember to add the gns3 user to the KVM group, otherwise you will not have read / write permssions to /dev/kvm !!
|
||||||
|
enable_kvm = True
|
@ -29,7 +29,7 @@ You can check the server version with a simple curl command:
|
|||||||
|
|
||||||
.. code-block:: shell-session
|
.. code-block:: shell-session
|
||||||
|
|
||||||
# curl "http://localhost:8000/v1/version"
|
# curl "http://localhost:3080/v1/version"
|
||||||
{
|
{
|
||||||
"version": "1.3.dev1"
|
"version": "1.3.dev1"
|
||||||
}
|
}
|
||||||
@ -39,7 +39,7 @@ The next step is to create a project.
|
|||||||
|
|
||||||
.. code-block:: shell-session
|
.. code-block:: shell-session
|
||||||
|
|
||||||
# curl -X POST "http://localhost:8000/v1/projects" -d '{"name": "test"}'
|
# curl -X POST "http://localhost:3080/v1/projects" -d '{"name": "test"}'
|
||||||
{
|
{
|
||||||
"project_id": "42f9feee-3217-4104-981e-85d5f0a806ec",
|
"project_id": "42f9feee-3217-4104-981e-85d5f0a806ec",
|
||||||
"temporary": false,
|
"temporary": false,
|
||||||
@ -50,7 +50,7 @@ With this project id we can now create two VPCS VM.
|
|||||||
|
|
||||||
.. code-block:: shell-session
|
.. code-block:: shell-session
|
||||||
|
|
||||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 1"}'
|
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 1"}'
|
||||||
{
|
{
|
||||||
"console": 2000,
|
"console": 2000,
|
||||||
"name": "VPCS 1",
|
"name": "VPCS 1",
|
||||||
@ -58,7 +58,7 @@ With this project id we can now create two VPCS VM.
|
|||||||
"vm_id": "24d2e16b-fbef-4259-ae34-7bc21a41ee28"
|
"vm_id": "24d2e16b-fbef-4259-ae34-7bc21a41ee28"
|
||||||
}%
|
}%
|
||||||
|
|
||||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 2"}'
|
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 2"}'
|
||||||
{
|
{
|
||||||
"console": 2001,
|
"console": 2001,
|
||||||
"name": "VPCS 2",
|
"name": "VPCS 2",
|
||||||
@ -70,12 +70,12 @@ two UDP ports.
|
|||||||
|
|
||||||
.. code-block:: shell-session
|
.. code-block:: shell-session
|
||||||
|
|
||||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
|
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
|
||||||
{
|
{
|
||||||
"udp_port": 10000
|
"udp_port": 10000
|
||||||
}
|
}
|
||||||
|
|
||||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
|
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
|
||||||
{
|
{
|
||||||
"udp_port": 10001
|
"udp_port": 10001
|
||||||
}
|
}
|
||||||
@ -86,7 +86,7 @@ communication is made by creating two UDP tunnels.
|
|||||||
|
|
||||||
.. code-block:: shell-session
|
.. code-block:: shell-session
|
||||||
|
|
||||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/adapters/0/ports/0/nio" -d '{"lport": 10000, "rhost": "127.0.0.1", "rport": 10001, "type": "nio_udp"}'
|
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/adapters/0/ports/0/nio" -d '{"lport": 10000, "rhost": "127.0.0.1", "rport": 10001, "type": "nio_udp"}'
|
||||||
{
|
{
|
||||||
"lport": 10000,
|
"lport": 10000,
|
||||||
"rhost": "127.0.0.1",
|
"rhost": "127.0.0.1",
|
||||||
@ -94,7 +94,7 @@ communication is made by creating two UDP tunnels.
|
|||||||
"type": "nio_udp"
|
"type": "nio_udp"
|
||||||
}
|
}
|
||||||
|
|
||||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/adapters/0/ports/0/nio" -d '{"lport": 10001, "rhost": "127.0.0.1", "rport": 10000, "type": "nio_udp"}'
|
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/adapters/0/ports/0/nio" -d '{"lport": 10001, "rhost": "127.0.0.1", "rport": 10000, "type": "nio_udp"}'
|
||||||
{
|
{
|
||||||
"lport": 10001,
|
"lport": 10001,
|
||||||
"rhost": "127.0.0.1",
|
"rhost": "127.0.0.1",
|
||||||
@ -106,15 +106,15 @@ Now we can start the two VM
|
|||||||
|
|
||||||
.. code-block:: shell-session
|
.. code-block:: shell-session
|
||||||
|
|
||||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/start" -d "{}"
|
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/start" -d "{}"
|
||||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/start" -d '{}'
|
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/start" -d '{}'
|
||||||
|
|
||||||
Everything should be started now. You can connect via telnet to the different VM.
|
Everything should be started now. You can connect via telnet to the different VM.
|
||||||
The port is the field console in the create VM request.
|
The port is the field console in the create VM request.
|
||||||
|
|
||||||
.. code-block:: shell-session
|
.. code-block:: shell-session
|
||||||
|
|
||||||
# telnet 127.0.0.1 2000
|
# telnet 127.0.0.1 2000
|
||||||
Trying 127.0.0.1...
|
Trying 127.0.0.1...
|
||||||
Connected to localhost.
|
Connected to localhost.
|
||||||
Escape character is '^]'.
|
Escape character is '^]'.
|
||||||
@ -140,7 +140,7 @@ The port is the field console in the create VM request.
|
|||||||
Good-bye
|
Good-bye
|
||||||
Connection closed by foreign host.
|
Connection closed by foreign host.
|
||||||
|
|
||||||
# telnet 127.0.0.1 2001
|
# telnet 127.0.0.1 2001
|
||||||
telnet 127.0.0.1 2001
|
telnet 127.0.0.1 2001
|
||||||
Trying 127.0.0.1...
|
Trying 127.0.0.1...
|
||||||
Connected to localhost.
|
Connected to localhost.
|
||||||
@ -190,7 +190,7 @@ complexity for the client due to the fact only some command on some VM can be
|
|||||||
concurrent.
|
concurrent.
|
||||||
|
|
||||||
|
|
||||||
Authentification
|
Authentication
|
||||||
-----------------
|
-----------------
|
||||||
|
|
||||||
In this version of the API you have no authentification system. If you
|
In this version of the API you have no authentification system. If you
|
||||||
|
@ -75,7 +75,7 @@ class Config(object):
|
|||||||
# 2: $HOME/.config/GNS3.conf
|
# 2: $HOME/.config/GNS3.conf
|
||||||
# 3: /etc/xdg/GNS3/gns3_server.conf
|
# 3: /etc/xdg/GNS3/gns3_server.conf
|
||||||
# 4: /etc/xdg/GNS3.conf
|
# 4: /etc/xdg/GNS3.conf
|
||||||
# 5: server.conf in the current working directory
|
# 5: gns3_server.conf in the current working directory
|
||||||
|
|
||||||
appname = "GNS3"
|
appname = "GNS3"
|
||||||
home = os.path.expanduser("~")
|
home = os.path.expanduser("~")
|
||||||
@ -84,6 +84,7 @@ class Config(object):
|
|||||||
self._files = [os.path.join(os.getcwd(), filename),
|
self._files = [os.path.join(os.getcwd(), filename),
|
||||||
os.path.join(home, ".config", appname, filename),
|
os.path.join(home, ".config", appname, filename),
|
||||||
os.path.join(home, ".config", appname + ".conf"),
|
os.path.join(home, ".config", appname + ".conf"),
|
||||||
|
os.path.join("/etc/gns3", filename),
|
||||||
os.path.join("/etc/xdg", appname, filename),
|
os.path.join("/etc/xdg", appname, filename),
|
||||||
os.path.join("/etc/xdg", appname + ".conf")]
|
os.path.join("/etc/xdg", appname + ".conf")]
|
||||||
|
|
||||||
|
41
gns3server/controller/__init__.py
Normal file
41
gns3server/controller/__init__.py
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
#
|
||||||
|
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
|
||||||
|
from ..config import Config
|
||||||
|
|
||||||
|
|
||||||
|
class Controller:
|
||||||
|
"""The controller manage multiple gns3 servers"""
|
||||||
|
|
||||||
|
def isEnabled(self):
|
||||||
|
"""
|
||||||
|
:returns: True if current instance is the controller
|
||||||
|
of our GNS3 infrastructure.
|
||||||
|
"""
|
||||||
|
return Config.instance().get_section_config("Server").getboolean("controller")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def instance():
|
||||||
|
"""
|
||||||
|
Singleton to return only on instance of Controller.
|
||||||
|
:returns: instance of Controller
|
||||||
|
"""
|
||||||
|
|
||||||
|
if not hasattr(Controller, '_instance') or Controller._instance is None:
|
||||||
|
Controller._instance = Controller()
|
||||||
|
return Controller._instance
|
@ -36,7 +36,7 @@ import logging
|
|||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
# Dev build
|
# Dev build
|
||||||
if __version_info__[3] != 0:
|
if __version_info__[3] != 0:
|
||||||
import faulthandler
|
import faulthandler
|
||||||
|
|
||||||
@ -52,7 +52,7 @@ class CrashReport:
|
|||||||
Report crash to a third party service
|
Report crash to a third party service
|
||||||
"""
|
"""
|
||||||
|
|
||||||
DSN = "sync+https://119ddececccd43b69951ac87d4859870:2a982a50bbbb49ddb33c87ef3720026e@app.getsentry.com/38482"
|
DSN = "sync+https://700b0c46edb0473baacd2dc318d8de1f:824bd6d75471494ebcb87ce27cfdeade@sentry.io/38482"
|
||||||
if hasattr(sys, "frozen"):
|
if hasattr(sys, "frozen"):
|
||||||
cacert = get_resource("cacert.pem")
|
cacert = get_resource("cacert.pem")
|
||||||
if cacert is not None and os.path.isfile(cacert):
|
if cacert is not None and os.path.isfile(cacert):
|
||||||
|
@ -24,7 +24,6 @@ from gns3server.handlers.api.dynamips_device_handler import DynamipsDeviceHandle
|
|||||||
from gns3server.handlers.api.dynamips_vm_handler import DynamipsVMHandler
|
from gns3server.handlers.api.dynamips_vm_handler import DynamipsVMHandler
|
||||||
from gns3server.handlers.api.qemu_handler import QEMUHandler
|
from gns3server.handlers.api.qemu_handler import QEMUHandler
|
||||||
from gns3server.handlers.api.virtualbox_handler import VirtualBoxHandler
|
from gns3server.handlers.api.virtualbox_handler import VirtualBoxHandler
|
||||||
from gns3server.handlers.api.docker_handler import DockerHandler
|
|
||||||
from gns3server.handlers.api.vpcs_handler import VPCSHandler
|
from gns3server.handlers.api.vpcs_handler import VPCSHandler
|
||||||
from gns3server.handlers.api.vmware_handler import VMwareHandler
|
from gns3server.handlers.api.vmware_handler import VMwareHandler
|
||||||
from gns3server.handlers.api.config_handler import ConfigHandler
|
from gns3server.handlers.api.config_handler import ConfigHandler
|
||||||
@ -34,6 +33,8 @@ from gns3server.handlers.upload_handler import UploadHandler
|
|||||||
from gns3server.handlers.index_handler import IndexHandler
|
from gns3server.handlers.index_handler import IndexHandler
|
||||||
|
|
||||||
if sys.platform.startswith("linux") or hasattr(sys, "_called_from_test") or os.environ.get("PYTEST_BUILD_DOCUMENTATION") == "1":
|
if sys.platform.startswith("linux") or hasattr(sys, "_called_from_test") or os.environ.get("PYTEST_BUILD_DOCUMENTATION") == "1":
|
||||||
|
from gns3server.handlers.api.docker_handler import DockerHandler
|
||||||
|
|
||||||
# IOU runs only on Linux but testsuite work on UNIX platform
|
# IOU runs only on Linux but testsuite work on UNIX platform
|
||||||
if not sys.platform.startswith("win"):
|
if not sys.platform.startswith("win"):
|
||||||
from gns3server.handlers.api.iou_handler import IOUHandler
|
from gns3server.handlers.api.iou_handler import IOUHandler
|
||||||
|
@ -15,15 +15,19 @@
|
|||||||
# You should have received a copy of the GNU General Public License
|
# You should have received a copy of the GNU General Public License
|
||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
from aiohttp.web import HTTPConflict
|
from aiohttp.web import HTTPConflict
|
||||||
|
|
||||||
from ...web.route import Route
|
from ...web.route import Route
|
||||||
from ...modules.docker import Docker
|
from ...modules.docker import Docker
|
||||||
|
|
||||||
from ...schemas.docker import (
|
from ...schemas.docker import (
|
||||||
DOCKER_CREATE_SCHEMA, DOCKER_UPDATE_SCHEMA, DOCKER_CAPTURE_SCHEMA,
|
DOCKER_CREATE_SCHEMA,
|
||||||
DOCKER_OBJECT_SCHEMA
|
DOCKER_OBJECT_SCHEMA,
|
||||||
|
DOCKER_UPDATE_SCHEMA,
|
||||||
|
DOCKER_LIST_IMAGES_SCHEMA
|
||||||
)
|
)
|
||||||
|
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||||
from ...schemas.nio import NIO_SCHEMA
|
from ...schemas.nio import NIO_SCHEMA
|
||||||
|
|
||||||
|
|
||||||
@ -36,6 +40,7 @@ class DockerHandler:
|
|||||||
status_codes={
|
status_codes={
|
||||||
200: "Success",
|
200: "Success",
|
||||||
},
|
},
|
||||||
|
output=DOCKER_LIST_IMAGES_SCHEMA,
|
||||||
description="Get all available Docker images")
|
description="Get all available Docker images")
|
||||||
def show(request, response):
|
def show(request, response):
|
||||||
docker_manager = Docker.instance()
|
docker_manager = Docker.instance()
|
||||||
@ -44,7 +49,7 @@ class DockerHandler:
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.post(
|
@Route.post(
|
||||||
r"/projects/{project_id}/docker/images",
|
r"/projects/{project_id}/docker/vms",
|
||||||
parameters={
|
parameters={
|
||||||
"project_id": "UUID for the project"
|
"project_id": "UUID for the project"
|
||||||
},
|
},
|
||||||
@ -61,11 +66,18 @@ class DockerHandler:
|
|||||||
container = yield from docker_manager.create_vm(
|
container = yield from docker_manager.create_vm(
|
||||||
request.json.pop("name"),
|
request.json.pop("name"),
|
||||||
request.match_info["project_id"],
|
request.match_info["project_id"],
|
||||||
request.json.get("id"),
|
request.json.get("vm_id"),
|
||||||
image=request.json.pop("imagename"),
|
image=request.json.pop("image"),
|
||||||
startcmd=request.json.get("startcmd")
|
start_command=request.json.get("start_command"),
|
||||||
|
environment=request.json.get("environment"),
|
||||||
|
adapters=request.json.get("adapters"),
|
||||||
|
console=request.json.get("console"),
|
||||||
|
console_type=request.json.get("console_type"),
|
||||||
|
console_resolution=request.json.get("console_resolution", "1024x768"),
|
||||||
|
console_http_port=request.json.get("console_http_port", 80),
|
||||||
|
console_http_path=request.json.get("console_http_path", "/"),
|
||||||
|
aux=request.json.get("aux")
|
||||||
)
|
)
|
||||||
# FIXME: DO WE NEED THIS?
|
|
||||||
for name, value in request.json.items():
|
for name, value in request.json.items():
|
||||||
if name != "_vm_id":
|
if name != "_vm_id":
|
||||||
if hasattr(container, name) and getattr(container, name) != value:
|
if hasattr(container, name) and getattr(container, name) != value:
|
||||||
@ -76,7 +88,7 @@ class DockerHandler:
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.post(
|
@Route.post(
|
||||||
r"/projects/{project_id}/docker/images/{id}/start",
|
r"/projects/{project_id}/docker/vms/{id}/start",
|
||||||
parameters={
|
parameters={
|
||||||
"project_id": "UUID of the project",
|
"project_id": "UUID of the project",
|
||||||
"id": "ID of the container"
|
"id": "ID of the container"
|
||||||
@ -91,7 +103,7 @@ class DockerHandler:
|
|||||||
output=DOCKER_OBJECT_SCHEMA)
|
output=DOCKER_OBJECT_SCHEMA)
|
||||||
def start(request, response):
|
def start(request, response):
|
||||||
docker_manager = Docker.instance()
|
docker_manager = Docker.instance()
|
||||||
container = docker_manager.get_container(
|
container = docker_manager.get_vm(
|
||||||
request.match_info["id"],
|
request.match_info["id"],
|
||||||
project_id=request.match_info["project_id"])
|
project_id=request.match_info["project_id"])
|
||||||
yield from container.start()
|
yield from container.start()
|
||||||
@ -99,7 +111,7 @@ class DockerHandler:
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.post(
|
@Route.post(
|
||||||
r"/projects/{project_id}/docker/images/{id}/stop",
|
r"/projects/{project_id}/docker/vms/{id}/stop",
|
||||||
parameters={
|
parameters={
|
||||||
"project_id": "UUID of the project",
|
"project_id": "UUID of the project",
|
||||||
"id": "ID of the container"
|
"id": "ID of the container"
|
||||||
@ -114,7 +126,7 @@ class DockerHandler:
|
|||||||
output=DOCKER_OBJECT_SCHEMA)
|
output=DOCKER_OBJECT_SCHEMA)
|
||||||
def stop(request, response):
|
def stop(request, response):
|
||||||
docker_manager = Docker.instance()
|
docker_manager = Docker.instance()
|
||||||
container = docker_manager.get_container(
|
container = docker_manager.get_vm(
|
||||||
request.match_info["id"],
|
request.match_info["id"],
|
||||||
project_id=request.match_info["project_id"])
|
project_id=request.match_info["project_id"])
|
||||||
yield from container.stop()
|
yield from container.stop()
|
||||||
@ -122,7 +134,7 @@ class DockerHandler:
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.post(
|
@Route.post(
|
||||||
r"/projects/{project_id}/docker/images/{id}/reload",
|
r"/projects/{project_id}/docker/vms/{id}/reload",
|
||||||
parameters={
|
parameters={
|
||||||
"project_id": "UUID of the project",
|
"project_id": "UUID of the project",
|
||||||
"id": "ID of the container"
|
"id": "ID of the container"
|
||||||
@ -137,7 +149,7 @@ class DockerHandler:
|
|||||||
output=DOCKER_OBJECT_SCHEMA)
|
output=DOCKER_OBJECT_SCHEMA)
|
||||||
def reload(request, response):
|
def reload(request, response):
|
||||||
docker_manager = Docker.instance()
|
docker_manager = Docker.instance()
|
||||||
container = docker_manager.get_container(
|
container = docker_manager.get_vm(
|
||||||
request.match_info["id"],
|
request.match_info["id"],
|
||||||
project_id=request.match_info["project_id"])
|
project_id=request.match_info["project_id"])
|
||||||
yield from container.restart()
|
yield from container.restart()
|
||||||
@ -145,7 +157,7 @@ class DockerHandler:
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.delete(
|
@Route.delete(
|
||||||
r"/projects/{project_id}/docker/images/{id}",
|
r"/projects/{project_id}/docker/vms/{id}",
|
||||||
parameters={
|
parameters={
|
||||||
"id": "ID for the container",
|
"id": "ID for the container",
|
||||||
"project_id": "UUID for the project"
|
"project_id": "UUID for the project"
|
||||||
@ -158,15 +170,15 @@ class DockerHandler:
|
|||||||
description="Delete a Docker container")
|
description="Delete a Docker container")
|
||||||
def delete(request, response):
|
def delete(request, response):
|
||||||
docker_manager = Docker.instance()
|
docker_manager = Docker.instance()
|
||||||
container = docker_manager.get_container(
|
container = docker_manager.get_vm(
|
||||||
request.match_info["id"],
|
request.match_info["id"],
|
||||||
project_id=request.match_info["project_id"])
|
project_id=request.match_info["project_id"])
|
||||||
yield from container.remove()
|
yield from container.delete()
|
||||||
response.set_status(204)
|
response.set_status(204)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.post(
|
@Route.post(
|
||||||
r"/projects/{project_id}/docker/images/{id}/suspend",
|
r"/projects/{project_id}/docker/vms/{id}/suspend",
|
||||||
parameters={
|
parameters={
|
||||||
"project_id": "UUID of the project",
|
"project_id": "UUID of the project",
|
||||||
"id": "ID of the container"
|
"id": "ID of the container"
|
||||||
@ -181,14 +193,14 @@ class DockerHandler:
|
|||||||
output=DOCKER_OBJECT_SCHEMA)
|
output=DOCKER_OBJECT_SCHEMA)
|
||||||
def suspend(request, response):
|
def suspend(request, response):
|
||||||
docker_manager = Docker.instance()
|
docker_manager = Docker.instance()
|
||||||
container = docker_manager.get_container(
|
container = docker_manager.get_vm(
|
||||||
request.match_info["id"],
|
request.match_info["id"],
|
||||||
project_id=request.match_info["project_id"])
|
project_id=request.match_info["project_id"])
|
||||||
yield from container.pause()
|
yield from container.pause()
|
||||||
response.set_status(204)
|
response.set_status(204)
|
||||||
|
|
||||||
@Route.post(
|
@Route.post(
|
||||||
r"/projects/{project_id}/docker/images/{id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
|
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
|
||||||
parameters={
|
parameters={
|
||||||
"project_id": "UUID for the project",
|
"project_id": "UUID for the project",
|
||||||
"id": "ID of the container",
|
"id": "ID of the container",
|
||||||
@ -205,26 +217,20 @@ class DockerHandler:
|
|||||||
output=NIO_SCHEMA)
|
output=NIO_SCHEMA)
|
||||||
def create_nio(request, response):
|
def create_nio(request, response):
|
||||||
docker_manager = Docker.instance()
|
docker_manager = Docker.instance()
|
||||||
container = docker_manager.get_container(
|
container = docker_manager.get_vm(
|
||||||
request.match_info["id"],
|
request.match_info["vm_id"],
|
||||||
project_id=request.match_info["project_id"])
|
project_id=request.match_info["project_id"])
|
||||||
nio_type = request.json["type"]
|
nio_type = request.json["type"]
|
||||||
if nio_type not in ("nio_udp"):
|
if nio_type not in ("nio_udp", "nio_tap"):
|
||||||
raise HTTPConflict(
|
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
|
||||||
text="NIO of type {} is not supported".format(nio_type))
|
nio = docker_manager.create_nio(int(request.match_info["adapter_number"]), request.json)
|
||||||
nio = docker_manager.create_nio(
|
yield from container.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
|
||||||
int(request.match_info["adapter_number"]), request.json)
|
|
||||||
adapter = container._ethernet_adapters[
|
|
||||||
int(request.match_info["adapter_number"])
|
|
||||||
]
|
|
||||||
container.adapter_add_nio_binding(
|
|
||||||
int(request.match_info["adapter_number"]), nio)
|
|
||||||
response.set_status(201)
|
response.set_status(201)
|
||||||
response.json(nio)
|
response.json(nio)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.delete(
|
@Route.delete(
|
||||||
r"/projects/{project_id}/docker/images/{id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
|
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
|
||||||
parameters={
|
parameters={
|
||||||
"project_id": "UUID for the project",
|
"project_id": "UUID for the project",
|
||||||
"id": "ID of the container",
|
"id": "ID of the container",
|
||||||
@ -239,9 +245,97 @@ class DockerHandler:
|
|||||||
description="Remove a NIO from a Docker container")
|
description="Remove a NIO from a Docker container")
|
||||||
def delete_nio(request, response):
|
def delete_nio(request, response):
|
||||||
docker_manager = Docker.instance()
|
docker_manager = Docker.instance()
|
||||||
container = docker_manager.get_container(
|
container = docker_manager.get_vm(
|
||||||
request.match_info["id"],
|
request.match_info["vm_id"],
|
||||||
project_id=request.match_info["project_id"])
|
project_id=request.match_info["project_id"])
|
||||||
yield from container.adapter_remove_nio_binding(
|
yield from container.adapter_remove_nio_binding(
|
||||||
int(request.match_info["adapter_number"]))
|
int(request.match_info["adapter_number"]))
|
||||||
response.set_status(204)
|
response.set_status(204)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@Route.put(
|
||||||
|
r"/projects/{project_id}/docker/vms/{vm_id}",
|
||||||
|
parameters={
|
||||||
|
"project_id": "UUID for the project",
|
||||||
|
"vm_id": "UUID for the instance"
|
||||||
|
},
|
||||||
|
status_codes={
|
||||||
|
200: "Instance updated",
|
||||||
|
400: "Invalid request",
|
||||||
|
404: "Instance doesn't exist",
|
||||||
|
409: "Conflict"
|
||||||
|
},
|
||||||
|
description="Update a Docker instance",
|
||||||
|
input=DOCKER_UPDATE_SCHEMA,
|
||||||
|
output=DOCKER_OBJECT_SCHEMA)
|
||||||
|
def update(request, response):
|
||||||
|
|
||||||
|
docker_manager = Docker.instance()
|
||||||
|
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||||
|
vm.name = request.json.get("name", vm.name)
|
||||||
|
vm.console = request.json.get("console", vm.console)
|
||||||
|
vm.aux = request.json.get("aux", vm.aux)
|
||||||
|
vm.console_type = request.json.get("console_type", vm.console_type)
|
||||||
|
vm.console_resolution = request.json.get("console_resolution", vm.console_resolution)
|
||||||
|
vm.console_http_port = request.json.get("console_http_port", vm.console_http_port)
|
||||||
|
vm.console_http_path = request.json.get("console_http_path", vm.console_http_path)
|
||||||
|
vm.start_command = request.json.get("start_command", vm.start_command)
|
||||||
|
vm.environment = request.json.get("environment", vm.environment)
|
||||||
|
vm.adapters = request.json.get("adapters", vm.adapters)
|
||||||
|
yield from vm.update()
|
||||||
|
response.json(vm)
|
||||||
|
|
||||||
|
@Route.post(
|
||||||
|
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
|
||||||
|
parameters={
|
||||||
|
"project_id": "UUID for the project",
|
||||||
|
"vm_id": "UUID for the instance",
|
||||||
|
"adapter_number": "Adapter to start a packet capture",
|
||||||
|
"port_number": "Port on the adapter"
|
||||||
|
},
|
||||||
|
status_codes={
|
||||||
|
200: "Capture started",
|
||||||
|
400: "Invalid request",
|
||||||
|
404: "Instance doesn't exist",
|
||||||
|
409: "VM not started"
|
||||||
|
},
|
||||||
|
description="Start a packet capture on a IOU VM instance",
|
||||||
|
input=VM_CAPTURE_SCHEMA)
|
||||||
|
def start_capture(request, response):
|
||||||
|
|
||||||
|
docker_manager = Docker.instance()
|
||||||
|
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||||
|
adapter_number = int(request.match_info["adapter_number"])
|
||||||
|
pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"])
|
||||||
|
|
||||||
|
if not vm.is_running():
|
||||||
|
raise HTTPConflict(text="Cannot capture traffic on a non started VM")
|
||||||
|
yield from vm.start_capture(adapter_number, pcap_file_path)
|
||||||
|
response.json({"pcap_file_path": str(pcap_file_path)})
|
||||||
|
|
||||||
|
@Route.post(
|
||||||
|
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
|
||||||
|
parameters={
|
||||||
|
"project_id": "UUID for the project",
|
||||||
|
"vm_id": "UUID for the instance",
|
||||||
|
"adapter_number": "Adapter to stop a packet capture",
|
||||||
|
"port_number": "Port on the adapter (always 0)"
|
||||||
|
},
|
||||||
|
status_codes={
|
||||||
|
204: "Capture stopped",
|
||||||
|
400: "Invalid request",
|
||||||
|
404: "Instance doesn't exist",
|
||||||
|
409: "VM not started"
|
||||||
|
},
|
||||||
|
description="Stop a packet capture on a IOU VM instance")
|
||||||
|
def stop_capture(request, response):
|
||||||
|
|
||||||
|
docker_manager = Docker.instance()
|
||||||
|
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||||
|
|
||||||
|
if not vm.is_running():
|
||||||
|
raise HTTPConflict(text="Cannot capture traffic on a non started VM")
|
||||||
|
|
||||||
|
adapter_number = int(request.match_info["adapter_number"])
|
||||||
|
yield from vm.stop_capture(adapter_number)
|
||||||
|
response.set_status(204)
|
||||||
|
@ -20,9 +20,9 @@ import asyncio
|
|||||||
from ...web.route import Route
|
from ...web.route import Route
|
||||||
from ...schemas.dynamips_device import DEVICE_CREATE_SCHEMA
|
from ...schemas.dynamips_device import DEVICE_CREATE_SCHEMA
|
||||||
from ...schemas.dynamips_device import DEVICE_UPDATE_SCHEMA
|
from ...schemas.dynamips_device import DEVICE_UPDATE_SCHEMA
|
||||||
from ...schemas.dynamips_device import DEVICE_CAPTURE_SCHEMA
|
|
||||||
from ...schemas.dynamips_device import DEVICE_OBJECT_SCHEMA
|
from ...schemas.dynamips_device import DEVICE_OBJECT_SCHEMA
|
||||||
from ...schemas.dynamips_device import DEVICE_NIO_SCHEMA
|
from ...schemas.dynamips_device import DEVICE_NIO_SCHEMA
|
||||||
|
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||||
from ...modules.dynamips import Dynamips
|
from ...modules.dynamips import Dynamips
|
||||||
|
|
||||||
|
|
||||||
@ -198,7 +198,7 @@ class DynamipsDeviceHandler:
|
|||||||
404: "Instance doesn't exist"
|
404: "Instance doesn't exist"
|
||||||
},
|
},
|
||||||
description="Start a packet capture on a Dynamips device instance",
|
description="Start a packet capture on a Dynamips device instance",
|
||||||
input=DEVICE_CAPTURE_SCHEMA)
|
input=VM_CAPTURE_SCHEMA)
|
||||||
def start_capture(request, response):
|
def start_capture(request, response):
|
||||||
|
|
||||||
dynamips_manager = Dynamips.instance()
|
dynamips_manager = Dynamips.instance()
|
||||||
|
@ -24,9 +24,9 @@ from ...schemas.nio import NIO_SCHEMA
|
|||||||
from ...schemas.vm import VM_LIST_IMAGES_SCHEMA
|
from ...schemas.vm import VM_LIST_IMAGES_SCHEMA
|
||||||
from ...schemas.dynamips_vm import VM_CREATE_SCHEMA
|
from ...schemas.dynamips_vm import VM_CREATE_SCHEMA
|
||||||
from ...schemas.dynamips_vm import VM_UPDATE_SCHEMA
|
from ...schemas.dynamips_vm import VM_UPDATE_SCHEMA
|
||||||
from ...schemas.dynamips_vm import VM_CAPTURE_SCHEMA
|
|
||||||
from ...schemas.dynamips_vm import VM_OBJECT_SCHEMA
|
from ...schemas.dynamips_vm import VM_OBJECT_SCHEMA
|
||||||
from ...schemas.dynamips_vm import VM_CONFIGS_SCHEMA
|
from ...schemas.dynamips_vm import VM_CONFIGS_SCHEMA
|
||||||
|
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||||
from ...modules.dynamips import Dynamips
|
from ...modules.dynamips import Dynamips
|
||||||
from ...modules.dynamips.dynamips_error import DynamipsError
|
from ...modules.dynamips.dynamips_error import DynamipsError
|
||||||
from ...modules.project_manager import ProjectManager
|
from ...modules.project_manager import ProjectManager
|
||||||
|
@ -37,6 +37,9 @@ class FileHandler:
|
|||||||
def read(request, response):
|
def read(request, response):
|
||||||
response.enable_chunked_encoding()
|
response.enable_chunked_encoding()
|
||||||
|
|
||||||
|
if not request.json.get("location").endswith(".pcap"):
|
||||||
|
raise aiohttp.web.HTTPForbidden(text="Only .pcap file are allowed")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
with open(request.json.get("location"), "rb") as f:
|
with open(request.json.get("location"), "rb") as f:
|
||||||
loop = asyncio.get_event_loop()
|
loop = asyncio.get_event_loop()
|
||||||
|
@ -24,9 +24,9 @@ from ...schemas.iou import IOU_CREATE_SCHEMA
|
|||||||
from ...schemas.iou import IOU_START_SCHEMA
|
from ...schemas.iou import IOU_START_SCHEMA
|
||||||
from ...schemas.iou import IOU_UPDATE_SCHEMA
|
from ...schemas.iou import IOU_UPDATE_SCHEMA
|
||||||
from ...schemas.iou import IOU_OBJECT_SCHEMA
|
from ...schemas.iou import IOU_OBJECT_SCHEMA
|
||||||
from ...schemas.iou import IOU_CAPTURE_SCHEMA
|
|
||||||
from ...schemas.iou import IOU_CONFIGS_SCHEMA
|
from ...schemas.iou import IOU_CONFIGS_SCHEMA
|
||||||
from ...schemas.vm import VM_LIST_IMAGES_SCHEMA
|
from ...schemas.vm import VM_LIST_IMAGES_SCHEMA
|
||||||
|
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||||
from ...modules.iou import IOU
|
from ...modules.iou import IOU
|
||||||
|
|
||||||
|
|
||||||
@ -65,10 +65,6 @@ class IOUHandler:
|
|||||||
if name == "private_config_content" and (vm.private_config_content and len(vm.private_config_content) > 0):
|
if name == "private_config_content" and (vm.private_config_content and len(vm.private_config_content) > 0):
|
||||||
continue
|
continue
|
||||||
setattr(vm, name, value)
|
setattr(vm, name, value)
|
||||||
if "startup_config_content" in request.json:
|
|
||||||
vm.startup_config = request.json.get("startup_config_content")
|
|
||||||
if "private_config_content" in request.json:
|
|
||||||
vm.private_config = request.json.get("private_config_content")
|
|
||||||
response.set_status(201)
|
response.set_status(201)
|
||||||
response.json(vm)
|
response.json(vm)
|
||||||
|
|
||||||
@ -112,14 +108,9 @@ class IOUHandler:
|
|||||||
|
|
||||||
iou_manager = IOU.instance()
|
iou_manager = IOU.instance()
|
||||||
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||||
|
|
||||||
for name, value in request.json.items():
|
for name, value in request.json.items():
|
||||||
if hasattr(vm, name) and getattr(vm, name) != value:
|
if hasattr(vm, name) and getattr(vm, name) != value:
|
||||||
setattr(vm, name, value)
|
setattr(vm, name, value)
|
||||||
if "startup_config_content" in request.json:
|
|
||||||
vm.startup_config = request.json.get("startup_config_content")
|
|
||||||
if "private_config_content" in request.json:
|
|
||||||
vm.private_config = request.json.get("private_config_content")
|
|
||||||
response.json(vm)
|
response.json(vm)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@ -148,11 +139,12 @@ class IOUHandler:
|
|||||||
"vm_id": "UUID for the instance"
|
"vm_id": "UUID for the instance"
|
||||||
},
|
},
|
||||||
status_codes={
|
status_codes={
|
||||||
204: "Instance started",
|
200: "Instance started",
|
||||||
400: "Invalid request",
|
400: "Invalid request",
|
||||||
404: "Instance doesn't exist"
|
404: "Instance doesn't exist"
|
||||||
},
|
},
|
||||||
input=IOU_START_SCHEMA,
|
input=IOU_START_SCHEMA,
|
||||||
|
output=IOU_OBJECT_SCHEMA,
|
||||||
description="Start a IOU instance")
|
description="Start a IOU instance")
|
||||||
def start(request, response):
|
def start(request, response):
|
||||||
|
|
||||||
@ -166,7 +158,7 @@ class IOUHandler:
|
|||||||
print(vm.iourc_path)
|
print(vm.iourc_path)
|
||||||
|
|
||||||
yield from vm.start()
|
yield from vm.start()
|
||||||
response.set_status(204)
|
response.json(vm)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.post(
|
@Route.post(
|
||||||
@ -232,7 +224,7 @@ class IOUHandler:
|
|||||||
if nio_type not in ("nio_udp", "nio_tap", "nio_generic_ethernet"):
|
if nio_type not in ("nio_udp", "nio_tap", "nio_generic_ethernet"):
|
||||||
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
|
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
|
||||||
nio = iou_manager.create_nio(vm.iouyap_path, request.json)
|
nio = iou_manager.create_nio(vm.iouyap_path, request.json)
|
||||||
vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio)
|
yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio)
|
||||||
response.set_status(201)
|
response.set_status(201)
|
||||||
response.json(nio)
|
response.json(nio)
|
||||||
|
|
||||||
@ -255,7 +247,7 @@ class IOUHandler:
|
|||||||
|
|
||||||
iou_manager = IOU.instance()
|
iou_manager = IOU.instance()
|
||||||
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||||
vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]))
|
yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]))
|
||||||
response.set_status(204)
|
response.set_status(204)
|
||||||
|
|
||||||
@Route.post(
|
@Route.post(
|
||||||
@ -273,7 +265,7 @@ class IOUHandler:
|
|||||||
409: "VM not started"
|
409: "VM not started"
|
||||||
},
|
},
|
||||||
description="Start a packet capture on a IOU VM instance",
|
description="Start a packet capture on a IOU VM instance",
|
||||||
input=IOU_CAPTURE_SCHEMA)
|
input=VM_CAPTURE_SCHEMA)
|
||||||
def start_capture(request, response):
|
def start_capture(request, response):
|
||||||
|
|
||||||
iou_manager = IOU.instance()
|
iou_manager = IOU.instance()
|
||||||
|
@ -20,6 +20,7 @@ import asyncio
|
|||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import psutil
|
import psutil
|
||||||
|
import tempfile
|
||||||
|
|
||||||
from ...web.route import Route
|
from ...web.route import Route
|
||||||
from ...schemas.project import PROJECT_OBJECT_SCHEMA, PROJECT_CREATE_SCHEMA, PROJECT_UPDATE_SCHEMA, PROJECT_FILE_LIST_SCHEMA, PROJECT_LIST_SCHEMA
|
from ...schemas.project import PROJECT_OBJECT_SCHEMA, PROJECT_CREATE_SCHEMA, PROJECT_UPDATE_SCHEMA, PROJECT_FILE_LIST_SCHEMA, PROJECT_LIST_SCHEMA
|
||||||
@ -56,6 +57,7 @@ class ProjectHandler:
|
|||||||
description="Create a new project on the server",
|
description="Create a new project on the server",
|
||||||
status_codes={
|
status_codes={
|
||||||
201: "Project created",
|
201: "Project created",
|
||||||
|
403: "You are not allowed to modify this property",
|
||||||
409: "Project already created"
|
409: "Project already created"
|
||||||
},
|
},
|
||||||
output=PROJECT_OBJECT_SCHEMA,
|
output=PROJECT_OBJECT_SCHEMA,
|
||||||
@ -234,7 +236,7 @@ class ProjectHandler:
|
|||||||
:returns: hash
|
:returns: hash
|
||||||
"""
|
"""
|
||||||
stats = {}
|
stats = {}
|
||||||
# Non blocking call in order to get cpu usage. First call will return 0
|
# Non blocking call in order to get cpu usage. First call will return 0
|
||||||
stats["cpu_usage_percent"] = psutil.cpu_percent(interval=None)
|
stats["cpu_usage_percent"] = psutil.cpu_percent(interval=None)
|
||||||
stats["memory_usage_percent"] = psutil.virtual_memory().percent
|
stats["memory_usage_percent"] = psutil.virtual_memory().percent
|
||||||
return {"action": "ping", "event": stats}
|
return {"action": "ping", "event": stats}
|
||||||
@ -278,7 +280,7 @@ class ProjectHandler:
|
|||||||
path = request.match_info["path"]
|
path = request.match_info["path"]
|
||||||
path = os.path.normpath(path)
|
path = os.path.normpath(path)
|
||||||
|
|
||||||
# Raise error if user try to escape
|
# Raise error if user try to escape
|
||||||
if path[0] == ".":
|
if path[0] == ".":
|
||||||
raise aiohttp.web.HTTPForbidden
|
raise aiohttp.web.HTTPForbidden
|
||||||
path = os.path.join(project.path, path)
|
path = os.path.join(project.path, path)
|
||||||
@ -301,4 +303,112 @@ class ProjectHandler:
|
|||||||
except FileNotFoundError:
|
except FileNotFoundError:
|
||||||
raise aiohttp.web.HTTPNotFound()
|
raise aiohttp.web.HTTPNotFound()
|
||||||
except PermissionError:
|
except PermissionError:
|
||||||
|
raise aiohttp.web.HTTPForbidden()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@Route.post(
|
||||||
|
r"/projects/{project_id}/files/{path:.+}",
|
||||||
|
description="Get a file of a project",
|
||||||
|
parameters={
|
||||||
|
"project_id": "The UUID of the project",
|
||||||
|
},
|
||||||
|
raw=True,
|
||||||
|
status_codes={
|
||||||
|
200: "Return the file",
|
||||||
|
403: "Permission denied",
|
||||||
|
404: "The path doesn't exist"
|
||||||
|
})
|
||||||
|
def write_file(request, response):
|
||||||
|
|
||||||
|
pm = ProjectManager.instance()
|
||||||
|
project = pm.get_project(request.match_info["project_id"])
|
||||||
|
path = request.match_info["path"]
|
||||||
|
path = os.path.normpath(path)
|
||||||
|
|
||||||
|
# Raise error if user try to escape
|
||||||
|
if path[0] == ".":
|
||||||
raise aiohttp.web.HTTPForbidden
|
raise aiohttp.web.HTTPForbidden
|
||||||
|
path = os.path.join(project.path, path)
|
||||||
|
|
||||||
|
response.set_status(200)
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(path, 'wb+') as f:
|
||||||
|
while True:
|
||||||
|
packet = yield from request.content.read(512)
|
||||||
|
if not packet:
|
||||||
|
break
|
||||||
|
f.write(packet)
|
||||||
|
|
||||||
|
except FileNotFoundError:
|
||||||
|
raise aiohttp.web.HTTPNotFound()
|
||||||
|
except PermissionError:
|
||||||
|
raise aiohttp.web.HTTPForbidden()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@Route.get(
|
||||||
|
r"/projects/{project_id}/export",
|
||||||
|
description="Export a project as a portable archive",
|
||||||
|
parameters={
|
||||||
|
"project_id": "The UUID of the project",
|
||||||
|
},
|
||||||
|
raw=True,
|
||||||
|
status_codes={
|
||||||
|
200: "Return the file",
|
||||||
|
404: "The project doesn't exist"
|
||||||
|
})
|
||||||
|
def export_project(request, response):
|
||||||
|
|
||||||
|
pm = ProjectManager.instance()
|
||||||
|
project = pm.get_project(request.match_info["project_id"])
|
||||||
|
response.content_type = 'application/gns3project'
|
||||||
|
response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name)
|
||||||
|
response.enable_chunked_encoding()
|
||||||
|
# Very important: do not send a content length otherwise QT close the connection but curl can consume the Feed
|
||||||
|
response.content_length = None
|
||||||
|
response.start(request)
|
||||||
|
|
||||||
|
include_images = bool(int(request.json.get("include_images", "0")))
|
||||||
|
for data in project.export(include_images=include_images):
|
||||||
|
response.write(data)
|
||||||
|
yield from response.drain()
|
||||||
|
|
||||||
|
yield from response.write_eof()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@Route.post(
|
||||||
|
r"/projects/{project_id}/import",
|
||||||
|
description="Import a project from a portable archive",
|
||||||
|
parameters={
|
||||||
|
"project_id": "The UUID of the project",
|
||||||
|
},
|
||||||
|
raw=True,
|
||||||
|
output=PROJECT_OBJECT_SCHEMA,
|
||||||
|
status_codes={
|
||||||
|
200: "Project imported",
|
||||||
|
403: "You are not allowed to modify this property"
|
||||||
|
})
|
||||||
|
def import_project(request, response):
|
||||||
|
|
||||||
|
pm = ProjectManager.instance()
|
||||||
|
project_id = request.match_info["project_id"]
|
||||||
|
project = pm.create_project(project_id=project_id)
|
||||||
|
|
||||||
|
# We write the content to a temporary location
|
||||||
|
# and after extract all. It could be more optimal to stream
|
||||||
|
# this but it's not implemented in Python.
|
||||||
|
#
|
||||||
|
# Spooled mean the file is temporary keep in ram until max_size
|
||||||
|
try:
|
||||||
|
with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
|
||||||
|
while True:
|
||||||
|
packet = yield from request.content.read(512)
|
||||||
|
if not packet:
|
||||||
|
break
|
||||||
|
temp.write(packet)
|
||||||
|
project.import_zip(temp, gns3vm=bool(int(request.GET.get("gns3vm", "1"))))
|
||||||
|
except OSError as e:
|
||||||
|
raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
|
||||||
|
|
||||||
|
response.json(project)
|
||||||
|
response.set_status(201)
|
||||||
|
@ -146,11 +146,12 @@ class QEMUHandler:
|
|||||||
"vm_id": "UUID for the instance"
|
"vm_id": "UUID for the instance"
|
||||||
},
|
},
|
||||||
status_codes={
|
status_codes={
|
||||||
204: "Instance started",
|
200: "Instance started",
|
||||||
400: "Invalid request",
|
400: "Invalid request",
|
||||||
404: "Instance doesn't exist"
|
404: "Instance doesn't exist"
|
||||||
},
|
},
|
||||||
description="Start a Qemu VM instance")
|
description="Start a Qemu VM instance",
|
||||||
|
output=QEMU_OBJECT_SCHEMA)
|
||||||
def start(request, response):
|
def start(request, response):
|
||||||
|
|
||||||
qemu_manager = Qemu.instance()
|
qemu_manager = Qemu.instance()
|
||||||
@ -161,7 +162,7 @@ class QEMUHandler:
|
|||||||
if pm.check_hardware_virtualization(vm) is False:
|
if pm.check_hardware_virtualization(vm) is False:
|
||||||
raise HTTPConflict(text="Cannot start VM with KVM enabled because hardware virtualization (VT-x/AMD-V) is already used by another software like VMware or VirtualBox")
|
raise HTTPConflict(text="Cannot start VM with KVM enabled because hardware virtualization (VT-x/AMD-V) is already used by another software like VMware or VirtualBox")
|
||||||
yield from vm.start()
|
yield from vm.start()
|
||||||
response.set_status(204)
|
response.json(vm)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.post(
|
@Route.post(
|
||||||
|
@ -22,8 +22,8 @@ from ...web.route import Route
|
|||||||
from ...schemas.nio import NIO_SCHEMA
|
from ...schemas.nio import NIO_SCHEMA
|
||||||
from ...schemas.virtualbox import VBOX_CREATE_SCHEMA
|
from ...schemas.virtualbox import VBOX_CREATE_SCHEMA
|
||||||
from ...schemas.virtualbox import VBOX_UPDATE_SCHEMA
|
from ...schemas.virtualbox import VBOX_UPDATE_SCHEMA
|
||||||
from ...schemas.virtualbox import VBOX_CAPTURE_SCHEMA
|
|
||||||
from ...schemas.virtualbox import VBOX_OBJECT_SCHEMA
|
from ...schemas.virtualbox import VBOX_OBJECT_SCHEMA
|
||||||
|
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||||
from ...modules.virtualbox import VirtualBox
|
from ...modules.virtualbox import VirtualBox
|
||||||
from ...modules.project_manager import ProjectManager
|
from ...modules.project_manager import ProjectManager
|
||||||
|
|
||||||
@ -342,7 +342,7 @@ class VirtualBoxHandler:
|
|||||||
404: "Instance doesn't exist"
|
404: "Instance doesn't exist"
|
||||||
},
|
},
|
||||||
description="Start a packet capture on a VirtualBox VM instance",
|
description="Start a packet capture on a VirtualBox VM instance",
|
||||||
input=VBOX_CAPTURE_SCHEMA)
|
input=VM_CAPTURE_SCHEMA)
|
||||||
def start_capture(request, response):
|
def start_capture(request, response):
|
||||||
|
|
||||||
vbox_manager = VirtualBox.instance()
|
vbox_manager = VirtualBox.instance()
|
||||||
|
@ -22,7 +22,7 @@ from ...web.route import Route
|
|||||||
from ...schemas.vmware import VMWARE_CREATE_SCHEMA
|
from ...schemas.vmware import VMWARE_CREATE_SCHEMA
|
||||||
from ...schemas.vmware import VMWARE_UPDATE_SCHEMA
|
from ...schemas.vmware import VMWARE_UPDATE_SCHEMA
|
||||||
from ...schemas.vmware import VMWARE_OBJECT_SCHEMA
|
from ...schemas.vmware import VMWARE_OBJECT_SCHEMA
|
||||||
from ...schemas.vmware import VMWARE_CAPTURE_SCHEMA
|
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||||
from ...schemas.nio import NIO_SCHEMA
|
from ...schemas.nio import NIO_SCHEMA
|
||||||
from ...modules.vmware import VMware
|
from ...modules.vmware import VMware
|
||||||
from ...modules.project_manager import ProjectManager
|
from ...modules.project_manager import ProjectManager
|
||||||
@ -271,7 +271,7 @@ class VMwareHandler:
|
|||||||
vmware_manager = VMware.instance()
|
vmware_manager = VMware.instance()
|
||||||
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||||
nio_type = request.json["type"]
|
nio_type = request.json["type"]
|
||||||
if nio_type not in ("nio_udp", "nio_vmnet", "nio_nat"):
|
if nio_type not in ("nio_udp", "nio_vmnet", "nio_nat", "nio_tap"):
|
||||||
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
|
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
|
||||||
nio = vmware_manager.create_nio(None, request.json)
|
nio = vmware_manager.create_nio(None, request.json)
|
||||||
yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
|
yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
|
||||||
@ -314,7 +314,7 @@ class VMwareHandler:
|
|||||||
404: "Instance doesn't exist",
|
404: "Instance doesn't exist",
|
||||||
},
|
},
|
||||||
description="Start a packet capture on a VMware VM instance",
|
description="Start a packet capture on a VMware VM instance",
|
||||||
input=VMWARE_CAPTURE_SCHEMA)
|
input=VM_CAPTURE_SCHEMA)
|
||||||
def start_capture(request, response):
|
def start_capture(request, response):
|
||||||
|
|
||||||
vmware_manager = VMware.instance()
|
vmware_manager = VMware.instance()
|
||||||
|
@ -130,13 +130,14 @@ class VPCSHandler:
|
|||||||
400: "Invalid request",
|
400: "Invalid request",
|
||||||
404: "Instance doesn't exist"
|
404: "Instance doesn't exist"
|
||||||
},
|
},
|
||||||
description="Start a VPCS instance")
|
description="Start a VPCS instance",
|
||||||
|
output=VPCS_OBJECT_SCHEMA)
|
||||||
def start(request, response):
|
def start(request, response):
|
||||||
|
|
||||||
vpcs_manager = VPCS.instance()
|
vpcs_manager = VPCS.instance()
|
||||||
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
vm = vpcs_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||||
yield from vm.start()
|
yield from vm.start()
|
||||||
response.set_status(204)
|
response.json(vm)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@Route.post(
|
@Route.post(
|
||||||
|
@ -54,7 +54,8 @@ class UploadHandler:
|
|||||||
@Route.post(
|
@Route.post(
|
||||||
r"/upload",
|
r"/upload",
|
||||||
description="Manage upload of GNS3 images",
|
description="Manage upload of GNS3 images",
|
||||||
api_version=None
|
api_version=None,
|
||||||
|
raw=True
|
||||||
)
|
)
|
||||||
def upload(request, response):
|
def upload(request, response):
|
||||||
data = yield from request.post()
|
data = yield from request.post()
|
||||||
|
@ -27,6 +27,9 @@ MODULES = [VPCS, VirtualBox, Dynamips, Qemu, VMware]
|
|||||||
|
|
||||||
if sys.platform.startswith("linux") or hasattr(sys, "_called_from_test") or os.environ.get("PYTEST_BUILD_DOCUMENTATION") == "1":
|
if sys.platform.startswith("linux") or hasattr(sys, "_called_from_test") or os.environ.get("PYTEST_BUILD_DOCUMENTATION") == "1":
|
||||||
|
|
||||||
|
from .docker import Docker
|
||||||
|
MODULES.append(Docker)
|
||||||
|
|
||||||
# IOU runs only on Linux but testsuite work on UNIX platform
|
# IOU runs only on Linux but testsuite work on UNIX platform
|
||||||
if not sys.platform.startswith("win"):
|
if not sys.platform.startswith("win"):
|
||||||
from .iou import IOU
|
from .iou import IOU
|
||||||
|
@ -27,6 +27,7 @@ class EthernetAdapter(Adapter):
|
|||||||
def __init__(self, interfaces=1):
|
def __init__(self, interfaces=1):
|
||||||
|
|
||||||
super().__init__(interfaces)
|
super().__init__(interfaces)
|
||||||
|
self.host_ifc = None
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
|
|
||||||
|
@ -423,7 +423,7 @@ class BaseManager:
|
|||||||
|
|
||||||
return force_unix_path(path)
|
return force_unix_path(path)
|
||||||
else:
|
else:
|
||||||
# For non local server we disallow using absolute path outside image directory
|
# For non local server we disallow using absolute path outside image directory
|
||||||
if Config.instance().get_section_config("Server").get("local", False) is False:
|
if Config.instance().get_section_config("Server").get("local", False) is False:
|
||||||
img_directory = self.config.get_section_config("Server").get("images_path", os.path.expanduser("~/GNS3/images"))
|
img_directory = self.config.get_section_config("Server").get("images_path", os.path.expanduser("~/GNS3/images"))
|
||||||
img_directory = force_unix_path(img_directory)
|
img_directory = force_unix_path(img_directory)
|
||||||
@ -486,14 +486,17 @@ class BaseManager:
|
|||||||
log.info("Writting image file %s", path)
|
log.info("Writting image file %s", path)
|
||||||
try:
|
try:
|
||||||
remove_checksum(path)
|
remove_checksum(path)
|
||||||
|
# We store the file under his final name only when the upload is finished
|
||||||
|
tmp_path = path + ".tmp"
|
||||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||||
with open(path, 'wb+') as f:
|
with open(tmp_path, 'wb+') as f:
|
||||||
while True:
|
while True:
|
||||||
packet = yield from stream.read(512)
|
packet = yield from stream.read(512)
|
||||||
if not packet:
|
if not packet:
|
||||||
break
|
break
|
||||||
f.write(packet)
|
f.write(packet)
|
||||||
os.chmod(path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
|
os.chmod(tmp_path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
|
||||||
|
shutil.move(tmp_path, path)
|
||||||
md5sum(path)
|
md5sum(path)
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
raise aiohttp.web.HTTPConflict(text="Could not write image: {} because {}".format(filename, e))
|
raise aiohttp.web.HTTPConflict(text="Could not write image: {} because {}".format(filename, e))
|
||||||
|
@ -16,6 +16,7 @@
|
|||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
import stat
|
||||||
import logging
|
import logging
|
||||||
import aiohttp
|
import aiohttp
|
||||||
import shutil
|
import shutil
|
||||||
@ -24,7 +25,7 @@ import tempfile
|
|||||||
import psutil
|
import psutil
|
||||||
import platform
|
import platform
|
||||||
|
|
||||||
from pkg_resources import parse_version
|
from gns3server.utils import parse_version
|
||||||
from ..utils.asyncio import wait_run_in_executor
|
from ..utils.asyncio import wait_run_in_executor
|
||||||
from ..ubridge.hypervisor import Hypervisor
|
from ..ubridge.hypervisor import Hypervisor
|
||||||
from .vm_error import VMError
|
from .vm_error import VMError
|
||||||
@ -43,9 +44,11 @@ class BaseVM:
|
|||||||
:param project: Project instance
|
:param project: Project instance
|
||||||
:param manager: parent VM Manager
|
:param manager: parent VM Manager
|
||||||
:param console: TCP console port
|
:param console: TCP console port
|
||||||
|
:param aux: TCP aux console port
|
||||||
|
:param allocate_aux: Boolean if true will allocate an aux console port
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, name, vm_id, project, manager, console=None, console_type="telnet"):
|
def __init__(self, name, vm_id, project, manager, console=None, console_type="telnet", aux=None, allocate_aux=False):
|
||||||
|
|
||||||
self._name = name
|
self._name = name
|
||||||
self._usage = ""
|
self._usage = ""
|
||||||
@ -53,24 +56,36 @@ class BaseVM:
|
|||||||
self._project = project
|
self._project = project
|
||||||
self._manager = manager
|
self._manager = manager
|
||||||
self._console = console
|
self._console = console
|
||||||
|
self._aux = aux
|
||||||
self._console_type = console_type
|
self._console_type = console_type
|
||||||
self._temporary_directory = None
|
self._temporary_directory = None
|
||||||
self._hw_virtualization = False
|
self._hw_virtualization = False
|
||||||
self._ubridge_hypervisor = None
|
self._ubridge_hypervisor = None
|
||||||
|
self._closed = False
|
||||||
self._vm_status = "stopped"
|
self._vm_status = "stopped"
|
||||||
|
self._command_line = ""
|
||||||
|
self._allocate_aux = allocate_aux
|
||||||
|
|
||||||
if self._console is not None:
|
if self._console is not None:
|
||||||
if console_type == "vnc":
|
if console_type == "vnc":
|
||||||
self._console = self._manager.port_manager.reserve_tcp_port(self._console, self._project, port_range_start=5900, port_range_end=6000)
|
self._console = self._manager.port_manager.reserve_tcp_port(self._console, self._project, port_range_start=5900, port_range_end=6000)
|
||||||
else:
|
else:
|
||||||
self._console = self._manager.port_manager.reserve_tcp_port(self._console, self._project)
|
self._console = self._manager.port_manager.reserve_tcp_port(self._console, self._project)
|
||||||
else:
|
|
||||||
|
# We need to allocate aux before giving a random console port
|
||||||
|
if self._aux is not None:
|
||||||
|
self._aux = self._manager.port_manager.reserve_tcp_port(self._aux, self._project)
|
||||||
|
|
||||||
|
if self._console is None:
|
||||||
if console_type == "vnc":
|
if console_type == "vnc":
|
||||||
# VNC is a special case and the range must be 5900-6000
|
# VNC is a special case and the range must be 5900-6000
|
||||||
self._console = self._manager.port_manager.get_free_tcp_port(self._project, port_range_start=5900, port_range_end=6000)
|
self._console = self._manager.port_manager.get_free_tcp_port(self._project, port_range_start=5900, port_range_end=6000)
|
||||||
else:
|
else:
|
||||||
self._console = self._manager.port_manager.get_free_tcp_port(self._project)
|
self._console = self._manager.port_manager.get_free_tcp_port(self._project)
|
||||||
|
|
||||||
|
if self._aux is None and allocate_aux:
|
||||||
|
self._aux = self._manager.port_manager.get_free_tcp_port(self._project)
|
||||||
|
|
||||||
log.debug("{module}: {name} [{id}] initialized. Console port {console}".format(module=self.manager.module_name,
|
log.debug("{module}: {name} [{id}] initialized. Console port {console}".format(module=self.manager.module_name,
|
||||||
name=self.name,
|
name=self.name,
|
||||||
id=self.id,
|
id=self.id,
|
||||||
@ -94,6 +109,17 @@ class BaseVM:
|
|||||||
self._vm_status = status
|
self._vm_status = status
|
||||||
self._project.emit("vm.{}".format(status), self)
|
self._project.emit("vm.{}".format(status), self)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def command_line(self):
|
||||||
|
"""Return command used to start the VM"""
|
||||||
|
|
||||||
|
return self._command_line
|
||||||
|
|
||||||
|
@command_line.setter
|
||||||
|
def command_line(self, command_line):
|
||||||
|
|
||||||
|
self._command_line = command_line
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def project(self):
|
def project(self):
|
||||||
"""
|
"""
|
||||||
@ -199,11 +225,13 @@ class BaseVM:
|
|||||||
"""
|
"""
|
||||||
Delete the VM (including all its files).
|
Delete the VM (including all its files).
|
||||||
"""
|
"""
|
||||||
|
def set_rw(operation, name, exc):
|
||||||
|
os.chmod(name, stat.S_IWRITE)
|
||||||
|
|
||||||
directory = self.project.vm_working_directory(self)
|
directory = self.project.vm_working_directory(self)
|
||||||
if os.path.exists(directory):
|
if os.path.exists(directory):
|
||||||
try:
|
try:
|
||||||
yield from wait_run_in_executor(shutil.rmtree, directory)
|
yield from wait_run_in_executor(shutil.rmtree, directory, onerror=set_rw)
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
raise aiohttp.web.HTTPInternalServerError(text="Could not delete the VM working directory: {}".format(e))
|
raise aiohttp.web.HTTPInternalServerError(text="Could not delete the VM working directory: {}".format(e))
|
||||||
|
|
||||||
@ -221,12 +249,75 @@ class BaseVM:
|
|||||||
|
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
def close(self):
|
def close(self):
|
||||||
"""
|
"""
|
||||||
Close the VM process.
|
Close the VM process.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
raise NotImplementedError
|
if self._closed:
|
||||||
|
return False
|
||||||
|
|
||||||
|
log.info("{module}: '{name}' [{id}]: is closing".format(
|
||||||
|
module=self.manager.module_name,
|
||||||
|
name=self.name,
|
||||||
|
id=self.id))
|
||||||
|
|
||||||
|
if self._console:
|
||||||
|
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||||
|
self._console = None
|
||||||
|
|
||||||
|
if self._aux:
|
||||||
|
self._manager.port_manager.release_tcp_port(self._aux, self._project)
|
||||||
|
self._aux = None
|
||||||
|
|
||||||
|
self._closed = True
|
||||||
|
return True
|
||||||
|
|
||||||
|
@property
|
||||||
|
def allocate_aux(self):
|
||||||
|
"""
|
||||||
|
:returns: Boolean allocate or not an aux console
|
||||||
|
"""
|
||||||
|
return self._allocate_aux
|
||||||
|
|
||||||
|
@allocate_aux.setter
|
||||||
|
def allocate_aux(self, allocate_aux):
|
||||||
|
"""
|
||||||
|
:returns: Boolean allocate or not an aux console
|
||||||
|
"""
|
||||||
|
self._allocate_aux = allocate_aux
|
||||||
|
|
||||||
|
@property
|
||||||
|
def aux(self):
|
||||||
|
"""
|
||||||
|
Returns the aux console port of this VM.
|
||||||
|
|
||||||
|
:returns: aux console port
|
||||||
|
"""
|
||||||
|
|
||||||
|
return self._aux
|
||||||
|
|
||||||
|
@aux.setter
|
||||||
|
def aux(self, aux):
|
||||||
|
"""
|
||||||
|
Changes the aux port
|
||||||
|
|
||||||
|
:params aux: Console port (integer) or None to free the port
|
||||||
|
"""
|
||||||
|
|
||||||
|
if aux == self._aux:
|
||||||
|
return
|
||||||
|
|
||||||
|
if self._aux:
|
||||||
|
self._manager.port_manager.release_tcp_port(self._aux, self._project)
|
||||||
|
self._aux = None
|
||||||
|
if aux is not None:
|
||||||
|
self._aux = self._manager.port_manager.reserve_tcp_port(aux, self._project)
|
||||||
|
log.info("{module}: '{name}' [{id}]: aux port set to {port}".format(module=self.manager.module_name,
|
||||||
|
name=self.name,
|
||||||
|
id=self.id,
|
||||||
|
port=aux))
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def console(self):
|
def console(self):
|
||||||
@ -243,22 +334,28 @@ class BaseVM:
|
|||||||
"""
|
"""
|
||||||
Changes the console port
|
Changes the console port
|
||||||
|
|
||||||
:params console: Console port (integer)
|
:params console: Console port (integer) or None to free the port
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if console == self._console:
|
if console == self._console:
|
||||||
return
|
return
|
||||||
|
|
||||||
if self._console_type == "vnc" and console < 5900:
|
if self._console_type == "vnc" and console is not None and console < 5900:
|
||||||
raise VMError("VNC console require a port superior or equal to 5900")
|
raise VMError("VNC console require a port superior or equal to 5900 currently it's {}".format(console))
|
||||||
|
|
||||||
if self._console:
|
if self._console:
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||||
self._console = self._manager.port_manager.reserve_tcp_port(console, self._project)
|
self._console = None
|
||||||
log.info("{module}: '{name}' [{id}]: console port set to {port}".format(module=self.manager.module_name,
|
if console is not None:
|
||||||
name=self.name,
|
if self.console_type == "vnc":
|
||||||
id=self.id,
|
self._console = self._manager.port_manager.reserve_tcp_port(console, self._project, port_range_start=5900, port_range_end=6000)
|
||||||
port=console))
|
else:
|
||||||
|
self._console = self._manager.port_manager.reserve_tcp_port(console, self._project)
|
||||||
|
|
||||||
|
log.info("{module}: '{name}' [{id}]: console port set to {port}".format(module=self.manager.module_name,
|
||||||
|
name=self.name,
|
||||||
|
id=self.id,
|
||||||
|
port=console))
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def console_type(self):
|
def console_type(self):
|
||||||
@ -306,8 +403,10 @@ class BaseVM:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
path = self._manager.config.get_section_config("Server").get("ubridge_path", "ubridge")
|
path = self._manager.config.get_section_config("Server").get("ubridge_path", "ubridge")
|
||||||
if path == "ubridge":
|
path = shutil.which(path)
|
||||||
path = shutil.which("ubridge")
|
|
||||||
|
if path is None or len(path) == 0:
|
||||||
|
raise VMError("uBridge is not installed or uBridge path is invalid")
|
||||||
return path
|
return path
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
@ -327,8 +426,6 @@ class BaseVM:
|
|||||||
yield from self._ubridge_hypervisor.start()
|
yield from self._ubridge_hypervisor.start()
|
||||||
log.info("Hypervisor {}:{} has successfully started".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
|
log.info("Hypervisor {}:{} has successfully started".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
|
||||||
yield from self._ubridge_hypervisor.connect()
|
yield from self._ubridge_hypervisor.connect()
|
||||||
if parse_version(self._ubridge_hypervisor.version) < parse_version('0.9.2'):
|
|
||||||
raise VMError("uBridge version must be >= 0.9.2, detected version is {}".format(self._ubridge_hypervisor.version))
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def hw_virtualization(self):
|
def hw_virtualization(self):
|
||||||
|
@ -22,51 +22,131 @@ Docker server module.
|
|||||||
import asyncio
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
import aiohttp
|
import aiohttp
|
||||||
import docker
|
import urllib
|
||||||
from requests.exceptions import ConnectionError
|
import json
|
||||||
|
import sys
|
||||||
|
from gns3server.utils import parse_version
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
from ..base_manager import BaseManager
|
from ..base_manager import BaseManager
|
||||||
from ..project_manager import ProjectManager
|
from .docker_vm import DockerVM
|
||||||
from .docker_vm import Container
|
from .docker_error import *
|
||||||
from .docker_error import DockerError
|
|
||||||
|
DOCKER_MINIMUM_API_VERSION = "1.21"
|
||||||
|
|
||||||
|
|
||||||
class Docker(BaseManager):
|
class Docker(BaseManager):
|
||||||
|
|
||||||
_VM_CLASS = Container
|
_VM_CLASS = DockerVM
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
# FIXME: make configurable and start docker before trying
|
self._server_url = '/var/run/docker.sock'
|
||||||
self._server_url = 'unix://var/run/docker.sock'
|
self._connected = False
|
||||||
self._client = docker.Client(base_url=self._server_url)
|
# Allow locking during ubridge operations
|
||||||
self._execute_lock = asyncio.Lock()
|
self.ubridge_lock = asyncio.Lock()
|
||||||
|
|
||||||
@property
|
|
||||||
def server_url(self):
|
|
||||||
"""Returns the Docker server url.
|
|
||||||
|
|
||||||
:returns: url
|
|
||||||
:rtype: string
|
|
||||||
"""
|
|
||||||
return self._server_url
|
|
||||||
|
|
||||||
@server_url.setter
|
|
||||||
def server_url(self, value):
|
|
||||||
self._server_url = value
|
|
||||||
self._client = docker.Client(base_url=value)
|
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def execute(self, command, kwargs, timeout=60):
|
def connector(self):
|
||||||
command = getattr(self._client, command)
|
if not self._connected or self._connector.closed:
|
||||||
log.debug("Executing Docker with command: {}".format(command))
|
if not sys.platform.startswith("linux"):
|
||||||
|
raise DockerError("Docker is supported only on Linux")
|
||||||
|
|
||||||
|
try:
|
||||||
|
self._connector = aiohttp.connector.UnixConnector(self._server_url, conn_timeout=2)
|
||||||
|
self._connected = True
|
||||||
|
version = yield from self.query("GET", "version")
|
||||||
|
except (aiohttp.errors.ClientOSError, FileNotFoundError):
|
||||||
|
self._connected = False
|
||||||
|
raise DockerError("Can't connect to docker daemon")
|
||||||
|
|
||||||
|
if parse_version(version["ApiVersion"]) < parse_version(DOCKER_MINIMUM_API_VERSION):
|
||||||
|
raise DockerError("Docker API version is {}. GNS3 requires a minimum API version of {}".format(version["ApiVersion"], DOCKER_MINIMUM_API_VERSION))
|
||||||
|
return self._connector
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def unload(self):
|
||||||
|
yield from super().unload()
|
||||||
|
if self._connected:
|
||||||
|
self._connector.close()
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def query(self, method, path, data={}, params={}):
|
||||||
|
"""
|
||||||
|
Make a query to the docker daemon and decode the request
|
||||||
|
|
||||||
|
:param method: HTTP method
|
||||||
|
:param path: Endpoint in API
|
||||||
|
:param data: Dictionnary with the body. Will be transformed to a JSON
|
||||||
|
:param params: Parameters added as a query arg
|
||||||
|
"""
|
||||||
|
|
||||||
|
response = yield from self.http_query(method, path, data=data, params=params)
|
||||||
|
body = yield from response.read()
|
||||||
|
if len(body):
|
||||||
|
if response.headers['CONTENT-TYPE'] == 'application/json':
|
||||||
|
body = json.loads(body.decode("utf-8"))
|
||||||
|
else:
|
||||||
|
body = body.decode("utf-8")
|
||||||
|
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
|
||||||
|
return body
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def http_query(self, method, path, data={}, params={}):
|
||||||
|
"""
|
||||||
|
Make a query to the docker daemon
|
||||||
|
|
||||||
|
:param method: HTTP method
|
||||||
|
:param path: Endpoint in API
|
||||||
|
:param data: Dictionnary with the body. Will be transformed to a JSON
|
||||||
|
:param params: Parameters added as a query arg
|
||||||
|
:returns: HTTP response
|
||||||
|
"""
|
||||||
|
data = json.dumps(data)
|
||||||
|
url = "http://docker/" + path
|
||||||
try:
|
try:
|
||||||
result = command(**kwargs)
|
response = yield from aiohttp.request(
|
||||||
except Exception as error:
|
method,
|
||||||
raise DockerError("Docker has returned an error: {}".format(error))
|
url,
|
||||||
return result
|
connector=(yield from self.connector()),
|
||||||
|
params=params,
|
||||||
|
data=data,
|
||||||
|
headers={"content-type": "application/json", },
|
||||||
|
)
|
||||||
|
except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
|
||||||
|
raise DockerError("Docker has returned an error: {}".format(str(e)))
|
||||||
|
if response.status >= 300:
|
||||||
|
body = yield from response.read()
|
||||||
|
try:
|
||||||
|
body = json.loads(body.decode("utf-8"))["message"]
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
|
||||||
|
if response.status == 304:
|
||||||
|
raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
|
||||||
|
elif response.status == 404:
|
||||||
|
raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
|
||||||
|
else:
|
||||||
|
raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
|
||||||
|
return response
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def websocket_query(self, path, params={}):
|
||||||
|
"""
|
||||||
|
Open a websocket connection
|
||||||
|
|
||||||
|
:param path: Endpoint in API
|
||||||
|
:param params: Parameters added as a query arg
|
||||||
|
:returns: Websocket
|
||||||
|
"""
|
||||||
|
|
||||||
|
url = "http://docker/" + path
|
||||||
|
connection = yield from aiohttp.ws_connect(url,
|
||||||
|
connector=(yield from self.connector()),
|
||||||
|
origin="http://docker",
|
||||||
|
autoping=True)
|
||||||
|
return connection
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def list_images(self):
|
def list_images(self):
|
||||||
@ -76,44 +156,8 @@ class Docker(BaseManager):
|
|||||||
:rtype: list
|
:rtype: list
|
||||||
"""
|
"""
|
||||||
images = []
|
images = []
|
||||||
try:
|
for image in (yield from self.query("GET", "images/json", params={"all": 0})):
|
||||||
for image in self._client.images():
|
for tag in image['RepoTags']:
|
||||||
for tag in image['RepoTags']:
|
if tag != "<none>:<none>":
|
||||||
images.append({'imagename': tag})
|
images.append({'image': tag})
|
||||||
return images
|
return sorted(images, key=lambda i: i['image'])
|
||||||
except ConnectionError as error:
|
|
||||||
raise DockerError(
|
|
||||||
"""Docker couldn't list images and returned an error: {}
|
|
||||||
Is the Docker service running?""".format(error))
|
|
||||||
|
|
||||||
@asyncio.coroutine
|
|
||||||
def list_containers(self):
|
|
||||||
"""Gets Docker container list.
|
|
||||||
|
|
||||||
:returns: list of dicts
|
|
||||||
:rtype: list
|
|
||||||
"""
|
|
||||||
return self._client.containers()
|
|
||||||
|
|
||||||
def get_container(self, cid, project_id=None):
|
|
||||||
"""Returns a Docker container.
|
|
||||||
|
|
||||||
:param id: Docker container identifier
|
|
||||||
:param project_id: Project identifier
|
|
||||||
|
|
||||||
:returns: Docker container
|
|
||||||
"""
|
|
||||||
if project_id:
|
|
||||||
project = ProjectManager.instance().get_project(project_id)
|
|
||||||
|
|
||||||
if cid not in self._vms:
|
|
||||||
raise aiohttp.web.HTTPNotFound(
|
|
||||||
text="Docker container with ID {} doesn't exist".format(cid))
|
|
||||||
|
|
||||||
container = self._vms[cid]
|
|
||||||
if project_id:
|
|
||||||
if container.project.id != project.id:
|
|
||||||
raise aiohttp.web.HTTPNotFound(
|
|
||||||
text="Project ID {} doesn't belong to container {}".format(
|
|
||||||
project_id, container.name))
|
|
||||||
return container
|
|
||||||
|
@ -24,3 +24,11 @@ from ..vm_error import VMError
|
|||||||
|
|
||||||
class DockerError(VMError):
|
class DockerError(VMError):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class DockerHttp304Error(DockerError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class DockerHttp404Error(DockerError):
|
||||||
|
pass
|
||||||
|
File diff suppressed because it is too large
Load Diff
BIN
gns3server/modules/docker/resources/bin/busybox
Executable file
BIN
gns3server/modules/docker/resources/bin/busybox
Executable file
Binary file not shown.
138
gns3server/modules/docker/resources/etc/udhcpc/default.script
Executable file
138
gns3server/modules/docker/resources/etc/udhcpc/default.script
Executable file
@ -0,0 +1,138 @@
|
|||||||
|
#!/tmp/gns3/bin/sh
|
||||||
|
|
||||||
|
# script for udhcpc
|
||||||
|
# Copyright (c) 2008 Natanael Copa <natanael.copa@gmail.com>
|
||||||
|
|
||||||
|
UDHCPC="/gns3/etc/udhcpc"
|
||||||
|
UDHCPC_CONF="$UDHCPC/udhcpc.conf"
|
||||||
|
|
||||||
|
RESOLV_CONF="/etc/resolv.conf"
|
||||||
|
[ -f $UDHCPC_CONF ] && . $UDHCPC_CONF
|
||||||
|
|
||||||
|
export broadcast
|
||||||
|
export dns
|
||||||
|
export domain
|
||||||
|
export interface
|
||||||
|
export ip
|
||||||
|
export mask
|
||||||
|
export metric
|
||||||
|
export router
|
||||||
|
export subnet
|
||||||
|
|
||||||
|
#export PATH=/usr/bin:/bin:/usr/sbin:/sbin
|
||||||
|
|
||||||
|
run_scripts() {
|
||||||
|
local dir=$1
|
||||||
|
if [ -d $dir ]; then
|
||||||
|
for i in $dir/*; do
|
||||||
|
[ -f $i ] && $i
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
deconfig() {
|
||||||
|
ip addr flush dev $interface
|
||||||
|
}
|
||||||
|
|
||||||
|
is_wifi() {
|
||||||
|
test -e /sys/class/net/$interface/phy80211
|
||||||
|
}
|
||||||
|
|
||||||
|
if_index() {
|
||||||
|
if [ -e /sys/class/net/$interface/ifindex ]; then
|
||||||
|
cat /sys/class/net/$interface/ifindex
|
||||||
|
else
|
||||||
|
ip link show dev $interface | head -n1 | cut -d: -f1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
calc_metric() {
|
||||||
|
local base=
|
||||||
|
if is_wifi; then
|
||||||
|
base=300
|
||||||
|
else
|
||||||
|
base=200
|
||||||
|
fi
|
||||||
|
echo $(( $base + $(if_index) ))
|
||||||
|
}
|
||||||
|
|
||||||
|
routes() {
|
||||||
|
[ -z "$router" ] && return
|
||||||
|
local gw= num=
|
||||||
|
while ip route del default via dev $interface 2>/dev/null; do
|
||||||
|
:
|
||||||
|
done
|
||||||
|
num=0
|
||||||
|
for gw in $router; do
|
||||||
|
ip route add 0.0.0.0/0 via $gw dev $interface \
|
||||||
|
metric $(( $num + ${IF_METRIC:-$(calc_metric)} ))
|
||||||
|
num=$(( $num + 1 ))
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
resolvconf() {
|
||||||
|
local i
|
||||||
|
[ -n "$IF_PEER_DNS" ] && [ "$IF_PEER_DNS" != "yes" ] && return
|
||||||
|
if [ "$RESOLV_CONF" = "no" ] || [ "$RESOLV_CONF" = "NO" ] \
|
||||||
|
|| [ -z "$RESOLV_CONF" ]; then
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
echo -n > "$RESOLV_CONF"
|
||||||
|
[ -n "$domain" ] && echo "search $domain" >> "$RESOLV_CONF"
|
||||||
|
for i in $dns; do
|
||||||
|
echo "nameserver $i" >> "$RESOLV_CONF"
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
bound() {
|
||||||
|
ip addr add $ip/$mask ${broadcast:+broadcast $broadcast} dev $interface
|
||||||
|
ip link set dev $interface up
|
||||||
|
routes
|
||||||
|
resolvconf
|
||||||
|
}
|
||||||
|
|
||||||
|
renew() {
|
||||||
|
if ! ip addr show dev $interface | grep $ip/$mask; then
|
||||||
|
ip addr flush dev $interface
|
||||||
|
ip addr add $ip/$mask ${broadcast:+broadcast $broadcast} dev $interface
|
||||||
|
fi
|
||||||
|
|
||||||
|
local i
|
||||||
|
for i in $router; do
|
||||||
|
if ! ip route show | grep ^default | grep $i; then
|
||||||
|
routes
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if ! grep "^search $domain"; then
|
||||||
|
resolvconf
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
for i in $dns; do
|
||||||
|
if ! grep "^nameserver $i"; then
|
||||||
|
resolvconf
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
case "$1" in
|
||||||
|
deconfig|renew|bound)
|
||||||
|
run_scripts $UDHCPC/pre-$1
|
||||||
|
$1
|
||||||
|
run_scripts $UDHCPC/post-$1
|
||||||
|
;;
|
||||||
|
leasefail)
|
||||||
|
echo "udhcpc failed to get a DHCP lease" >&2
|
||||||
|
;;
|
||||||
|
nak)
|
||||||
|
echo "udhcpc received DHCP NAK" >&2
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Error: this script should be called from udhcpc" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
exit 0
|
||||||
|
|
81
gns3server/modules/docker/resources/init.sh
Executable file
81
gns3server/modules/docker/resources/init.sh
Executable file
@ -0,0 +1,81 @@
|
|||||||
|
#!/gns3/bin/busybox sh
|
||||||
|
#
|
||||||
|
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
#
|
||||||
|
# This script is injected into the container and launch before
|
||||||
|
# the start command of the container
|
||||||
|
#
|
||||||
|
OLD_PATH="$PATH"
|
||||||
|
PATH=/gns3/bin:/tmp/gns3/bin
|
||||||
|
|
||||||
|
# bootstrap busybox commands
|
||||||
|
if [ ! -d /tmp/gns3/bin ]; then
|
||||||
|
busybox mkdir -p /tmp/gns3/bin
|
||||||
|
/gns3/bin/busybox --install -s /tmp/gns3/bin
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Restore file permission and mount volumes
|
||||||
|
echo "$GNS3_VOLUMES" | tr ":" "\n" | while read i
|
||||||
|
do
|
||||||
|
# Copy original files if destination is empty (first start)
|
||||||
|
[ "$(ls -A "/gns3volumes$i")" ] || cp -a "$i/." "/gns3volumes$i"
|
||||||
|
|
||||||
|
mount --bind "/gns3volumes$i" "$i"
|
||||||
|
if [ -f "$i/.gns3_perms" ]
|
||||||
|
then
|
||||||
|
while IFS=: read PERMS OWNER GROUP FILE
|
||||||
|
do
|
||||||
|
chmod "$PERMS" "$FILE"
|
||||||
|
chown "${OWNER}:${GROUP}" "$FILE"
|
||||||
|
done < "$i/.gns3_perms"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
|
||||||
|
# /etc/hosts
|
||||||
|
[ -s /etc/hosts ] || cat > /etc/hosts << __EOF__
|
||||||
|
127.0.1.1 $HOSTNAME
|
||||||
|
127.0.0.1 localhost
|
||||||
|
::1 localhost ip6-localhost ip6-loopback
|
||||||
|
fe00::0 ip6-localnet
|
||||||
|
ff00::0 ip6-mcastprefix
|
||||||
|
ff02::1 ip6-allnodes
|
||||||
|
ff02::2 ip6-allrouters
|
||||||
|
__EOF__
|
||||||
|
|
||||||
|
# configure loopback interface
|
||||||
|
ip link set dev lo up
|
||||||
|
|
||||||
|
# Wait for all eth available
|
||||||
|
while true
|
||||||
|
do
|
||||||
|
grep $GNS3_MAX_ETHERNET /proc/net/dev > /dev/null && break
|
||||||
|
sleep 0.5
|
||||||
|
done
|
||||||
|
|
||||||
|
# activate eth interfaces
|
||||||
|
sed -n 's/^ *\(eth[0-9]*\):.*/\1/p' < /proc/net/dev | while read dev; do
|
||||||
|
ip link set dev $dev up
|
||||||
|
done
|
||||||
|
|
||||||
|
# configure network interfaces
|
||||||
|
ifup -a -f
|
||||||
|
|
||||||
|
# continue normal docker startup
|
||||||
|
PATH="$OLD_PATH"
|
||||||
|
exec "$@"
|
||||||
|
|
@ -32,9 +32,9 @@ import glob
|
|||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
from gns3server.utils.interfaces import get_windows_interfaces, is_interface_up
|
from gns3server.utils.interfaces import interfaces, is_interface_up
|
||||||
from gns3server.utils.asyncio import wait_run_in_executor
|
from gns3server.utils.asyncio import wait_run_in_executor
|
||||||
from pkg_resources import parse_version
|
from gns3server.utils import parse_version
|
||||||
from uuid import UUID, uuid4
|
from uuid import UUID, uuid4
|
||||||
from ..base_manager import BaseManager
|
from ..base_manager import BaseManager
|
||||||
from ..project_manager import ProjectManager
|
from ..project_manager import ProjectManager
|
||||||
@ -219,7 +219,7 @@ class Dynamips(BaseManager):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
# Release the dynamips ids if we want to reload the same project
|
# Release the dynamips ids if we want to reload the same project
|
||||||
# later
|
# later
|
||||||
if project.id in self._dynamips_ids:
|
if project.id in self._dynamips_ids:
|
||||||
del self._dynamips_ids[project.id]
|
del self._dynamips_ids[project.id]
|
||||||
|
|
||||||
@ -336,16 +336,16 @@ class Dynamips(BaseManager):
|
|||||||
def find_dynamips(self):
|
def find_dynamips(self):
|
||||||
|
|
||||||
# look for Dynamips
|
# look for Dynamips
|
||||||
dynamips_path = self.config.get_section_config("Dynamips").get("dynamips_path")
|
dynamips_path = self.config.get_section_config("Dynamips").get("dynamips_path", "dynamips")
|
||||||
if not dynamips_path:
|
if not os.path.isabs(dynamips_path):
|
||||||
dynamips_path = shutil.which("dynamips")
|
dynamips_path = shutil.which(dynamips_path)
|
||||||
|
|
||||||
if not dynamips_path:
|
if not dynamips_path:
|
||||||
raise DynamipsError("Could not find Dynamips")
|
raise DynamipsError("Could not find Dynamips")
|
||||||
if not os.path.isfile(dynamips_path):
|
if not os.path.isfile(dynamips_path):
|
||||||
raise DynamipsError("Dynamips {} is not accessible".format(dynamips_path))
|
raise DynamipsError("Dynamips {} is not accessible".format(dynamips_path))
|
||||||
if not os.access(dynamips_path, os.X_OK):
|
if not os.access(dynamips_path, os.X_OK):
|
||||||
raise DynamipsError("Dynamips is not executable")
|
raise DynamipsError("Dynamips {} is not executable".format(dynamips_path))
|
||||||
|
|
||||||
self._dynamips_path = dynamips_path
|
self._dynamips_path = dynamips_path
|
||||||
return dynamips_path
|
return dynamips_path
|
||||||
@ -439,9 +439,9 @@ class Dynamips(BaseManager):
|
|||||||
ethernet_device = nio_settings["ethernet_device"]
|
ethernet_device = nio_settings["ethernet_device"]
|
||||||
if sys.platform.startswith("win"):
|
if sys.platform.startswith("win"):
|
||||||
# replace the interface name by the GUID on Windows
|
# replace the interface name by the GUID on Windows
|
||||||
interfaces = get_windows_interfaces()
|
windows_interfaces = interfaces()
|
||||||
npf_interface = None
|
npf_interface = None
|
||||||
for interface in interfaces:
|
for interface in windows_interfaces:
|
||||||
if interface["name"] == ethernet_device:
|
if interface["name"] == ethernet_device:
|
||||||
npf_interface = interface["id"]
|
npf_interface = interface["id"]
|
||||||
if not npf_interface:
|
if not npf_interface:
|
||||||
@ -603,8 +603,8 @@ class Dynamips(BaseManager):
|
|||||||
elif startup_config_content:
|
elif startup_config_content:
|
||||||
startup_config_path = self._create_config(vm, default_startup_config_path, startup_config_content)
|
startup_config_path = self._create_config(vm, default_startup_config_path, startup_config_content)
|
||||||
yield from vm.set_configs(startup_config_path)
|
yield from vm.set_configs(startup_config_path)
|
||||||
# An empty startup config crash dynamips
|
elif os.path.isfile(default_startup_config_path) and os.path.getsize(default_startup_config_path) == 0:
|
||||||
else:
|
# An empty startup-config may crash Dynamips
|
||||||
startup_config_path = self._create_config(vm, default_startup_config_path, "!\n")
|
startup_config_path = self._create_config(vm, default_startup_config_path, "!\n")
|
||||||
yield from vm.set_configs(startup_config_path)
|
yield from vm.set_configs(startup_config_path)
|
||||||
|
|
||||||
@ -710,10 +710,12 @@ class Dynamips(BaseManager):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
image_dir = self.get_images_directory()
|
image_dir = self.get_images_directory()
|
||||||
|
if not os.path.exists(image_dir):
|
||||||
|
return []
|
||||||
try:
|
try:
|
||||||
files = os.listdir(image_dir)
|
files = os.listdir(image_dir)
|
||||||
except FileNotFoundError:
|
except OSError as e:
|
||||||
return []
|
raise DynamipsError("Can not list {}: {}".format(image_dir, str(e)))
|
||||||
files.sort()
|
files.sort()
|
||||||
images = []
|
images = []
|
||||||
for filename in files:
|
for filename in files:
|
||||||
@ -724,7 +726,6 @@ class Dynamips(BaseManager):
|
|||||||
# read the first 7 bytes of the file.
|
# read the first 7 bytes of the file.
|
||||||
elf_header_start = f.read(7)
|
elf_header_start = f.read(7)
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
print(e)
|
|
||||||
continue
|
continue
|
||||||
# valid IOS images must start with the ELF magic number, be 32-bit, big endian and have an ELF version of 1
|
# valid IOS images must start with the ELF magic number, be 32-bit, big endian and have an ELF version of 1
|
||||||
if elf_header_start == b'\x7fELF\x01\x02\x01':
|
if elf_header_start == b'\x7fELF\x01\x02\x01':
|
||||||
|
@ -55,7 +55,6 @@ class DynamipsHypervisor:
|
|||||||
self._working_dir = working_dir
|
self._working_dir = working_dir
|
||||||
self._version = "N/A"
|
self._version = "N/A"
|
||||||
self._timeout = timeout
|
self._timeout = timeout
|
||||||
self._uuid = None
|
|
||||||
self._reader = None
|
self._reader = None
|
||||||
self._writer = None
|
self._writer = None
|
||||||
self._io_lock = asyncio.Lock()
|
self._io_lock = asyncio.Lock()
|
||||||
@ -99,8 +98,6 @@ class DynamipsHypervisor:
|
|||||||
except IndexError:
|
except IndexError:
|
||||||
self._version = "Unknown"
|
self._version = "Unknown"
|
||||||
|
|
||||||
self._uuid = yield from self.send("hypervisor uuid")
|
|
||||||
|
|
||||||
# this forces to send the working dir to Dynamips
|
# this forces to send the working dir to Dynamips
|
||||||
yield from self.set_working_dir(self._working_dir)
|
yield from self.set_working_dir(self._working_dir)
|
||||||
|
|
||||||
@ -174,16 +171,6 @@ class DynamipsHypervisor:
|
|||||||
|
|
||||||
return self._working_dir
|
return self._working_dir
|
||||||
|
|
||||||
@property
|
|
||||||
def uuid(self):
|
|
||||||
"""
|
|
||||||
Returns this hypervisor UUID.
|
|
||||||
|
|
||||||
:Returns: uuid string
|
|
||||||
"""
|
|
||||||
|
|
||||||
return self._uuid
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def devices(self):
|
def devices(self):
|
||||||
"""
|
"""
|
||||||
|
@ -19,9 +19,9 @@
|
|||||||
Represents a Dynamips hypervisor and starts/stops the associated Dynamips process.
|
Represents a Dynamips hypervisor and starts/stops the associated Dynamips process.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
import tempfile
|
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
from gns3server.utils.asyncio import wait_for_process_termination
|
from gns3server.utils.asyncio import wait_for_process_termination
|
||||||
@ -118,16 +118,22 @@ class Hypervisor(DynamipsHypervisor):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
self._command = self._build_command()
|
self._command = self._build_command()
|
||||||
|
env = os.environ.copy()
|
||||||
|
if sys.platform.startswith("win"):
|
||||||
|
# add the Npcap directory to $PATH to force Dynamips to use npcap DLL instead of Winpcap (if installed)
|
||||||
|
system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap")
|
||||||
|
if os.path.isdir(system_root):
|
||||||
|
env["PATH"] = system_root + ';' + env["PATH"]
|
||||||
try:
|
try:
|
||||||
log.info("Starting Dynamips: {}".format(self._command))
|
log.info("Starting Dynamips: {}".format(self._command))
|
||||||
|
self._stdout_file = os.path.join(self.working_dir, "dynamips_i{}_stdout.txt".format(self._id))
|
||||||
with tempfile.NamedTemporaryFile(delete=False) as fd:
|
log.info("Dynamips process logging to {}".format(self._stdout_file))
|
||||||
self._stdout_file = fd.name
|
with open(self._stdout_file, "w", encoding="utf-8") as fd:
|
||||||
log.info("Dynamips process logging to {}".format(fd.name))
|
|
||||||
self._process = yield from asyncio.create_subprocess_exec(*self._command,
|
self._process = yield from asyncio.create_subprocess_exec(*self._command,
|
||||||
stdout=fd,
|
stdout=fd,
|
||||||
stderr=subprocess.STDOUT,
|
stderr=subprocess.STDOUT,
|
||||||
cwd=self._working_dir)
|
cwd=self._working_dir,
|
||||||
|
env=env)
|
||||||
log.info("Dynamips process started PID={}".format(self._process.pid))
|
log.info("Dynamips process started PID={}".format(self._process.pid))
|
||||||
self._started = True
|
self._started = True
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
except (OSError, subprocess.SubprocessError) as e:
|
||||||
|
@ -48,9 +48,9 @@ class NIOVDE(NIO):
|
|||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def create(self):
|
def create(self):
|
||||||
|
|
||||||
self._hypervisor.send("nio create_vde {name} {control} {local}".format(name=self._name,
|
yield from self._hypervisor.send("nio create_vde {name} {control} {local}".format(name=self._name,
|
||||||
control=self._control_file,
|
control=self._control_file,
|
||||||
local=self._local_file))
|
local=self._local_file))
|
||||||
|
|
||||||
log.info("NIO VDE {name} created with control={control}, local={local}".format(name=self._name,
|
log.info("NIO VDE {name} created with control={control}, local={local}".format(name=self._name,
|
||||||
control=self._control_file,
|
control=self._control_file,
|
||||||
|
@ -21,7 +21,7 @@ http://github.com/GNS3/dynamips/blob/master/README.hypervisor#L558
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
from pkg_resources import parse_version
|
from gns3server.utils import parse_version
|
||||||
|
|
||||||
from .device import Device
|
from .device import Device
|
||||||
from ..nios.nio_udp import NIOUDP
|
from ..nios.nio_udp import NIOUDP
|
||||||
|
@ -61,12 +61,12 @@ class Router(BaseVM):
|
|||||||
|
|
||||||
def __init__(self, name, vm_id, project, manager, dynamips_id=None, console=None, aux=None, platform="c7200", hypervisor=None, ghost_flag=False):
|
def __init__(self, name, vm_id, project, manager, dynamips_id=None, console=None, aux=None, platform="c7200", hypervisor=None, ghost_flag=False):
|
||||||
|
|
||||||
super().__init__(name, vm_id, project, manager, console=console)
|
allocate_aux = manager.config.get_section_config("Dynamips").getboolean("allocate_aux_console_ports", False)
|
||||||
|
|
||||||
|
super().__init__(name, vm_id, project, manager, console=console, aux=aux, allocate_aux=aux)
|
||||||
|
|
||||||
self._hypervisor = hypervisor
|
self._hypervisor = hypervisor
|
||||||
self._dynamips_id = dynamips_id
|
self._dynamips_id = dynamips_id
|
||||||
self._closed = False
|
|
||||||
self._name = name
|
|
||||||
self._platform = platform
|
self._platform = platform
|
||||||
self._image = ""
|
self._image = ""
|
||||||
self._startup_config = ""
|
self._startup_config = ""
|
||||||
@ -88,7 +88,6 @@ class Router(BaseVM):
|
|||||||
self._disk0 = 0 # Megabytes
|
self._disk0 = 0 # Megabytes
|
||||||
self._disk1 = 0 # Megabytes
|
self._disk1 = 0 # Megabytes
|
||||||
self._auto_delete_disks = False
|
self._auto_delete_disks = False
|
||||||
self._aux = aux
|
|
||||||
self._mac_addr = ""
|
self._mac_addr = ""
|
||||||
self._system_id = "FTX0945W0MY" # processor board ID in IOS
|
self._system_id = "FTX0945W0MY" # processor board ID in IOS
|
||||||
self._slots = []
|
self._slots = []
|
||||||
@ -100,19 +99,12 @@ class Router(BaseVM):
|
|||||||
else:
|
else:
|
||||||
self._dynamips_id = dynamips_id
|
self._dynamips_id = dynamips_id
|
||||||
manager.take_dynamips_id(project.id, dynamips_id)
|
manager.take_dynamips_id(project.id, dynamips_id)
|
||||||
|
|
||||||
if self._aux is not None:
|
|
||||||
self._aux = self._manager.port_manager.reserve_tcp_port(self._aux, self._project)
|
|
||||||
else:
|
|
||||||
allocate_aux = self.manager.config.get_section_config("Dynamips").getboolean("allocate_aux_console_ports", False)
|
|
||||||
if allocate_aux:
|
|
||||||
self._aux = self._manager.port_manager.get_free_tcp_port(self._project)
|
|
||||||
else:
|
else:
|
||||||
log.info("Creating a new ghost IOS instance")
|
log.info("Creating a new ghost IOS instance")
|
||||||
if self._console:
|
if self._console:
|
||||||
# Ghost VMs do not need a console port.
|
# Ghost VMs do not need a console port.
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
self.console = None
|
||||||
self._console = None
|
|
||||||
self._dynamips_id = 0
|
self._dynamips_id = 0
|
||||||
self._name = "Ghost"
|
self._name = "Ghost"
|
||||||
|
|
||||||
@ -140,8 +132,8 @@ class Router(BaseVM):
|
|||||||
"disk0": self._disk0,
|
"disk0": self._disk0,
|
||||||
"disk1": self._disk1,
|
"disk1": self._disk1,
|
||||||
"auto_delete_disks": self._auto_delete_disks,
|
"auto_delete_disks": self._auto_delete_disks,
|
||||||
"console": self._console,
|
"console": self.console,
|
||||||
"aux": self._aux,
|
"aux": self.aux,
|
||||||
"mac_addr": self._mac_addr,
|
"mac_addr": self._mac_addr,
|
||||||
"system_id": self._system_id}
|
"system_id": self._system_id}
|
||||||
|
|
||||||
@ -195,8 +187,8 @@ class Router(BaseVM):
|
|||||||
|
|
||||||
yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console))
|
yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console))
|
||||||
|
|
||||||
if self._aux is not None:
|
if self.aux is not None:
|
||||||
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=self._aux))
|
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=self.aux))
|
||||||
|
|
||||||
# get the default base MAC address
|
# get the default base MAC address
|
||||||
mac_addr = yield from self._hypervisor.send('{platform} get_mac_addr "{name}"'.format(platform=self._platform,
|
mac_addr = yield from self._hypervisor.send('{platform} get_mac_addr "{name}"'.format(platform=self._platform,
|
||||||
@ -278,7 +270,10 @@ class Router(BaseVM):
|
|||||||
|
|
||||||
status = yield from self.get_status()
|
status = yield from self.get_status()
|
||||||
if status != "inactive":
|
if status != "inactive":
|
||||||
yield from self._hypervisor.send('vm stop "{name}"'.format(name=self._name))
|
try:
|
||||||
|
yield from self._hypervisor.send('vm stop "{name}"'.format(name=self._name))
|
||||||
|
except DynamipsError as e:
|
||||||
|
log.warn("Could not stop {}: {}".format(self._name, e))
|
||||||
self.status = "stopped"
|
self.status = "stopped"
|
||||||
log.info('Router "{name}" [{id}] has been stopped'.format(name=self._name, id=self._id))
|
log.info('Router "{name}" [{id}] has been stopped'.format(name=self._name, id=self._id))
|
||||||
yield from self.save_configs()
|
yield from self.save_configs()
|
||||||
@ -328,19 +323,8 @@ class Router(BaseVM):
|
|||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def close(self):
|
def close(self):
|
||||||
|
|
||||||
if self._closed:
|
if not (yield from super().close()):
|
||||||
# router is already closed
|
return False
|
||||||
return
|
|
||||||
|
|
||||||
log.debug('Router "{name}" [{id}] is closing'.format(name=self._name, id=self._id))
|
|
||||||
|
|
||||||
if self._console:
|
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
|
||||||
self._console = None
|
|
||||||
|
|
||||||
if self._aux:
|
|
||||||
self._manager.port_manager.release_tcp_port(self._aux, self._project)
|
|
||||||
self._aux = None
|
|
||||||
|
|
||||||
for adapter in self._slots:
|
for adapter in self._slots:
|
||||||
if adapter is not None:
|
if adapter is not None:
|
||||||
@ -354,8 +338,8 @@ class Router(BaseVM):
|
|||||||
try:
|
try:
|
||||||
yield from self.stop()
|
yield from self.stop()
|
||||||
yield from self._hypervisor.send('vm delete "{}"'.format(self._name))
|
yield from self._hypervisor.send('vm delete "{}"'.format(self._name))
|
||||||
except DynamipsError:
|
except DynamipsError as e:
|
||||||
pass
|
log.warn("Could not stop and delete {}: {}".format(self._name, e))
|
||||||
yield from self.hypervisor.stop()
|
yield from self.hypervisor.stop()
|
||||||
|
|
||||||
if self._auto_delete_disks:
|
if self._auto_delete_disks:
|
||||||
@ -367,7 +351,7 @@ class Router(BaseVM):
|
|||||||
files += glob.glob(os.path.join(glob.escape(project_dir), "{}_i{}_flash[0-1]".format(self.platform, self.dynamips_id)))
|
files += glob.glob(os.path.join(glob.escape(project_dir), "{}_i{}_flash[0-1]".format(self.platform, self.dynamips_id)))
|
||||||
files += glob.glob(os.path.join(glob.escape(project_dir), "{}_i{}_rom".format(self.platform, self.dynamips_id)))
|
files += glob.glob(os.path.join(glob.escape(project_dir), "{}_i{}_rom".format(self.platform, self.dynamips_id)))
|
||||||
files += glob.glob(os.path.join(glob.escape(project_dir), "{}_i{}_bootflash".format(self.platform, self.dynamips_id)))
|
files += glob.glob(os.path.join(glob.escape(project_dir), "{}_i{}_bootflash".format(self.platform, self.dynamips_id)))
|
||||||
files += glob.glob(os.path.join(glob.escape(project_dir), "{}_i{}_ssa").format(self.platform, self.dynamips_id))
|
files += glob.glob(os.path.join(glob.escape(project_dir), "{}_i{}_ssa".format(self.platform, self.dynamips_id)))
|
||||||
for file in files:
|
for file in files:
|
||||||
try:
|
try:
|
||||||
log.debug("Deleting file {}".format(file))
|
log.debug("Deleting file {}".format(file))
|
||||||
@ -375,7 +359,6 @@ class Router(BaseVM):
|
|||||||
except OSError as e:
|
except OSError as e:
|
||||||
log.warn("Could not delete file {}: {}".format(file, e))
|
log.warn("Could not delete file {}: {}".format(file, e))
|
||||||
continue
|
continue
|
||||||
self._closed = True
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def platform(self):
|
def platform(self):
|
||||||
@ -913,25 +896,8 @@ class Router(BaseVM):
|
|||||||
:param console: console port (integer)
|
:param console: console port (integer)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=console))
|
self.console = console
|
||||||
|
yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self.console))
|
||||||
log.info('Router "{name}" [{id}]: console port updated from {old_console} to {new_console}'.format(name=self._name,
|
|
||||||
id=self._id,
|
|
||||||
old_console=self._console,
|
|
||||||
new_console=console))
|
|
||||||
|
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
|
||||||
self._console = self._manager.port_manager.reserve_tcp_port(console, self._project)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def aux(self):
|
|
||||||
"""
|
|
||||||
Returns the TCP auxiliary port.
|
|
||||||
|
|
||||||
:returns: console auxiliary port (integer)
|
|
||||||
"""
|
|
||||||
|
|
||||||
return self._aux
|
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def set_aux(self, aux):
|
def set_aux(self, aux):
|
||||||
@ -941,16 +907,9 @@ class Router(BaseVM):
|
|||||||
:param aux: console auxiliary port (integer)
|
:param aux: console auxiliary port (integer)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
self.aux = aux
|
||||||
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux))
|
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux))
|
||||||
|
|
||||||
log.info('Router "{name}" [{id}]: aux port updated from {old_aux} to {new_aux}'.format(name=self._name,
|
|
||||||
id=self._id,
|
|
||||||
old_aux=self._aux,
|
|
||||||
new_aux=aux))
|
|
||||||
|
|
||||||
self._manager.port_manager.release_tcp_port(self._aux, self._project)
|
|
||||||
self._aux = self._manager.port_manager.reserve_tcp_port(aux, self._project)
|
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def get_cpu_usage(self, cpu_id=0):
|
def get_cpu_usage(self, cpu_id=0):
|
||||||
"""
|
"""
|
||||||
@ -1560,12 +1519,18 @@ class Router(BaseVM):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
if self.startup_config or self.private_config:
|
if self.startup_config or self.private_config:
|
||||||
|
|
||||||
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
|
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
|
||||||
|
try:
|
||||||
|
config_path = os.path.join(module_workdir, "configs")
|
||||||
|
os.makedirs(config_path, exist_ok=True)
|
||||||
|
except OSError as e:
|
||||||
|
raise DynamipsError("Could could not create configuration directory {}: {}".format(config_path, e))
|
||||||
|
|
||||||
startup_config_base64, private_config_base64 = yield from self.extract_config()
|
startup_config_base64, private_config_base64 = yield from self.extract_config()
|
||||||
if startup_config_base64:
|
if startup_config_base64:
|
||||||
if not self.startup_config:
|
if not self.startup_config:
|
||||||
self._startup_config = os.path.join("configs", "i{}_startup-config.cfg".format(self._dynamips_id))
|
self._startup_config = os.path.join("configs", "i{}_startup-config.cfg".format(self._dynamips_id))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
config = base64.b64decode(startup_config_base64).decode("utf-8", errors="replace")
|
config = base64.b64decode(startup_config_base64).decode("utf-8", errors="replace")
|
||||||
config = "!\n" + config.replace("\r", "")
|
config = "!\n" + config.replace("\r", "")
|
||||||
@ -1576,13 +1541,11 @@ class Router(BaseVM):
|
|||||||
except (binascii.Error, OSError) as e:
|
except (binascii.Error, OSError) as e:
|
||||||
raise DynamipsError("Could not save the startup configuration {}: {}".format(config_path, e))
|
raise DynamipsError("Could not save the startup configuration {}: {}".format(config_path, e))
|
||||||
|
|
||||||
if private_config_base64:
|
if private_config_base64 and base64.b64decode(private_config_base64) != b'\nkerberos password \nend\n':
|
||||||
if not self.private_config:
|
if not self.private_config:
|
||||||
self._private_config = os.path.join("configs", "i{}_private-config.cfg".format(self._dynamips_id))
|
self._private_config = os.path.join("configs", "i{}_private-config.cfg".format(self._dynamips_id))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
config = base64.b64decode(private_config_base64).decode("utf-8", errors="replace")
|
config = base64.b64decode(private_config_base64).decode("utf-8", errors="replace")
|
||||||
config = "!\n" + config.replace("\r", "")
|
|
||||||
config_path = os.path.join(module_workdir, self.private_config)
|
config_path = os.path.join(module_workdir, self.private_config)
|
||||||
with open(config_path, "wb") as f:
|
with open(config_path, "wb") as f:
|
||||||
log.info("saving private-config to {}".format(self.private_config))
|
log.info("saving private-config to {}".format(self.private_config))
|
||||||
|
@ -72,7 +72,6 @@ class IOUVM(BaseVM):
|
|||||||
|
|
||||||
super().__init__(name, vm_id, project, manager, console=console)
|
super().__init__(name, vm_id, project, manager, console=console)
|
||||||
|
|
||||||
self._command = []
|
|
||||||
self._iouyap_process = None
|
self._iouyap_process = None
|
||||||
self._iou_process = None
|
self._iou_process = None
|
||||||
self._iou_stdout_file = ""
|
self._iou_stdout_file = ""
|
||||||
@ -98,11 +97,8 @@ class IOUVM(BaseVM):
|
|||||||
Closes this IOU VM.
|
Closes this IOU VM.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
log.debug('IOU "{name}" [{id}] is closing'.format(name=self._name, id=self._id))
|
if not (yield from super().close()):
|
||||||
|
return False
|
||||||
if self._console:
|
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
|
||||||
self._console = None
|
|
||||||
|
|
||||||
adapters = self._ethernet_adapters + self._serial_adapters
|
adapters = self._ethernet_adapters + self._serial_adapters
|
||||||
for adapter in adapters:
|
for adapter in adapters:
|
||||||
@ -112,7 +108,6 @@ class IOUVM(BaseVM):
|
|||||||
self.manager.port_manager.release_udp_port(nio.lport, self._project)
|
self.manager.port_manager.release_udp_port(nio.lport, self._project)
|
||||||
|
|
||||||
yield from self.stop()
|
yield from self.stop()
|
||||||
self.save_configs()
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def path(self):
|
def path(self):
|
||||||
@ -135,7 +130,7 @@ class IOUVM(BaseVM):
|
|||||||
self._path = self.manager.get_abs_image_path(path)
|
self._path = self.manager.get_abs_image_path(path)
|
||||||
|
|
||||||
# In 1.2 users uploaded images to the images roots
|
# In 1.2 users uploaded images to the images roots
|
||||||
# after the migration their images are inside images/IOU
|
# after the migration their images are inside images/IOU
|
||||||
# but old topologies use old path
|
# but old topologies use old path
|
||||||
if "IOU" not in self._path:
|
if "IOU" not in self._path:
|
||||||
location, filename = os.path.split(self._path)
|
location, filename = os.path.split(self._path)
|
||||||
@ -220,7 +215,8 @@ class IOUVM(BaseVM):
|
|||||||
"startup_config": self.relative_startup_config_file,
|
"startup_config": self.relative_startup_config_file,
|
||||||
"private_config": self.relative_private_config_file,
|
"private_config": self.relative_private_config_file,
|
||||||
"iourc_path": self.iourc_path,
|
"iourc_path": self.iourc_path,
|
||||||
"use_default_iou_values": self._use_default_iou_values}
|
"use_default_iou_values": self._use_default_iou_values,
|
||||||
|
"command_line": self.command_line}
|
||||||
|
|
||||||
# return the relative path if the IOU image is in the images_path directory
|
# return the relative path if the IOU image is in the images_path directory
|
||||||
iou_vm_info["path"] = self.manager.get_relative_image_path(self.path)
|
iou_vm_info["path"] = self.manager.get_relative_image_path(self.path)
|
||||||
@ -234,9 +230,11 @@ class IOUVM(BaseVM):
|
|||||||
:returns: path to IOUYAP
|
:returns: path to IOUYAP
|
||||||
"""
|
"""
|
||||||
|
|
||||||
path = self._manager.config.get_section_config("IOU").get("iouyap_path", "iouyap")
|
search_path = self._manager.config.get_section_config("IOU").get("iouyap_path", "iouyap")
|
||||||
if path == "iouyap":
|
path = shutil.which(search_path)
|
||||||
path = shutil.which("iouyap")
|
# shutil.which return None if the path doesn't exists
|
||||||
|
if not path:
|
||||||
|
return search_path
|
||||||
return path
|
return path
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@ -403,9 +401,9 @@ class IOUVM(BaseVM):
|
|||||||
raise IOUError("Hostname \"{}\" not found in iourc file {}".format(hostname, self.iourc_path))
|
raise IOUError("Hostname \"{}\" not found in iourc file {}".format(hostname, self.iourc_path))
|
||||||
user_ioukey = config["license"][hostname]
|
user_ioukey = config["license"][hostname]
|
||||||
if user_ioukey[-1:] != ';':
|
if user_ioukey[-1:] != ';':
|
||||||
raise IOUError("IOU key not ending with ; in iourc file".format(self.iourc_path))
|
raise IOUError("IOU key not ending with ; in iourc file {}".format(self.iourc_path))
|
||||||
if len(user_ioukey) != 17:
|
if len(user_ioukey) != 17:
|
||||||
raise IOUError("IOU key length is not 16 characters in iourc file".format(self.iourc_path))
|
raise IOUError("IOU key length is not 16 characters in iourc file {}".format(self.iourc_path))
|
||||||
user_ioukey = user_ioukey[:16]
|
user_ioukey = user_ioukey[:16]
|
||||||
|
|
||||||
# We can't test this because it's mean distributing a valid licence key
|
# We can't test this because it's mean distributing a valid licence key
|
||||||
@ -502,13 +500,14 @@ class IOUVM(BaseVM):
|
|||||||
|
|
||||||
if "IOURC" not in os.environ:
|
if "IOURC" not in os.environ:
|
||||||
env["IOURC"] = iourc_path
|
env["IOURC"] = iourc_path
|
||||||
self._command = yield from self._build_command()
|
command = yield from self._build_command()
|
||||||
try:
|
try:
|
||||||
log.info("Starting IOU: {}".format(self._command))
|
log.info("Starting IOU: {}".format(command))
|
||||||
self._iou_stdout_file = os.path.join(self.working_dir, "iou.log")
|
self._iou_stdout_file = os.path.join(self.working_dir, "iou.log")
|
||||||
log.info("Logging to {}".format(self._iou_stdout_file))
|
log.info("Logging to {}".format(self._iou_stdout_file))
|
||||||
with open(self._iou_stdout_file, "w", encoding="utf-8") as fd:
|
with open(self._iou_stdout_file, "w", encoding="utf-8") as fd:
|
||||||
self._iou_process = yield from asyncio.create_subprocess_exec(*self._command,
|
self.command_line = ' '.join(command)
|
||||||
|
self._iou_process = yield from asyncio.create_subprocess_exec(*command,
|
||||||
stdout=fd,
|
stdout=fd,
|
||||||
stderr=subprocess.STDOUT,
|
stderr=subprocess.STDOUT,
|
||||||
cwd=self.working_dir,
|
cwd=self.working_dir,
|
||||||
@ -537,14 +536,19 @@ class IOUVM(BaseVM):
|
|||||||
:param returncode: Process returncode
|
:param returncode: Process returncode
|
||||||
"""
|
"""
|
||||||
|
|
||||||
log.info("{} process has stopped, return code: {}".format(process_name, returncode))
|
|
||||||
self._terminate_process_iou()
|
self._terminate_process_iou()
|
||||||
self._terminate_process_iouyap()
|
self._terminate_process_iouyap()
|
||||||
self._ioucon_thread_stop_event.set()
|
self._ioucon_thread_stop_event.set()
|
||||||
|
|
||||||
if returncode != 0:
|
if returncode != 0:
|
||||||
self.project.emit("log.error", {"message": "{} process has stopped, return code: {}\n{}".format(process_name,
|
log.info("{} process has stopped, return code: {}".format(process_name, returncode))
|
||||||
returncode,
|
else:
|
||||||
self.read_iou_stdout())})
|
if returncode == 11:
|
||||||
|
message = "{} process has stopped, return code: {}. This could be an issue with the image using a different image can fix the issue.\n{}".format(process_name, returncode, self.read_iou_stdout())
|
||||||
|
else:
|
||||||
|
message = "{} process has stopped, return code: {}\n{}".format(process_name, returncode, self.read_iou_stdout())
|
||||||
|
log.warn(message)
|
||||||
|
self.project.emit("log.error", {"message": message})
|
||||||
|
|
||||||
def _rename_nvram_file(self):
|
def _rename_nvram_file(self):
|
||||||
"""
|
"""
|
||||||
@ -666,7 +670,10 @@ class IOUVM(BaseVM):
|
|||||||
except asyncio.TimeoutError:
|
except asyncio.TimeoutError:
|
||||||
if self._iou_process.returncode is None:
|
if self._iou_process.returncode is None:
|
||||||
log.warn("IOU process {} is still running... killing it".format(self._iou_process.pid))
|
log.warn("IOU process {} is still running... killing it".format(self._iou_process.pid))
|
||||||
self._iou_process.kill()
|
try:
|
||||||
|
self._iou_process.kill()
|
||||||
|
except ProcessLookupError:
|
||||||
|
pass
|
||||||
self._iou_process = None
|
self._iou_process = None
|
||||||
|
|
||||||
if self.is_iouyap_running():
|
if self.is_iouyap_running():
|
||||||
@ -683,6 +690,7 @@ class IOUVM(BaseVM):
|
|||||||
self._iouyap_process = None
|
self._iouyap_process = None
|
||||||
|
|
||||||
self._started = False
|
self._started = False
|
||||||
|
self.save_configs()
|
||||||
|
|
||||||
def _terminate_process_iouyap(self):
|
def _terminate_process_iouyap(self):
|
||||||
"""
|
"""
|
||||||
@ -912,6 +920,7 @@ class IOUVM(BaseVM):
|
|||||||
|
|
||||||
self._adapters = self._ethernet_adapters + self._serial_adapters
|
self._adapters = self._ethernet_adapters + self._serial_adapters
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
def adapter_add_nio_binding(self, adapter_number, port_number, nio):
|
def adapter_add_nio_binding(self, adapter_number, port_number, nio):
|
||||||
"""
|
"""
|
||||||
Adds a adapter NIO binding.
|
Adds a adapter NIO binding.
|
||||||
@ -944,6 +953,7 @@ class IOUVM(BaseVM):
|
|||||||
except ProcessLookupError:
|
except ProcessLookupError:
|
||||||
log.error("Could not update iouyap configuration: process (PID={}) not found".format(self._iouyap_process.pid))
|
log.error("Could not update iouyap configuration: process (PID={}) not found".format(self._iouyap_process.pid))
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
def adapter_remove_nio_binding(self, adapter_number, port_number):
|
def adapter_remove_nio_binding(self, adapter_number, port_number):
|
||||||
"""
|
"""
|
||||||
Removes an adapter NIO binding.
|
Removes an adapter NIO binding.
|
||||||
@ -1064,6 +1074,14 @@ class IOUVM(BaseVM):
|
|||||||
else:
|
else:
|
||||||
startup_config = startup_config.replace("%h", self._name)
|
startup_config = startup_config.replace("%h", self._name)
|
||||||
f.write(startup_config)
|
f.write(startup_config)
|
||||||
|
|
||||||
|
vlan_file = os.path.join(self.working_dir, "vlan.dat-{:05d}".format(self.application_id))
|
||||||
|
if os.path.exists(vlan_file):
|
||||||
|
try:
|
||||||
|
os.remove(vlan_file)
|
||||||
|
except OSError as e:
|
||||||
|
log.error("Could not delete VLAN file '{}': {}".format(vlan_file, e))
|
||||||
|
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
raise IOUError("Can't write startup-config file '{}': {}".format(startup_config_path, e))
|
raise IOUError("Can't write startup-config file '{}': {}".format(startup_config_path, e))
|
||||||
|
|
||||||
@ -1097,7 +1115,7 @@ class IOUVM(BaseVM):
|
|||||||
if private_config is None:
|
if private_config is None:
|
||||||
private_config = ''
|
private_config = ''
|
||||||
|
|
||||||
# We disallow erasing the startup config file
|
# We disallow erasing the private config file
|
||||||
if len(private_config) == 0 and os.path.exists(private_config_path):
|
if len(private_config) == 0 and os.path.exists(private_config_path):
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -1188,7 +1206,7 @@ class IOUVM(BaseVM):
|
|||||||
try:
|
try:
|
||||||
startup_config_content, private_config_content = nvram_export(nvram_content)
|
startup_config_content, private_config_content = nvram_export(nvram_content)
|
||||||
except ValueError as e:
|
except ValueError as e:
|
||||||
log.warning("Could not export configs from nvram file".format(nvram_file, e))
|
log.warning("Could not export configs from nvram file {}: {}".format(nvram_file, e))
|
||||||
return None, None
|
return None, None
|
||||||
|
|
||||||
return startup_config_content, private_config_content
|
return startup_config_content, private_config_content
|
||||||
@ -1204,18 +1222,16 @@ class IOUVM(BaseVM):
|
|||||||
config_path = os.path.join(self.working_dir, "startup-config.cfg")
|
config_path = os.path.join(self.working_dir, "startup-config.cfg")
|
||||||
try:
|
try:
|
||||||
config = startup_config_content.decode("utf-8", errors="replace")
|
config = startup_config_content.decode("utf-8", errors="replace")
|
||||||
config = "!\n" + config.replace("\r", "")
|
|
||||||
with open(config_path, "wb") as f:
|
with open(config_path, "wb") as f:
|
||||||
log.info("saving startup-config to {}".format(config_path))
|
log.info("saving startup-config to {}".format(config_path))
|
||||||
f.write(config.encode("utf-8"))
|
f.write(config.encode("utf-8"))
|
||||||
except (binascii.Error, OSError) as e:
|
except (binascii.Error, OSError) as e:
|
||||||
raise IOUError("Could not save the startup configuration {}: {}".format(config_path, e))
|
raise IOUError("Could not save the startup configuration {}: {}".format(config_path, e))
|
||||||
|
|
||||||
if private_config_content:
|
if private_config_content and private_config_content != b'\nend\n':
|
||||||
config_path = os.path.join(self.working_dir, "private-config.cfg")
|
config_path = os.path.join(self.working_dir, "private-config.cfg")
|
||||||
try:
|
try:
|
||||||
config = private_config_content.decode("utf-8", errors="replace")
|
config = private_config_content.decode("utf-8", errors="replace")
|
||||||
config = "!\n" + config.replace("\r", "")
|
|
||||||
with open(config_path, "wb") as f:
|
with open(config_path, "wb") as f:
|
||||||
log.info("saving private-config to {}".format(config_path))
|
log.info("saving private-config to {}".format(config_path))
|
||||||
f.write(config.encode("utf-8"))
|
f.write(config.encode("utf-8"))
|
||||||
@ -1254,10 +1270,10 @@ class IOUVM(BaseVM):
|
|||||||
|
|
||||||
nio.startPacketCapture(output_file, data_link_type)
|
nio.startPacketCapture(output_file, data_link_type)
|
||||||
log.info('IOU "{name}" [{id}]: starting packet capture on {adapter_number}/{port_number} to {output_file}'.format(name=self._name,
|
log.info('IOU "{name}" [{id}]: starting packet capture on {adapter_number}/{port_number} to {output_file}'.format(name=self._name,
|
||||||
id=self._id,
|
id=self._id,
|
||||||
adapter_number=adapter_number,
|
adapter_number=adapter_number,
|
||||||
port_number=port_number,
|
port_number=port_number,
|
||||||
output_file=output_file))
|
output_file=output_file))
|
||||||
|
|
||||||
if self.is_iouyap_running():
|
if self.is_iouyap_running():
|
||||||
self._update_iouyap_config()
|
self._update_iouyap_config()
|
||||||
|
@ -550,6 +550,8 @@ def send_recv_loop(epoll, console, router, esc_char, stop_event):
|
|||||||
esc_state = True
|
esc_state = True
|
||||||
else:
|
else:
|
||||||
router.write(buf)
|
router.write(buf)
|
||||||
|
except ConnectionError as e:
|
||||||
|
pass
|
||||||
finally:
|
finally:
|
||||||
router.unregister(epoll)
|
router.unregister(epoll)
|
||||||
console.unregister(epoll)
|
console.unregister(epoll)
|
||||||
|
@ -24,6 +24,10 @@ import logging
|
|||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# This ports are disallowed by Chrome and Firefox to avoid trouble with skip them
|
||||||
|
BANNED_PORTS = set((1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87, 95, 101, 102, 103, 104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139, 143, 179, 389, 465, 512, 513, 514, 515, 526, 530, 531, 532, 540, 556, 563, 587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667, 6668, 6669))
|
||||||
|
|
||||||
|
|
||||||
class PortManager:
|
class PortManager:
|
||||||
|
|
||||||
"""
|
"""
|
||||||
@ -42,8 +46,8 @@ class PortManager:
|
|||||||
server_config = Config.instance().get_section_config("Server")
|
server_config = Config.instance().get_section_config("Server")
|
||||||
remote_console_connections = server_config.getboolean("allow_remote_console")
|
remote_console_connections = server_config.getboolean("allow_remote_console")
|
||||||
|
|
||||||
console_start_port_range = server_config.getint("console_start_port_range", 2001)
|
console_start_port_range = server_config.getint("console_start_port_range", 5000)
|
||||||
console_end_port_range = server_config.getint("console_end_port_range", 7000)
|
console_end_port_range = server_config.getint("console_end_port_range", 10000)
|
||||||
self._console_port_range = (console_start_port_range, console_end_port_range)
|
self._console_port_range = (console_start_port_range, console_end_port_range)
|
||||||
log.debug("Console port range is {}-{}".format(console_start_port_range, console_end_port_range))
|
log.debug("Console port range is {}-{}".format(console_start_port_range, console_end_port_range))
|
||||||
|
|
||||||
@ -102,7 +106,7 @@ class PortManager:
|
|||||||
return self._udp_host
|
return self._udp_host
|
||||||
|
|
||||||
@udp_host.setter
|
@udp_host.setter
|
||||||
def host(self, new_host):
|
def udp_host(self, new_host):
|
||||||
|
|
||||||
self._udp_host = new_host
|
self._udp_host = new_host
|
||||||
|
|
||||||
@ -142,16 +146,16 @@ class PortManager:
|
|||||||
if end_port < start_port:
|
if end_port < start_port:
|
||||||
raise HTTPConflict(text="Invalid port range {}-{}".format(start_port, end_port))
|
raise HTTPConflict(text="Invalid port range {}-{}".format(start_port, end_port))
|
||||||
|
|
||||||
|
|
||||||
last_exception = None
|
last_exception = None
|
||||||
for port in range(start_port, end_port + 1):
|
for port in range(start_port, end_port + 1):
|
||||||
if port in ignore_ports:
|
if port in ignore_ports or port in BANNED_PORTS:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
last_exception
|
|
||||||
try:
|
try:
|
||||||
PortManager._check_port(host, port, socket_type)
|
PortManager._check_port(host, port, socket_type)
|
||||||
return port
|
if host != "0.0.0.0":
|
||||||
|
PortManager._check_port("0.0.0.0", port, socket_type)
|
||||||
|
return port
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
last_exception = e
|
last_exception = e
|
||||||
if port + 1 == end_port:
|
if port + 1 == end_port:
|
||||||
@ -163,6 +167,7 @@ class PortManager:
|
|||||||
end_port,
|
end_port,
|
||||||
host,
|
host,
|
||||||
last_exception))
|
last_exception))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _check_port(host, port, socket_type):
|
def _check_port(host, port, socket_type):
|
||||||
"""
|
"""
|
||||||
@ -182,7 +187,6 @@ class PortManager:
|
|||||||
s.bind(sa) # the port is available if bind is a success
|
s.bind(sa) # the port is available if bind is a success
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def get_free_tcp_port(self, project, port_range_start=None, port_range_end=None):
|
def get_free_tcp_port(self, project, port_range_start=None, port_range_end=None):
|
||||||
"""
|
"""
|
||||||
Get an available TCP port and reserve it
|
Get an available TCP port and reserve it
|
||||||
@ -227,15 +231,15 @@ class PortManager:
|
|||||||
old_port = port
|
old_port = port
|
||||||
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
||||||
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
|
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
|
||||||
log.warning(msg)
|
log.debug(msg)
|
||||||
project.emit("log.warning", {"message": msg})
|
#project.emit("log.warning", {"message": msg})
|
||||||
return port
|
return port
|
||||||
if port < self._console_port_range[0] or port > self._console_port_range[1]:
|
if port < port_range_start or port > port_range_end:
|
||||||
old_port = port
|
old_port = port
|
||||||
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
||||||
msg = "TCP port {} is outside the range {}-{} on host {}. Port has been replaced by {}".format(old_port, port_range_start, port_range_end, self._console_host, port)
|
msg = "TCP port {} is outside the range {}-{} on host {}. Port has been replaced by {}".format(old_port, port_range_start, port_range_end, self._console_host, port)
|
||||||
log.warning(msg)
|
log.debug(msg)
|
||||||
project.emit("log.warning", {"message": msg})
|
#project.emit("log.warning", {"message": msg})
|
||||||
return port
|
return port
|
||||||
try:
|
try:
|
||||||
PortManager._check_port(self._console_host, port, "TCP")
|
PortManager._check_port(self._console_host, port, "TCP")
|
||||||
@ -243,8 +247,8 @@ class PortManager:
|
|||||||
old_port = port
|
old_port = port
|
||||||
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
||||||
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
|
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
|
||||||
log.warning(msg)
|
log.debug(msg)
|
||||||
project.emit("log.warning", {"message": msg})
|
#project.emit("log.warning", {"message": msg})
|
||||||
return port
|
return port
|
||||||
|
|
||||||
self._used_tcp_ports.add(port)
|
self._used_tcp_ports.add(port)
|
||||||
@ -291,7 +295,7 @@ class PortManager:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
if port in self._used_udp_ports:
|
if port in self._used_udp_ports:
|
||||||
raise HTTPConflict(text="UDP port {} already in use on host".format(port, self._console_host))
|
raise HTTPConflict(text="UDP port {} already in use on host {}".format(port, self._console_host))
|
||||||
if port < self._udp_port_range[0] or port > self._udp_port_range[1]:
|
if port < self._udp_port_range[0] or port > self._udp_port_range[1]:
|
||||||
raise HTTPConflict(text="UDP port {} is outside the range {}-{}".format(port, self._udp_port_range[0], self._udp_port_range[1]))
|
raise HTTPConflict(text="UDP port {} is outside the range {}-{}".format(port, self._udp_port_range[0], self._udp_port_range[1]))
|
||||||
self._used_udp_ports.add(port)
|
self._used_udp_ports.add(port)
|
||||||
|
@ -15,17 +15,21 @@
|
|||||||
# You should have received a copy of the GNU General Public License
|
# You should have received a copy of the GNU General Public License
|
||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import aiohttp
|
|
||||||
import os
|
import os
|
||||||
|
import aiohttp
|
||||||
import shutil
|
import shutil
|
||||||
import asyncio
|
import asyncio
|
||||||
import hashlib
|
import hashlib
|
||||||
|
import zipstream
|
||||||
|
import zipfile
|
||||||
|
import json
|
||||||
|
|
||||||
from uuid import UUID, uuid4
|
from uuid import UUID, uuid4
|
||||||
from .port_manager import PortManager
|
from .port_manager import PortManager
|
||||||
from ..config import Config
|
from ..config import Config
|
||||||
from ..utils.asyncio import wait_run_in_executor
|
from ..utils.asyncio import wait_run_in_executor
|
||||||
|
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -141,7 +145,10 @@ class Project:
|
|||||||
|
|
||||||
if hasattr(self, "_path"):
|
if hasattr(self, "_path"):
|
||||||
if path != self._path and self.is_local() is False:
|
if path != self._path and self.is_local() is False:
|
||||||
raise aiohttp.web.HTTPForbidden(text="You are not allowed to modify the project directory location")
|
raise aiohttp.web.HTTPForbidden(text="You are not allowed to modify the project directory path")
|
||||||
|
|
||||||
|
if '"' in path:
|
||||||
|
raise aiohttp.web.HTTPForbidden(text="You are not allowed to use \" in the project directory path. It's not supported by Dynamips.")
|
||||||
|
|
||||||
self._path = path
|
self._path = path
|
||||||
self._update_temporary_file()
|
self._update_temporary_file()
|
||||||
@ -166,6 +173,8 @@ class Project:
|
|||||||
@name.setter
|
@name.setter
|
||||||
def name(self, name):
|
def name(self, name):
|
||||||
|
|
||||||
|
if "/" in name or "\\" in name:
|
||||||
|
raise aiohttp.web.HTTPForbidden(text="Name can not contain path separator")
|
||||||
self._name = name
|
self._name = name
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@ -425,7 +434,10 @@ class Project:
|
|||||||
path = os.path.join(directory, project)
|
path = os.path.join(directory, project)
|
||||||
if os.path.exists(os.path.join(path, ".gns3_temporary")):
|
if os.path.exists(os.path.join(path, ".gns3_temporary")):
|
||||||
log.warning("Purge old temporary project {}".format(project))
|
log.warning("Purge old temporary project {}".format(project))
|
||||||
shutil.rmtree(path)
|
try:
|
||||||
|
shutil.rmtree(path)
|
||||||
|
except OSError as e:
|
||||||
|
log.error("Error when cleaning {}: {}".format(path, str(e)))
|
||||||
|
|
||||||
def modules(self):
|
def modules(self):
|
||||||
"""
|
"""
|
||||||
@ -504,3 +516,183 @@ class Project:
|
|||||||
break
|
break
|
||||||
m.update(buf)
|
m.update(buf)
|
||||||
return m.hexdigest()
|
return m.hexdigest()
|
||||||
|
|
||||||
|
def export(self, include_images=False):
|
||||||
|
"""
|
||||||
|
Export the project as zip. It's a ZipStream object.
|
||||||
|
The file will be read chunk by chunk when you iterate on
|
||||||
|
the zip.
|
||||||
|
|
||||||
|
It will ignore some files like snapshots and
|
||||||
|
|
||||||
|
:returns: ZipStream object
|
||||||
|
"""
|
||||||
|
|
||||||
|
z = zipstream.ZipFile(allowZip64=True)
|
||||||
|
# topdown allo to modify the list of directory in order to ignore
|
||||||
|
# directory
|
||||||
|
for root, dirs, files in os.walk(self._path, topdown=True):
|
||||||
|
# Remove snapshots and capture
|
||||||
|
if os.path.split(root)[-1:][0] == "project-files":
|
||||||
|
dirs[:] = [d for d in dirs if d not in ("snapshots", "captures")]
|
||||||
|
|
||||||
|
# Ignore log files and OS noise
|
||||||
|
files = [f for f in files if not f.endswith('_log.txt') and not f.endswith('.log') and f != '.DS_Store']
|
||||||
|
|
||||||
|
for file in files:
|
||||||
|
path = os.path.join(root, file)
|
||||||
|
# Try open the file
|
||||||
|
try:
|
||||||
|
open(path).close()
|
||||||
|
except OSError as e:
|
||||||
|
msg = "Could not export file {}: {}".format(path, e)
|
||||||
|
log.warn(msg)
|
||||||
|
self.emit("log.warning", {"message": msg})
|
||||||
|
continue
|
||||||
|
# We rename the .gns3 project.gns3 to avoid the task to the client to guess the file name
|
||||||
|
if file.endswith(".gns3"):
|
||||||
|
self._export_project_file(path, z, include_images)
|
||||||
|
else:
|
||||||
|
# We merge the data from all server in the same project-files directory
|
||||||
|
vm_directory = os.path.join(self._path, "servers", "vm")
|
||||||
|
if os.path.commonprefix([root, vm_directory]) == vm_directory:
|
||||||
|
z.write(path, os.path.relpath(path, vm_directory), compress_type=zipfile.ZIP_DEFLATED)
|
||||||
|
else:
|
||||||
|
z.write(path, os.path.relpath(path, self._path), compress_type=zipfile.ZIP_DEFLATED)
|
||||||
|
return z
|
||||||
|
|
||||||
|
def _export_images(self, image, type, z):
|
||||||
|
"""
|
||||||
|
Take a project file (.gns3) and export images to the zip
|
||||||
|
|
||||||
|
:param image: Image path
|
||||||
|
:param type: Type of image
|
||||||
|
:param z: Zipfile instance for the export
|
||||||
|
"""
|
||||||
|
from . import MODULES
|
||||||
|
|
||||||
|
for module in MODULES:
|
||||||
|
try:
|
||||||
|
img_directory = module.instance().get_images_directory()
|
||||||
|
except NotImplementedError:
|
||||||
|
# Some modules don't have images
|
||||||
|
continue
|
||||||
|
|
||||||
|
directory = os.path.split(img_directory)[-1:][0]
|
||||||
|
|
||||||
|
if os.path.exists(image):
|
||||||
|
path = image
|
||||||
|
else:
|
||||||
|
path = os.path.join(img_directory, image)
|
||||||
|
|
||||||
|
if os.path.exists(path):
|
||||||
|
arcname = os.path.join("images", directory, os.path.basename(image))
|
||||||
|
z.write(path, arcname)
|
||||||
|
break
|
||||||
|
|
||||||
|
def _export_project_file(self, path, z, include_images):
|
||||||
|
"""
|
||||||
|
Take a project file (.gns3) and patch it for the export
|
||||||
|
|
||||||
|
:param path: Path of the .gns3
|
||||||
|
"""
|
||||||
|
|
||||||
|
with open(path) as f:
|
||||||
|
topology = json.load(f)
|
||||||
|
if "topology" in topology and "nodes" in topology["topology"]:
|
||||||
|
for node in topology["topology"]["nodes"]:
|
||||||
|
if "properties" in node and node["type"] != "DockerVM":
|
||||||
|
for prop, value in node["properties"].items():
|
||||||
|
if prop.endswith("image"):
|
||||||
|
node["properties"][prop] = os.path.basename(value)
|
||||||
|
if include_images is True:
|
||||||
|
self._export_images(value, node["type"], z)
|
||||||
|
z.writestr("project.gns3", json.dumps(topology).encode())
|
||||||
|
|
||||||
|
def import_zip(self, stream, gns3vm=True):
|
||||||
|
"""
|
||||||
|
Import a project contain in a zip file
|
||||||
|
|
||||||
|
:param stream: A io.BytesIO of the zipfile
|
||||||
|
:param gns3vm: True move docker, iou and qemu to the GNS3 VM
|
||||||
|
"""
|
||||||
|
|
||||||
|
with zipfile.ZipFile(stream) as myzip:
|
||||||
|
myzip.extractall(self.path)
|
||||||
|
|
||||||
|
project_file = os.path.join(self.path, "project.gns3")
|
||||||
|
if os.path.exists(project_file):
|
||||||
|
with open(project_file) as f:
|
||||||
|
topology = json.load(f)
|
||||||
|
topology["project_id"] = self.id
|
||||||
|
topology["name"] = self.name
|
||||||
|
topology.setdefault("topology", {})
|
||||||
|
topology["topology"].setdefault("nodes", [])
|
||||||
|
topology["topology"]["servers"] = [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"local": True,
|
||||||
|
"vm": False
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
# By default all node run on local server
|
||||||
|
for node in topology["topology"]["nodes"]:
|
||||||
|
node["server_id"] = 1
|
||||||
|
|
||||||
|
if gns3vm:
|
||||||
|
# Move to servers/vm directory the data that should be import on remote server
|
||||||
|
modules_to_vm = {
|
||||||
|
"qemu": "QemuVM",
|
||||||
|
"iou": "IOUDevice",
|
||||||
|
"docker": "DockerVM"
|
||||||
|
}
|
||||||
|
vm_directory = os.path.join(self.path, "servers", "vm", "project-files")
|
||||||
|
vm_server_use = False
|
||||||
|
|
||||||
|
for module, device_type in modules_to_vm.items():
|
||||||
|
module_directory = os.path.join(self.path, "project-files", module)
|
||||||
|
if os.path.exists(module_directory):
|
||||||
|
os.makedirs(vm_directory, exist_ok=True)
|
||||||
|
shutil.move(module_directory, os.path.join(vm_directory, module))
|
||||||
|
|
||||||
|
# Patch node to use the GNS3 VM
|
||||||
|
for node in topology["topology"]["nodes"]:
|
||||||
|
if node["type"] == device_type:
|
||||||
|
node["server_id"] = 2
|
||||||
|
vm_server_use = True
|
||||||
|
|
||||||
|
# We use the GNS3 VM. We need to add the server to the list
|
||||||
|
if vm_server_use:
|
||||||
|
topology["topology"]["servers"].append({
|
||||||
|
"id": 2,
|
||||||
|
"vm": True,
|
||||||
|
"local": False
|
||||||
|
})
|
||||||
|
|
||||||
|
# Write the modified topology
|
||||||
|
with open(project_file, "w") as f:
|
||||||
|
json.dump(topology, f, indent=4)
|
||||||
|
|
||||||
|
# Rename to a human distinctive name
|
||||||
|
shutil.move(project_file, os.path.join(self.path, self.name + ".gns3"))
|
||||||
|
if os.path.exists(os.path.join(self.path, "images")):
|
||||||
|
self._import_images()
|
||||||
|
|
||||||
|
def _import_images(self):
|
||||||
|
"""
|
||||||
|
Copy images to the images directory or delete them if they
|
||||||
|
already exists.
|
||||||
|
"""
|
||||||
|
image_dir = self._config().get("images_path")
|
||||||
|
|
||||||
|
root = os.path.join(self.path, "images")
|
||||||
|
for (dirpath, dirnames, filenames) in os.walk(root):
|
||||||
|
for filename in filenames:
|
||||||
|
path = os.path.join(dirpath, filename)
|
||||||
|
dst = os.path.join(image_dir, os.path.relpath(path, root))
|
||||||
|
os.makedirs(os.path.dirname(dst), exist_ok=True)
|
||||||
|
shutil.move(path, dst)
|
||||||
|
|
||||||
|
# Cleanup the project
|
||||||
|
shutil.rmtree(root)
|
||||||
|
@ -79,8 +79,6 @@ class ProjectManager:
|
|||||||
|
|
||||||
if project_id is not None and project_id in self._projects:
|
if project_id is not None and project_id in self._projects:
|
||||||
return self._projects[project_id]
|
return self._projects[project_id]
|
||||||
# FIXME: should we have an error?
|
|
||||||
#raise aiohttp.web.HTTPConflict(text="Project ID {} is already in use on this server".format(project_id))
|
|
||||||
project = Project(name=name, project_id=project_id, path=path, temporary=temporary)
|
project = Project(name=name, project_id=project_id, path=path, temporary=temporary)
|
||||||
self._projects[project.id] = project
|
self._projects[project.id] = project
|
||||||
return project
|
return project
|
||||||
|
@ -117,6 +117,8 @@ class Qemu(BaseManager):
|
|||||||
for path in Qemu.paths_list():
|
for path in Qemu.paths_list():
|
||||||
try:
|
try:
|
||||||
for f in os.listdir(path):
|
for f in os.listdir(path):
|
||||||
|
if f.endswith("-spice"):
|
||||||
|
continue
|
||||||
if (f.startswith("qemu-system") or f.startswith("qemu-kvm") or f == "qemu" or f == "qemu.exe") and \
|
if (f.startswith("qemu-system") or f.startswith("qemu-kvm") or f == "qemu" or f == "qemu.exe") and \
|
||||||
os.access(os.path.join(path, f), os.X_OK) and \
|
os.access(os.path.join(path, f), os.X_OK) and \
|
||||||
os.path.isfile(os.path.join(path, f)):
|
os.path.isfile(os.path.join(path, f)):
|
||||||
|
103
gns3server/modules/qemu/qcow2.py
Normal file
103
gns3server/modules/qemu/qcow2.py
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
#
|
||||||
|
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import asyncio
|
||||||
|
import struct
|
||||||
|
|
||||||
|
|
||||||
|
class Qcow2Error(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class Qcow2:
|
||||||
|
"""
|
||||||
|
Allow to parse a Qcow2 file
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, path):
|
||||||
|
|
||||||
|
self.path = path
|
||||||
|
self._reload()
|
||||||
|
|
||||||
|
def _reload(self):
|
||||||
|
# Each QCOW2 file begins with a header, in big endian format, as follows:
|
||||||
|
#
|
||||||
|
# typedef struct QCowHeader {
|
||||||
|
# uint32_t magic;
|
||||||
|
# uint32_t version;
|
||||||
|
#
|
||||||
|
# uint64_t backing_file_offset;
|
||||||
|
# uint32_t backing_file_size;
|
||||||
|
#
|
||||||
|
# uint32_t cluster_bits;
|
||||||
|
# uint64_t size; /* in bytes */
|
||||||
|
# uint32_t crypt_method;
|
||||||
|
#
|
||||||
|
# uint32_t l1_size;
|
||||||
|
# uint64_t l1_table_offset;
|
||||||
|
#
|
||||||
|
# uint64_t refcount_table_offset;
|
||||||
|
# uint32_t refcount_table_clusters;
|
||||||
|
#
|
||||||
|
# uint32_t nb_snapshots;
|
||||||
|
# uint64_t snapshots_offset;
|
||||||
|
# } QCowHeader;
|
||||||
|
struct_format = ">IIQi"
|
||||||
|
|
||||||
|
with open(self.path, 'rb') as f:
|
||||||
|
content = f.read(struct.calcsize(struct_format))
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.magic, self.version, self.backing_file_offset, self.backing_file_size = struct.unpack_from(struct_format, content)
|
||||||
|
except struct.error:
|
||||||
|
raise Qcow2Error("Invalid file header for {}".format(self.path))
|
||||||
|
|
||||||
|
if self.magic != 1363560955: # The first 4 bytes contain the characters 'Q', 'F', 'I' followed by 0xfb.
|
||||||
|
raise Qcow2Error("Invalid magic for {}".format(self.path))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def backing_file(self):
|
||||||
|
"""
|
||||||
|
When using linked clone this will return the path to the base image
|
||||||
|
|
||||||
|
:returns: None if it's not a linked clone, the path otherwise
|
||||||
|
"""
|
||||||
|
with open(self.path, 'rb') as f:
|
||||||
|
f.seek(self.backing_file_offset)
|
||||||
|
content = f.read(self.backing_file_size)
|
||||||
|
path = content.decode()
|
||||||
|
if len(path) == 0:
|
||||||
|
return None
|
||||||
|
return path
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def rebase(self, qemu_img, base_image):
|
||||||
|
"""
|
||||||
|
Rebase a linked clone in order to use the correct disk
|
||||||
|
|
||||||
|
:param qemu_img: Path to the qemu-img binary
|
||||||
|
:param base_image: Path to the base image
|
||||||
|
"""
|
||||||
|
if not os.path.exists(base_image):
|
||||||
|
raise FileNotFoundError(base_image)
|
||||||
|
command = [qemu_img, "rebase", "-u", "-b", base_image, self.path]
|
||||||
|
process = yield from asyncio.create_subprocess_exec(*command)
|
||||||
|
retcode = yield from process.wait()
|
||||||
|
if retcode != 0:
|
||||||
|
raise Qcow2Error("Could not rebase the image")
|
||||||
|
self._reload()
|
@ -30,7 +30,7 @@ import asyncio
|
|||||||
import socket
|
import socket
|
||||||
import gns3server
|
import gns3server
|
||||||
|
|
||||||
from pkg_resources import parse_version
|
from gns3server.utils import parse_version
|
||||||
from .qemu_error import QemuError
|
from .qemu_error import QemuError
|
||||||
from ..adapters.ethernet_adapter import EthernetAdapter
|
from ..adapters.ethernet_adapter import EthernetAdapter
|
||||||
from ..nios.nio_udp import NIOUDP
|
from ..nios.nio_udp import NIOUDP
|
||||||
@ -40,6 +40,9 @@ from ..base_vm import BaseVM
|
|||||||
from ...schemas.qemu import QEMU_OBJECT_SCHEMA, QEMU_PLATFORMS
|
from ...schemas.qemu import QEMU_OBJECT_SCHEMA, QEMU_PLATFORMS
|
||||||
from ...utils.asyncio import monitor_process
|
from ...utils.asyncio import monitor_process
|
||||||
from ...utils.images import md5sum
|
from ...utils.images import md5sum
|
||||||
|
from .qcow2 import Qcow2, Qcow2Error
|
||||||
|
from ...utils import macaddress_to_int, int_to_macaddress
|
||||||
|
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
@ -68,7 +71,6 @@ class QemuVM(BaseVM):
|
|||||||
self._host = server_config.get("host", "127.0.0.1")
|
self._host = server_config.get("host", "127.0.0.1")
|
||||||
self._monitor_host = server_config.get("monitor_host", "127.0.0.1")
|
self._monitor_host = server_config.get("monitor_host", "127.0.0.1")
|
||||||
self._linked_clone = linked_clone
|
self._linked_clone = linked_clone
|
||||||
self._command = []
|
|
||||||
self._process = None
|
self._process = None
|
||||||
self._cpulimit_process = None
|
self._cpulimit_process = None
|
||||||
self._monitor = None
|
self._monitor = None
|
||||||
@ -80,8 +82,8 @@ class QemuVM(BaseVM):
|
|||||||
try:
|
try:
|
||||||
self.qemu_path = qemu_path
|
self.qemu_path = qemu_path
|
||||||
except QemuError as e:
|
except QemuError as e:
|
||||||
# If the binary is not found for topologies 1.4 and later
|
# If the binary is not found for topologies 1.4 and later
|
||||||
# search via the platform otherwise use the binary name
|
# search via the platform otherwise use the binary name
|
||||||
if platform:
|
if platform:
|
||||||
self.platform = platform
|
self.platform = platform
|
||||||
else:
|
else:
|
||||||
@ -491,7 +493,8 @@ class QemuVM(BaseVM):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
if not mac_address:
|
if not mac_address:
|
||||||
self._mac_address = "00:00:ab:%s:%s:00" % (self.id[-4:-2], self.id[-2:])
|
# use the node UUID to generate a random MAC address
|
||||||
|
self._mac_address = "00:%s:%s:%s:%s:00" % (self.project.id[-4:-2], self.project.id[-2:], self.id[-4:-2], self.id[-2:])
|
||||||
else:
|
else:
|
||||||
self._mac_address = mac_address
|
self._mac_address = mac_address
|
||||||
|
|
||||||
@ -867,15 +870,16 @@ class QemuVM(BaseVM):
|
|||||||
# check if there is enough RAM to run
|
# check if there is enough RAM to run
|
||||||
self.check_available_ram(self.ram)
|
self.check_available_ram(self.ram)
|
||||||
|
|
||||||
self._command = yield from self._build_command()
|
command = yield from self._build_command()
|
||||||
command_string = " ".join(shlex.quote(s) for s in self._command)
|
command_string = " ".join(shlex.quote(s) for s in command)
|
||||||
try:
|
try:
|
||||||
log.info("Starting QEMU with: {}".format(command_string))
|
log.info("Starting QEMU with: {}".format(command_string))
|
||||||
self._stdout_file = os.path.join(self.working_dir, "qemu.log")
|
self._stdout_file = os.path.join(self.working_dir, "qemu.log")
|
||||||
log.info("logging to {}".format(self._stdout_file))
|
log.info("logging to {}".format(self._stdout_file))
|
||||||
with open(self._stdout_file, "w", encoding="utf-8") as fd:
|
with open(self._stdout_file, "w", encoding="utf-8") as fd:
|
||||||
fd.write("Start QEMU with {}\n\nExecution log:\n".format(command_string))
|
fd.write("Start QEMU with {}\n\nExecution log:\n".format(command_string))
|
||||||
self._process = yield from asyncio.create_subprocess_exec(*self._command,
|
self.command_line = ' '.join(command)
|
||||||
|
self._process = yield from asyncio.create_subprocess_exec(*command,
|
||||||
stdout=fd,
|
stdout=fd,
|
||||||
stderr=subprocess.STDOUT,
|
stderr=subprocess.STDOUT,
|
||||||
cwd=self.working_dir)
|
cwd=self.working_dir)
|
||||||
@ -988,22 +992,18 @@ class QemuVM(BaseVM):
|
|||||||
Closes this QEMU VM.
|
Closes this QEMU VM.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
log.debug('QEMU VM "{name}" [{id}] is closing'.format(name=self._name, id=self._id))
|
if not (yield from super().close()):
|
||||||
|
return False
|
||||||
|
|
||||||
self.acpi_shutdown = False
|
self.acpi_shutdown = False
|
||||||
yield from self.stop()
|
yield from self.stop()
|
||||||
|
|
||||||
if self._console:
|
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
|
||||||
self._console = None
|
|
||||||
|
|
||||||
for adapter in self._ethernet_adapters:
|
for adapter in self._ethernet_adapters:
|
||||||
if adapter is not None:
|
if adapter is not None:
|
||||||
for nio in adapter.ports.values():
|
for nio in adapter.ports.values():
|
||||||
if nio and isinstance(nio, NIOUDP):
|
if nio and isinstance(nio, NIOUDP):
|
||||||
self.manager.port_manager.release_udp_port(nio.lport, self._project)
|
self.manager.port_manager.release_udp_port(nio.lport, self._project)
|
||||||
|
|
||||||
yield from self.stop()
|
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def _get_vm_status(self):
|
def _get_vm_status(self):
|
||||||
"""
|
"""
|
||||||
@ -1084,24 +1084,24 @@ class QemuVM(BaseVM):
|
|||||||
raise QemuError("Sorry, adding a link to a started Qemu VM is not supported.")
|
raise QemuError("Sorry, adding a link to a started Qemu VM is not supported.")
|
||||||
# FIXME: does the code below work? very undocumented feature...
|
# FIXME: does the code below work? very undocumented feature...
|
||||||
# dynamically configure an UDP tunnel on the QEMU VM adapter
|
# dynamically configure an UDP tunnel on the QEMU VM adapter
|
||||||
if nio and isinstance(nio, NIOUDP):
|
# if nio and isinstance(nio, NIOUDP):
|
||||||
if self._legacy_networking:
|
# if self._legacy_networking:
|
||||||
yield from self._control_vm("host_net_remove {} gns3-{}".format(adapter_number, adapter_number))
|
# yield from self._control_vm("host_net_remove {} gns3-{}".format(adapter_number, adapter_number))
|
||||||
yield from self._control_vm("host_net_add udp vlan={},name=gns3-{},sport={},dport={},daddr={}".format(adapter_number,
|
# yield from self._control_vm("host_net_add udp vlan={},name=gns3-{},sport={},dport={},daddr={}".format(adapter_number,
|
||||||
adapter_number,
|
# adapter_number,
|
||||||
nio.lport,
|
# nio.lport,
|
||||||
nio.rport,
|
# nio.rport,
|
||||||
nio.rhost))
|
# nio.rhost))
|
||||||
else:
|
# else:
|
||||||
# Apparently there is a bug in Qemu...
|
# # Apparently there is a bug in Qemu...
|
||||||
# netdev_add [user|tap|socket|hubport|netmap],id=str[,prop=value][,...] -- add host network device
|
# # netdev_add [user|tap|socket|hubport|netmap],id=str[,prop=value][,...] -- add host network device
|
||||||
# netdev_del id -- remove host network device
|
# # netdev_del id -- remove host network device
|
||||||
yield from self._control_vm("netdev_del gns3-{}".format(adapter_number))
|
# yield from self._control_vm("netdev_del gns3-{}".format(adapter_number))
|
||||||
yield from self._control_vm("netdev_add socket,id=gns3-{},udp={}:{},localaddr={}:{}".format(adapter_number,
|
# yield from self._control_vm("netdev_add socket,id=gns3-{},udp={}:{},localaddr={}:{}".format(adapter_number,
|
||||||
nio.rhost,
|
# nio.rhost,
|
||||||
nio.rport,
|
# nio.rport,
|
||||||
self._host,
|
# self._host,
|
||||||
nio.lport))
|
# nio.lport))
|
||||||
|
|
||||||
adapter.add_nio(0, nio)
|
adapter.add_nio(0, nio)
|
||||||
log.info('QEMU VM "{name}" [{id}]: {nio} added to adapter {adapter_number}'.format(name=self._name,
|
log.info('QEMU VM "{name}" [{id}]: {nio} added to adapter {adapter_number}'.format(name=self._name,
|
||||||
@ -1237,90 +1237,47 @@ class QemuVM(BaseVM):
|
|||||||
options = []
|
options = []
|
||||||
qemu_img_path = self._get_qemu_img()
|
qemu_img_path = self._get_qemu_img()
|
||||||
|
|
||||||
if self._hda_disk_image:
|
drives = ["a", "b", "c", "d"]
|
||||||
if not os.path.isfile(self._hda_disk_image) or not os.path.exists(self._hda_disk_image):
|
|
||||||
if os.path.islink(self._hda_disk_image):
|
for disk_index, drive in enumerate(drives):
|
||||||
raise QemuError("hda disk image '{}' linked to '{}' is not accessible".format(self._hda_disk_image, os.path.realpath(self._hda_disk_image)))
|
disk_image = getattr(self, "_hd{}_disk_image".format(drive))
|
||||||
|
interface = getattr(self, "hd{}_disk_interface".format(drive))
|
||||||
|
|
||||||
|
if not disk_image:
|
||||||
|
continue
|
||||||
|
|
||||||
|
disk_name = "hd" + drive
|
||||||
|
|
||||||
|
if not os.path.isfile(disk_image) or not os.path.exists(disk_image):
|
||||||
|
if os.path.islink(disk_image):
|
||||||
|
raise QemuError("{} disk image '{}' linked to '{}' is not accessible".format(disk_name, disk_image, os.path.realpath(disk_image)))
|
||||||
else:
|
else:
|
||||||
raise QemuError("hda disk image '{}' is not accessible".format(self._hda_disk_image))
|
raise QemuError("{} disk image '{}' is not accessible".format(disk_name, disk_image))
|
||||||
if self._linked_clone:
|
if self._linked_clone:
|
||||||
hda_disk = os.path.join(self.working_dir, "hda_disk.qcow2")
|
disk = os.path.join(self.working_dir, "{}_disk.qcow2".format(disk_name))
|
||||||
if not os.path.exists(hda_disk):
|
if not os.path.exists(disk):
|
||||||
# create the disk
|
# create the disk
|
||||||
try:
|
try:
|
||||||
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
||||||
"backing_file={}".format(self._hda_disk_image),
|
"backing_file={}".format(disk_image),
|
||||||
"-f", "qcow2", hda_disk)
|
"-f", "qcow2", disk)
|
||||||
retcode = yield from process.wait()
|
retcode = yield from process.wait()
|
||||||
|
if retcode is not None and retcode != 0:
|
||||||
|
raise QemuError("Could not create {} disk image".format(disk_name))
|
||||||
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
except (OSError, subprocess.SubprocessError) as e:
|
||||||
raise QemuError("Could not create hda disk image {}".format(e))
|
raise QemuError("Could not create {} disk image {}".format(disk_name, e))
|
||||||
else:
|
|
||||||
hda_disk = self._hda_disk_image
|
|
||||||
options.extend(["-drive", 'file={},if={},index=0,media=disk'.format(hda_disk, self.hda_disk_interface)])
|
|
||||||
|
|
||||||
if self._hdb_disk_image:
|
|
||||||
if not os.path.isfile(self._hdb_disk_image) or not os.path.exists(self._hdb_disk_image):
|
|
||||||
if os.path.islink(self._hdb_disk_image):
|
|
||||||
raise QemuError("hdb disk image '{}' linked to '{}' is not accessible".format(self._hdb_disk_image, os.path.realpath(self._hdb_disk_image)))
|
|
||||||
else:
|
else:
|
||||||
raise QemuError("hdb disk image '{}' is not accessible".format(self._hdb_disk_image))
|
# The disk exists we check if the clone work
|
||||||
if self._linked_clone:
|
|
||||||
hdb_disk = os.path.join(self.working_dir, "hdb_disk.qcow2")
|
|
||||||
if not os.path.exists(hdb_disk):
|
|
||||||
try:
|
try:
|
||||||
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
qcow2 = Qcow2(disk)
|
||||||
"backing_file={}".format(self._hdb_disk_image),
|
yield from qcow2.rebase(qemu_img_path, disk_image)
|
||||||
"-f", "qcow2", hdb_disk)
|
except (Qcow2Error, OSError) as e:
|
||||||
retcode = yield from process.wait()
|
raise QemuError("Could not use qcow2 disk image {} for {} {}".format(disk_image, disk_name, e))
|
||||||
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
|
||||||
raise QemuError("Could not create hdb disk image {}".format(e))
|
|
||||||
else:
|
|
||||||
hdb_disk = self._hdb_disk_image
|
|
||||||
options.extend(["-drive", 'file={},if={},index=1,media=disk'.format(hdb_disk, self.hdb_disk_interface)])
|
|
||||||
|
|
||||||
if self._hdc_disk_image:
|
|
||||||
if not os.path.isfile(self._hdc_disk_image) or not os.path.exists(self._hdc_disk_image):
|
|
||||||
if os.path.islink(self._hdc_disk_image):
|
|
||||||
raise QemuError("hdc disk image '{}' linked to '{}' is not accessible".format(self._hdc_disk_image, os.path.realpath(self._hdc_disk_image)))
|
|
||||||
else:
|
|
||||||
raise QemuError("hdc disk image '{}' is not accessible".format(self._hdc_disk_image))
|
|
||||||
if self._linked_clone:
|
|
||||||
hdc_disk = os.path.join(self.working_dir, "hdc_disk.qcow2")
|
|
||||||
if not os.path.exists(hdc_disk):
|
|
||||||
try:
|
|
||||||
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
|
||||||
"backing_file={}".format(self._hdc_disk_image),
|
|
||||||
"-f", "qcow2", hdc_disk)
|
|
||||||
retcode = yield from process.wait()
|
|
||||||
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
|
||||||
raise QemuError("Could not create hdc disk image {}".format(e))
|
|
||||||
else:
|
else:
|
||||||
hdc_disk = self._hdc_disk_image
|
disk = disk_image
|
||||||
options.extend(["-drive", 'file={},if={},index=2,media=disk'.format(hdc_disk, self.hdc_disk_interface)])
|
options.extend(["-drive", 'file={},if={},index={},media=disk'.format(disk, interface, disk_index)])
|
||||||
|
|
||||||
if self._hdd_disk_image:
|
|
||||||
if not os.path.isfile(self._hdd_disk_image) or not os.path.exists(self._hdd_disk_image):
|
|
||||||
if os.path.islink(self._hdd_disk_image):
|
|
||||||
raise QemuError("hdd disk image '{}' linked to '{}' is not accessible".format(self._hdd_disk_image, os.path.realpath(self._hdd_disk_image)))
|
|
||||||
else:
|
|
||||||
raise QemuError("hdd disk image '{}' is not accessible".format(self._hdd_disk_image))
|
|
||||||
if self._linked_clone:
|
|
||||||
hdd_disk = os.path.join(self.working_dir, "hdd_disk.qcow2")
|
|
||||||
if not os.path.exists(hdd_disk):
|
|
||||||
try:
|
|
||||||
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
|
||||||
"backing_file={}".format(self._hdd_disk_image),
|
|
||||||
"-f", "qcow2", hdd_disk)
|
|
||||||
retcode = yield from process.wait()
|
|
||||||
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
|
||||||
raise QemuError("Could not create hdd disk image {}".format(e))
|
|
||||||
else:
|
|
||||||
hdd_disk = self._hdd_disk_image
|
|
||||||
options.extend(["-drive", 'file={},if={},index=3,media=disk'.format(hdd_disk, self.hdd_disk_interface)])
|
|
||||||
|
|
||||||
return options
|
return options
|
||||||
|
|
||||||
@ -1374,7 +1331,7 @@ class QemuVM(BaseVM):
|
|||||||
patched_qemu = True
|
patched_qemu = True
|
||||||
|
|
||||||
for adapter_number, adapter in enumerate(self._ethernet_adapters):
|
for adapter_number, adapter in enumerate(self._ethernet_adapters):
|
||||||
mac = "%s%02x" % (self._mac_address[:-2], (int(self._mac_address[-2:]) + adapter_number) % 255)
|
mac = int_to_macaddress(macaddress_to_int(self._mac_address) + adapter_number)
|
||||||
nio = adapter.get_nio(0)
|
nio = adapter.get_nio(0)
|
||||||
if self._legacy_networking:
|
if self._legacy_networking:
|
||||||
# legacy QEMU networking syntax (-net)
|
# legacy QEMU networking syntax (-net)
|
||||||
@ -1467,6 +1424,11 @@ class QemuVM(BaseVM):
|
|||||||
command.extend(["-smp", "cpus={}".format(self._cpus)])
|
command.extend(["-smp", "cpus={}".format(self._cpus)])
|
||||||
if self._run_with_kvm(self.qemu_path, self._options):
|
if self._run_with_kvm(self.qemu_path, self._options):
|
||||||
command.extend(["-enable-kvm"])
|
command.extend(["-enable-kvm"])
|
||||||
|
version = yield from self.manager.get_qemu_version(self.qemu_path)
|
||||||
|
# Issue on some combo Intel CPU + KVM + Qemu 2.4.0
|
||||||
|
# https://github.com/GNS3/gns3-server/issues/685
|
||||||
|
if version and parse_version(version) >= parse_version("2.4.0") and self.platform == "x86_64":
|
||||||
|
command.extend(["-machine", "smm=off"])
|
||||||
command.extend(["-boot", "order={}".format(self._boot_priority)])
|
command.extend(["-boot", "order={}".format(self._boot_priority)])
|
||||||
cdrom_option = self._cdrom_option()
|
cdrom_option = self._cdrom_option()
|
||||||
command.extend(cdrom_option)
|
command.extend(cdrom_option)
|
||||||
|
@ -66,7 +66,10 @@ class VirtualBox(BaseManager):
|
|||||||
elif sys.platform.startswith("darwin"):
|
elif sys.platform.startswith("darwin"):
|
||||||
vboxmanage_path = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage"
|
vboxmanage_path = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage"
|
||||||
else:
|
else:
|
||||||
vboxmanage_path = shutil.which("vboxmanage")
|
vboxmanage_path = "vboxmanage"
|
||||||
|
|
||||||
|
if not os.path.isabs(vboxmanage_path):
|
||||||
|
vboxmanage_path = shutil.which(vboxmanage_path)
|
||||||
|
|
||||||
if not vboxmanage_path:
|
if not vboxmanage_path:
|
||||||
raise VirtualBoxError("Could not find VBoxManage")
|
raise VirtualBoxError("Could not find VBoxManage")
|
||||||
@ -83,8 +86,8 @@ class VirtualBox(BaseManager):
|
|||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def execute(self, subcommand, args, timeout=60):
|
def execute(self, subcommand, args, timeout=60):
|
||||||
|
|
||||||
# We use a lock prevent parallel execution due to strange errors
|
# We use a lock prevent parallel execution due to strange errors
|
||||||
# reported by a user and reproduced by us.
|
# reported by a user and reproduced by us.
|
||||||
# https://github.com/GNS3/gns3-gui/issues/261
|
# https://github.com/GNS3/gns3-gui/issues/261
|
||||||
with (yield from self._execute_lock):
|
with (yield from self._execute_lock):
|
||||||
vboxmanage_path = self.vboxmanage_path
|
vboxmanage_path = self.vboxmanage_path
|
||||||
|
@ -28,9 +28,9 @@ import json
|
|||||||
import socket
|
import socket
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
from pkg_resources import parse_version
|
from gns3server.utils import parse_version
|
||||||
from gns3server.utils.telnet_server import TelnetServer
|
from gns3server.utils.telnet_server import TelnetServer
|
||||||
from gns3server.utils.asyncio import wait_for_file_creation, wait_for_named_pipe_creation
|
from gns3server.utils.asyncio import wait_for_file_creation, wait_for_named_pipe_creation, locked_coroutine
|
||||||
from .virtualbox_error import VirtualBoxError
|
from .virtualbox_error import VirtualBoxError
|
||||||
from ..nios.nio_udp import NIOUDP
|
from ..nios.nio_udp import NIOUDP
|
||||||
from ..nios.nio_nat import NIONAT
|
from ..nios.nio_nat import NIONAT
|
||||||
@ -60,7 +60,6 @@ class VirtualBoxVM(BaseVM):
|
|||||||
self._system_properties = {}
|
self._system_properties = {}
|
||||||
self._telnet_server_thread = None
|
self._telnet_server_thread = None
|
||||||
self._serial_pipe = None
|
self._serial_pipe = None
|
||||||
self._closed = False
|
|
||||||
|
|
||||||
# VirtualBox settings
|
# VirtualBox settings
|
||||||
self._adapters = adapters
|
self._adapters = adapters
|
||||||
@ -159,7 +158,7 @@ class VirtualBoxVM(BaseVM):
|
|||||||
if self.id and os.path.isdir(os.path.join(self.working_dir, self._vmname)):
|
if self.id and os.path.isdir(os.path.join(self.working_dir, self._vmname)):
|
||||||
vbox_file = os.path.join(self.working_dir, self._vmname, self._vmname + ".vbox")
|
vbox_file = os.path.join(self.working_dir, self._vmname, self._vmname + ".vbox")
|
||||||
yield from self.manager.execute("registervm", [vbox_file])
|
yield from self.manager.execute("registervm", [vbox_file])
|
||||||
yield from self._reattach_hdds()
|
yield from self._reattach_linked_hdds()
|
||||||
else:
|
else:
|
||||||
yield from self._create_linked_clone()
|
yield from self._create_linked_clone()
|
||||||
|
|
||||||
@ -231,7 +230,7 @@ class VirtualBoxVM(BaseVM):
|
|||||||
if (yield from self.check_hw_virtualization()):
|
if (yield from self.check_hw_virtualization()):
|
||||||
self._hw_virtualization = True
|
self._hw_virtualization = True
|
||||||
|
|
||||||
@asyncio.coroutine
|
@locked_coroutine
|
||||||
def stop(self):
|
def stop(self):
|
||||||
"""
|
"""
|
||||||
Stops this VirtualBox VM.
|
Stops this VirtualBox VM.
|
||||||
@ -251,7 +250,7 @@ class VirtualBoxVM(BaseVM):
|
|||||||
log.debug("Stop result: {}".format(result))
|
log.debug("Stop result: {}".format(result))
|
||||||
|
|
||||||
log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
|
log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
|
||||||
# yield from asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM
|
yield from asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM
|
||||||
try:
|
try:
|
||||||
# deactivate the first serial port
|
# deactivate the first serial port
|
||||||
yield from self._modify_vm("--uart1 off")
|
yield from self._modify_vm("--uart1 off")
|
||||||
@ -314,7 +313,10 @@ class VirtualBoxVM(BaseVM):
|
|||||||
return hdds
|
return hdds
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def _reattach_hdds(self):
|
def _reattach_linked_hdds(self):
|
||||||
|
"""
|
||||||
|
Reattach linked cloned hard disks.
|
||||||
|
"""
|
||||||
|
|
||||||
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
|
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
|
||||||
try:
|
try:
|
||||||
@ -333,10 +335,67 @@ class VirtualBoxVM(BaseVM):
|
|||||||
device=hdd_info["device"],
|
device=hdd_info["device"],
|
||||||
medium=hdd_file))
|
medium=hdd_file))
|
||||||
|
|
||||||
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium "{}"'.format(hdd_info["controller"],
|
try:
|
||||||
hdd_info["port"],
|
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium "{}"'.format(hdd_info["controller"],
|
||||||
hdd_info["device"],
|
hdd_info["port"],
|
||||||
hdd_file))
|
hdd_info["device"],
|
||||||
|
hdd_file))
|
||||||
|
|
||||||
|
except VirtualBoxError as e:
|
||||||
|
log.warn("VirtualBox VM '{name}' [{id}] error reattaching HDD {controller} {port} {device} {medium}: {error}".format(name=self.name,
|
||||||
|
id=self.id,
|
||||||
|
controller=hdd_info["controller"],
|
||||||
|
port=hdd_info["port"],
|
||||||
|
device=hdd_info["device"],
|
||||||
|
medium=hdd_file,
|
||||||
|
error=e))
|
||||||
|
continue
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def save_linked_hdds_info(self):
|
||||||
|
"""
|
||||||
|
Save linked cloned hard disks information.
|
||||||
|
|
||||||
|
:returns: disk table information
|
||||||
|
"""
|
||||||
|
|
||||||
|
hdd_table = []
|
||||||
|
if self._linked_clone:
|
||||||
|
if os.path.exists(self.working_dir):
|
||||||
|
hdd_files = yield from self._get_all_hdd_files()
|
||||||
|
vm_info = yield from self._get_vm_info()
|
||||||
|
for entry, value in vm_info.items():
|
||||||
|
match = re.search("^([\s\w]+)\-(\d)\-(\d)$", entry) # match Controller-PortNumber-DeviceNumber entry
|
||||||
|
if match:
|
||||||
|
controller = match.group(1)
|
||||||
|
port = match.group(2)
|
||||||
|
device = match.group(3)
|
||||||
|
if value in hdd_files and os.path.exists(os.path.join(self.working_dir, self._vmname, "Snapshots", os.path.basename(value))):
|
||||||
|
log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
|
||||||
|
id=self.id,
|
||||||
|
controller=controller,
|
||||||
|
port=port,
|
||||||
|
device=device))
|
||||||
|
hdd_table.append(
|
||||||
|
{
|
||||||
|
"hdd": os.path.basename(value),
|
||||||
|
"controller": controller,
|
||||||
|
"port": port,
|
||||||
|
"device": device,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
if hdd_table:
|
||||||
|
try:
|
||||||
|
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
|
||||||
|
with open(hdd_info_file, "w", encoding="utf-8") as f:
|
||||||
|
json.dump(hdd_table, f, indent=4)
|
||||||
|
except OSError as e:
|
||||||
|
log.warning("VirtualBox VM '{name}' [{id}] could not write HHD info file: {error}".format(name=self.name,
|
||||||
|
id=self.id,
|
||||||
|
error=e.strerror))
|
||||||
|
|
||||||
|
return hdd_table
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def close(self):
|
def close(self):
|
||||||
@ -348,6 +407,9 @@ class VirtualBoxVM(BaseVM):
|
|||||||
# VM is already closed
|
# VM is already closed
|
||||||
return
|
return
|
||||||
|
|
||||||
|
if not (yield from super().close()):
|
||||||
|
return False
|
||||||
|
|
||||||
log.debug("VirtualBox VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
|
log.debug("VirtualBox VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
|
||||||
if self._console:
|
if self._console:
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||||
@ -363,47 +425,29 @@ class VirtualBoxVM(BaseVM):
|
|||||||
yield from self.stop()
|
yield from self.stop()
|
||||||
|
|
||||||
if self._linked_clone:
|
if self._linked_clone:
|
||||||
hdd_table = []
|
hdd_table = yield from self.save_linked_hdds_info()
|
||||||
if os.path.exists(self.working_dir):
|
for hdd in hdd_table.copy():
|
||||||
hdd_files = yield from self._get_all_hdd_files()
|
log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
|
||||||
vm_info = yield from self._get_vm_info()
|
id=self.id,
|
||||||
for entry, value in vm_info.items():
|
controller=hdd["controller"],
|
||||||
match = re.search("^([\s\w]+)\-(\d)\-(\d)$", entry)
|
port=hdd["port"],
|
||||||
if match:
|
device=hdd["device"]))
|
||||||
controller = match.group(1)
|
try:
|
||||||
port = match.group(2)
|
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(hdd["controller"],
|
||||||
device = match.group(3)
|
hdd["port"],
|
||||||
if value in hdd_files:
|
hdd["device"]))
|
||||||
log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
|
except VirtualBoxError as e:
|
||||||
id=self.id,
|
log.warn("VirtualBox VM '{name}' [{id}] error detaching HDD {controller} {port} {device}: {error}".format(name=self.name,
|
||||||
controller=controller,
|
id=self.id,
|
||||||
port=port,
|
controller=hdd["controller"],
|
||||||
device=device))
|
port=hdd["port"],
|
||||||
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(controller,
|
device=hdd["device"],
|
||||||
port,
|
error=e))
|
||||||
device))
|
continue
|
||||||
hdd_table.append(
|
|
||||||
{
|
|
||||||
"hdd": os.path.basename(value),
|
|
||||||
"controller": controller,
|
|
||||||
"port": port,
|
|
||||||
"device": device,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
log.info("VirtualBox VM '{name}' [{id}] unregistering".format(name=self.name, id=self.id))
|
log.info("VirtualBox VM '{name}' [{id}] unregistering".format(name=self.name, id=self.id))
|
||||||
yield from self.manager.execute("unregistervm", [self._name])
|
yield from self.manager.execute("unregistervm", [self._name])
|
||||||
|
|
||||||
if hdd_table:
|
|
||||||
try:
|
|
||||||
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
|
|
||||||
with open(hdd_info_file, "w", encoding="utf-8") as f:
|
|
||||||
json.dump(hdd_table, f, indent=4)
|
|
||||||
except OSError as e:
|
|
||||||
log.warning("VirtualBox VM '{name}' [{id}] could not write HHD info file: {error}".format(name=self.name,
|
|
||||||
id=self.id,
|
|
||||||
error=e.strerror))
|
|
||||||
|
|
||||||
log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
|
log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
|
||||||
self._closed = True
|
self._closed = True
|
||||||
|
|
||||||
@ -879,6 +923,14 @@ class VirtualBoxVM(BaseVM):
|
|||||||
yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
|
yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
|
||||||
yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
|
yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
|
||||||
yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
|
yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
|
||||||
|
|
||||||
|
# check if the UDP tunnel has been correctly set
|
||||||
|
vm_info = yield from self._get_vm_info()
|
||||||
|
generic_driver_number = "generic{}".format(adapter_number + 1)
|
||||||
|
if generic_driver_number not in vm_info and vm_info[generic_driver_number] != "UDPTunnel":
|
||||||
|
log.warning("UDP tunnel has not been set on nic: {}".format(adapter_number + 1))
|
||||||
|
self.project.emit("log.warning", {"message": "UDP tunnel has not been set on nic: {}".format(adapter_number + 1)})
|
||||||
|
|
||||||
elif isinstance(nio, NIONAT):
|
elif isinstance(nio, NIONAT):
|
||||||
yield from self._control_vm("nic{} nat".format(adapter_number + 1))
|
yield from self._control_vm("nic{} nat".format(adapter_number + 1))
|
||||||
yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
|
yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
|
||||||
|
@ -31,13 +31,14 @@ import codecs
|
|||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
from gns3server.utils.interfaces import interfaces
|
from gns3server.utils.interfaces import interfaces
|
||||||
from gns3server.utils.asyncio import subprocess_check_output
|
from gns3server.utils.asyncio import subprocess_check_output
|
||||||
|
from gns3server.utils import parse_version
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
from ..base_manager import BaseManager
|
from gns3server.modules.base_manager import BaseManager
|
||||||
from .vmware_vm import VMwareVM
|
from gns3server.modules.vmware.vmware_vm import VMwareVM
|
||||||
from .vmware_error import VMwareError
|
from gns3server.modules.vmware.vmware_error import VMwareError
|
||||||
from .nio_vmnet import NIOVMNET
|
from gns3server.modules.vmware.nio_vmnet import NIOVMNET
|
||||||
|
|
||||||
|
|
||||||
class VMware(BaseManager):
|
class VMware(BaseManager):
|
||||||
@ -96,33 +97,26 @@ class VMware(BaseManager):
|
|||||||
if sys.platform.startswith("win"):
|
if sys.platform.startswith("win"):
|
||||||
vmrun_path = shutil.which("vmrun")
|
vmrun_path = shutil.which("vmrun")
|
||||||
if vmrun_path is None:
|
if vmrun_path is None:
|
||||||
# look for vmrun.exe in default VMware Workstation directory
|
# look for vmrun.exe using the VMware Workstation directory listed in the registry
|
||||||
vmrun_ws = os.path.expandvars(r"%PROGRAMFILES(X86)%\VMware\VMware Workstation\vmrun.exe")
|
vmrun_path = self._find_vmrun_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")
|
||||||
if os.path.exists(vmrun_ws):
|
|
||||||
vmrun_path = vmrun_ws
|
|
||||||
else:
|
|
||||||
# look for vmrun.exe using the directory listed in the registry
|
|
||||||
vmrun_path = self._find_vmrun_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")
|
|
||||||
if vmrun_path is None:
|
if vmrun_path is None:
|
||||||
# look for vmrun.exe in default VMware VIX directory
|
# look for vmrun.exe using the VIX directory listed in the registry
|
||||||
vmrun_vix = os.path.expandvars(r"%PROGRAMFILES(X86)%\VMware\VMware VIX\vmrun.exe")
|
vmrun_path = self._find_vmrun_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware VIX")
|
||||||
if os.path.exists(vmrun_vix):
|
|
||||||
vmrun_path = vmrun_vix
|
|
||||||
else:
|
|
||||||
# look for vmrun.exe using the directory listed in the registry
|
|
||||||
vmrun_path = self._find_vmrun_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware VIX")
|
|
||||||
elif sys.platform.startswith("darwin"):
|
elif sys.platform.startswith("darwin"):
|
||||||
vmrun_path = "/Applications/VMware Fusion.app/Contents/Library/vmrun"
|
vmrun_path = "/Applications/VMware Fusion.app/Contents/Library/vmrun"
|
||||||
else:
|
else:
|
||||||
vmrun_path = shutil.which("vmrun")
|
vmrun_path = "vmrun"
|
||||||
|
|
||||||
|
if not os.path.isabs(vmrun_path):
|
||||||
|
vmrun_path = shutil.which(vmrun_path)
|
||||||
|
|
||||||
if not vmrun_path:
|
if not vmrun_path:
|
||||||
raise VMwareError("Could not find vmrun")
|
raise VMwareError("Could not find VMware vmrun, please make sure it is installed")
|
||||||
if not os.path.isfile(vmrun_path):
|
if not os.path.isfile(vmrun_path):
|
||||||
raise VMwareError("vmrun {} is not accessible".format(vmrun_path))
|
raise VMwareError("vmrun {} is not accessible".format(vmrun_path))
|
||||||
if not os.access(vmrun_path, os.X_OK):
|
if not os.access(vmrun_path, os.X_OK):
|
||||||
raise VMwareError("vmrun is not executable")
|
raise VMwareError("vmrun is not executable")
|
||||||
if os.path.basename(vmrun_path) not in ["vmrun", "vmrun.exe"]:
|
if os.path.basename(vmrun_path).lower() not in ["vmrun", "vmrun.exe"]:
|
||||||
raise VMwareError("Invalid vmrun executable name {}".format(os.path.basename(vmrun_path)))
|
raise VMwareError("Invalid vmrun executable name {}".format(os.path.basename(vmrun_path)))
|
||||||
|
|
||||||
self._vmrun_path = vmrun_path
|
self._vmrun_path = vmrun_path
|
||||||
@ -146,6 +140,50 @@ class VMware(BaseManager):
|
|||||||
version = match.group(1)
|
version = match.group(1)
|
||||||
return version
|
return version
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def _check_vmware_player_requirements(self, player_version):
|
||||||
|
"""
|
||||||
|
Check minimum requirements to use VMware Player.
|
||||||
|
|
||||||
|
VIX 1.13 was the release for Player 6.
|
||||||
|
VIX 1.14 was the release for Player 7.
|
||||||
|
VIX 1.15 was the release for Workstation Player 12.
|
||||||
|
|
||||||
|
:param player_version: VMware Player major version.
|
||||||
|
"""
|
||||||
|
|
||||||
|
player_version = int(player_version)
|
||||||
|
if player_version < 6:
|
||||||
|
raise VMwareError("Using VMware Player requires version 6 or above")
|
||||||
|
elif player_version == 6:
|
||||||
|
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
|
||||||
|
elif player_version == 7:
|
||||||
|
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
|
||||||
|
elif player_version >= 12:
|
||||||
|
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def _check_vmware_workstation_requirements(self, ws_version):
|
||||||
|
"""
|
||||||
|
Check minimum requirements to use VMware Workstation.
|
||||||
|
|
||||||
|
VIX 1.13 was the release for Workstation 10.
|
||||||
|
VIX 1.14 was the release for Workstation 11.
|
||||||
|
VIX 1.15 was the release for Workstation Pro 12.
|
||||||
|
|
||||||
|
:param ws_version: VMware Workstation major version.
|
||||||
|
"""
|
||||||
|
|
||||||
|
ws_version = int(ws_version)
|
||||||
|
if ws_version < 10:
|
||||||
|
raise VMwareError("Using VMware Workstation requires version 10 or above")
|
||||||
|
elif ws_version == 10:
|
||||||
|
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
|
||||||
|
elif ws_version == 11:
|
||||||
|
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
|
||||||
|
elif ws_version >= 12:
|
||||||
|
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def check_vmware_version(self):
|
def check_vmware_version(self):
|
||||||
"""
|
"""
|
||||||
@ -159,18 +197,17 @@ class VMware(BaseManager):
|
|||||||
player_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Player")
|
player_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Player")
|
||||||
if player_version:
|
if player_version:
|
||||||
log.debug("VMware Player version {} detected".format(player_version))
|
log.debug("VMware Player version {} detected".format(player_version))
|
||||||
if int(player_version) < 6:
|
yield from self._check_vmware_player_requirements(player_version)
|
||||||
raise VMwareError("Using VMware Player requires version 6 or above")
|
|
||||||
else:
|
else:
|
||||||
log.warning("Could not find VMware version")
|
log.warning("Could not find VMware version")
|
||||||
else:
|
else:
|
||||||
log.debug("VMware Workstation version {} detected".format(ws_version))
|
log.debug("VMware Workstation version {} detected".format(ws_version))
|
||||||
if int(ws_version) < 10:
|
yield from self._check_vmware_workstation_requirements(ws_version)
|
||||||
raise VMwareError("Using VMware Workstation requires version 10 or above")
|
|
||||||
return
|
|
||||||
else:
|
else:
|
||||||
if sys.platform.startswith("darwin"):
|
if sys.platform.startswith("darwin"):
|
||||||
return # FIXME: no version checking on Mac OS X
|
if not os.path.isdir("/Applications/VMware Fusion.app"):
|
||||||
|
raise VMwareError("VMware Fusion is not installed in the standard location /Applications/VMware Fusion.app")
|
||||||
|
return # FIXME: no version checking on Mac OS X but we support all versions of fusion
|
||||||
|
|
||||||
vmware_path = VMware._get_linux_vmware_binary()
|
vmware_path = VMware._get_linux_vmware_binary()
|
||||||
if vmware_path is None:
|
if vmware_path is None:
|
||||||
@ -181,16 +218,16 @@ class VMware(BaseManager):
|
|||||||
match = re.search("VMware Workstation ([0-9]+)\.", output)
|
match = re.search("VMware Workstation ([0-9]+)\.", output)
|
||||||
version = None
|
version = None
|
||||||
if match:
|
if match:
|
||||||
|
# VMware Workstation has been detected
|
||||||
version = match.group(1)
|
version = match.group(1)
|
||||||
log.debug("VMware Workstation version {} detected".format(version))
|
log.debug("VMware Workstation version {} detected".format(version))
|
||||||
if int(version) < 10:
|
yield from self._check_vmware_workstation_requirements(version)
|
||||||
raise VMwareError("Using VMware Workstation requires version 10 or above")
|
|
||||||
match = re.search("VMware Player ([0-9]+)\.", output)
|
match = re.search("VMware Player ([0-9]+)\.", output)
|
||||||
if match:
|
if match:
|
||||||
|
# VMware Player has been detected
|
||||||
version = match.group(1)
|
version = match.group(1)
|
||||||
log.debug("VMware Player version {} detected".format(version))
|
log.debug("VMware Player version {} detected".format(version))
|
||||||
if int(version) < 6:
|
yield from self._check_vmware_player_requirements(version)
|
||||||
raise VMwareError("Using VMware Player requires version 6 or above")
|
|
||||||
if version is None:
|
if version is None:
|
||||||
log.warning("Could not find VMware version. Output of VMware: {}".format(output))
|
log.warning("Could not find VMware version. Output of VMware: {}".format(output))
|
||||||
raise VMwareError("Could not find VMware version. Output of VMware: {}".format(output))
|
raise VMwareError("Could not find VMware version. Output of VMware: {}".format(output))
|
||||||
@ -358,6 +395,40 @@ class VMware(BaseManager):
|
|||||||
|
|
||||||
return stdout_data.decode("utf-8", errors="ignore").splitlines()
|
return stdout_data.decode("utf-8", errors="ignore").splitlines()
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def check_vmrun_version(self, minimum_required_version="1.13.0"):
|
||||||
|
"""
|
||||||
|
Checks the vmrun version.
|
||||||
|
|
||||||
|
VMware VIX library version must be at least >= 1.13 by default
|
||||||
|
VIX 1.13 was the release for VMware Fusion 6, Workstation 10, and Player 6.
|
||||||
|
VIX 1.14 was the release for VMware Fusion 7, Workstation 11 and Player 7.
|
||||||
|
VIX 1.15 was the release for VMware Fusion 8, Workstation Pro 12 and Workstation Player 12.
|
||||||
|
|
||||||
|
:param required_version: required vmrun version number
|
||||||
|
"""
|
||||||
|
|
||||||
|
with (yield from self._execute_lock):
|
||||||
|
vmrun_path = self.vmrun_path
|
||||||
|
if not vmrun_path:
|
||||||
|
vmrun_path = self.find_vmrun()
|
||||||
|
|
||||||
|
try:
|
||||||
|
output = yield from subprocess_check_output(vmrun_path)
|
||||||
|
match = re.search("vmrun version ([0-9\.]+)", output)
|
||||||
|
version = None
|
||||||
|
if match:
|
||||||
|
version = match.group(1)
|
||||||
|
log.debug("VMware vmrun version {} detected, minimum required: {}".format(version, minimum_required_version))
|
||||||
|
if parse_version(version) < parse_version(minimum_required_version):
|
||||||
|
raise VMwareError("VMware vmrun executable version must be >= version {}".format(minimum_required_version))
|
||||||
|
if version is None:
|
||||||
|
log.warning("Could not find VMware vmrun version. Output: {}".format(output))
|
||||||
|
raise VMwareError("Could not find VMware vmrun version. Output: {}".format(output))
|
||||||
|
except (OSError, subprocess.SubprocessError) as e:
|
||||||
|
log.error("Error while looking for the VMware vmrun version: {}".format(e))
|
||||||
|
raise VMwareError("Error while looking for the VMware vmrun version: {}".format(e))
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def remove_from_vmware_inventory(self, vmx_path):
|
def remove_from_vmware_inventory(self, vmx_path):
|
||||||
"""
|
"""
|
||||||
@ -382,7 +453,7 @@ class VMware(BaseManager):
|
|||||||
break
|
break
|
||||||
|
|
||||||
if vmlist_entry is not None:
|
if vmlist_entry is not None:
|
||||||
for name in inventory_pairs.keys():
|
for name in inventory_pairs.copy().keys():
|
||||||
if name.startswith(vmlist_entry):
|
if name.startswith(vmlist_entry):
|
||||||
del inventory_pairs[name]
|
del inventory_pairs[name]
|
||||||
|
|
||||||
@ -576,8 +647,11 @@ class VMware(BaseManager):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
if sys.platform.startswith("win"):
|
if sys.platform.startswith("win"):
|
||||||
from win32com.shell import shell, shellcon
|
import ctypes
|
||||||
documents_folder = shell.SHGetSpecialFolderPath(None, shellcon.CSIDL_PERSONAL)
|
import ctypes.wintypes
|
||||||
|
path = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
|
||||||
|
ctypes.windll.shell32.SHGetFolderPathW(None, 5, None, 0, path)
|
||||||
|
documents_folder = path.value
|
||||||
windows_type = sys.getwindowsversion().product_type
|
windows_type = sys.getwindowsversion().product_type
|
||||||
if windows_type == 2 or windows_type == 3:
|
if windows_type == 2 or windows_type == 3:
|
||||||
return '{}\My Virtual Machines'.format(documents_folder)
|
return '{}\My Virtual Machines'.format(documents_folder)
|
||||||
@ -598,26 +672,39 @@ class VMware(BaseManager):
|
|||||||
yield from self.check_vmware_version()
|
yield from self.check_vmware_version()
|
||||||
|
|
||||||
inventory_path = self.get_vmware_inventory_path()
|
inventory_path = self.get_vmware_inventory_path()
|
||||||
if os.path.exists(inventory_path):
|
if os.path.exists(inventory_path) and self.host_type != "player":
|
||||||
# FIXME: inventory may exist if VMware workstation has not been fully uninstalled, therefore VMware player VMs are not searched
|
# inventory may exist for VMware player if VMware workstation has been previously installed
|
||||||
return self._get_vms_from_inventory(inventory_path)
|
return self._get_vms_from_inventory(inventory_path)
|
||||||
else:
|
else:
|
||||||
# VMware player has no inventory file, let's search the default location for VMs.
|
# VMware player has no inventory file, let's search the default location for VMs
|
||||||
vmware_preferences_path = self.get_vmware_preferences_path()
|
vmware_preferences_path = self.get_vmware_preferences_path()
|
||||||
default_vm_path = self.get_vmware_default_vm_path()
|
default_vm_path = self.get_vmware_default_vm_path()
|
||||||
|
pairs = {}
|
||||||
if os.path.exists(vmware_preferences_path):
|
if os.path.exists(vmware_preferences_path):
|
||||||
# the default vm path may be present in VMware preferences file.
|
# the default vm path may be present in VMware preferences file.
|
||||||
try:
|
try:
|
||||||
pairs = self.parse_vmware_file(vmware_preferences_path)
|
pairs = self.parse_vmware_file(vmware_preferences_path)
|
||||||
if "prefvmx.defaultvmpath" in pairs:
|
|
||||||
default_vm_path = pairs["prefvmx.defaultvmpath"]
|
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
log.warning('Could not read VMware preferences file "{}": {}'.format(vmware_preferences_path, e))
|
log.warning('Could not read VMware preferences file "{}": {}'.format(vmware_preferences_path, e))
|
||||||
|
if "prefvmx.defaultvmpath" in pairs:
|
||||||
|
default_vm_path = pairs["prefvmx.defaultvmpath"]
|
||||||
if not os.path.isdir(default_vm_path):
|
if not os.path.isdir(default_vm_path):
|
||||||
raise VMwareError('Could not find the default VM directory: "{}"'.format(default_vm_path))
|
raise VMwareError('Could not find the default VM directory: "{}"'.format(default_vm_path))
|
||||||
return self._get_vms_from_directory(default_vm_path)
|
vms = self._get_vms_from_directory(default_vm_path)
|
||||||
|
|
||||||
|
# looks for VMX paths in the preferences file in case not all VMs are in the default directory
|
||||||
|
for key, value in pairs.items():
|
||||||
|
m = re.match(r'pref.mruVM(\d+)\.filename', key)
|
||||||
|
if m:
|
||||||
|
display_name = "pref.mruVM{}.displayName".format(m.group(1))
|
||||||
|
if display_name in pairs:
|
||||||
|
found = False
|
||||||
|
for vm in vms:
|
||||||
|
if vm["vmname"] == display_name:
|
||||||
|
found = True
|
||||||
|
if found is False:
|
||||||
|
vms.append({"vmname": pairs[display_name], "vmx_path": value})
|
||||||
|
return vms
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _get_linux_vmware_binary():
|
def _get_linux_vmware_binary():
|
||||||
@ -628,3 +715,10 @@ class VMware(BaseManager):
|
|||||||
if path is None:
|
if path is None:
|
||||||
path = shutil.which("vmplayer")
|
path = shutil.which("vmplayer")
|
||||||
return path
|
return path
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
vmware = VMware.instance()
|
||||||
|
print("=> Check version")
|
||||||
|
loop.run_until_complete(asyncio.async(vmware.check_vmware_version()))
|
||||||
|
@ -26,12 +26,13 @@ import asyncio
|
|||||||
import tempfile
|
import tempfile
|
||||||
|
|
||||||
from gns3server.utils.telnet_server import TelnetServer
|
from gns3server.utils.telnet_server import TelnetServer
|
||||||
from gns3server.utils.interfaces import get_windows_interfaces
|
from gns3server.utils.interfaces import interfaces
|
||||||
from gns3server.utils.asyncio import wait_for_file_creation, wait_for_named_pipe_creation
|
from gns3server.utils.asyncio import wait_for_file_creation, wait_for_named_pipe_creation
|
||||||
from collections import OrderedDict
|
from collections import OrderedDict
|
||||||
from .vmware_error import VMwareError
|
from .vmware_error import VMwareError
|
||||||
from ..nios.nio_udp import NIOUDP
|
from ..nios.nio_udp import NIOUDP
|
||||||
from ..nios.nio_nat import NIONAT
|
from ..nios.nio_nat import NIONAT
|
||||||
|
from ..nios.nio_tap import NIOTAP
|
||||||
from .nio_vmnet import NIOVMNET
|
from .nio_vmnet import NIOVMNET
|
||||||
from ..adapters.ethernet_adapter import EthernetAdapter
|
from ..adapters.ethernet_adapter import EthernetAdapter
|
||||||
from ..base_vm import BaseVM
|
from ..base_vm import BaseVM
|
||||||
@ -142,7 +143,10 @@ class VMwareVM(BaseVM):
|
|||||||
Creates this VM and handle linked clones.
|
Creates this VM and handle linked clones.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
yield from self.manager.check_vmrun_version()
|
||||||
if self._linked_clone and not os.path.exists(os.path.join(self.working_dir, os.path.basename(self._vmx_path))):
|
if self._linked_clone and not os.path.exists(os.path.join(self.working_dir, os.path.basename(self._vmx_path))):
|
||||||
|
if self.manager.host_type == "player":
|
||||||
|
raise VMwareError("Linked clones are not supported by VMware Player")
|
||||||
# create the base snapshot for linked clones
|
# create the base snapshot for linked clones
|
||||||
base_snapshot_name = "GNS3 Linked Base for clones"
|
base_snapshot_name = "GNS3 Linked Base for clones"
|
||||||
vmsd_path = os.path.splitext(self._vmx_path)[0] + ".vmsd"
|
vmsd_path = os.path.splitext(self._vmx_path)[0] + ".vmsd"
|
||||||
@ -227,41 +231,28 @@ class VMwareVM(BaseVM):
|
|||||||
if self._get_vmx_setting(connected):
|
if self._get_vmx_setting(connected):
|
||||||
del self._vmx_pairs[connected]
|
del self._vmx_pairs[connected]
|
||||||
|
|
||||||
# check for adapter type
|
|
||||||
if self._adapter_type != "default":
|
|
||||||
adapter_type = "ethernet{}.virtualdev".format(adapter_number)
|
|
||||||
if adapter_type in self._vmx_pairs and self._vmx_pairs[adapter_type] != self._adapter_type:
|
|
||||||
raise VMwareError("Existing VMware network adapter {} is not of type {}, please fix or set adapter type to default in GNS3".format(adapter_number,
|
|
||||||
self._adapter_type))
|
|
||||||
|
|
||||||
# # check if any vmnet interface managed by GNS3 is being used on existing VMware adapters
|
|
||||||
# if self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
|
|
||||||
# connection_type = "ethernet{}.connectiontype".format(adapter_number)
|
|
||||||
# if connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("hostonly", "custom"):
|
|
||||||
# vnet = "ethernet{}.vnet".format(adapter_number)
|
|
||||||
# if vnet in self._vmx_pairs:
|
|
||||||
# vmnet = os.path.basename(self._vmx_pairs[vnet])
|
|
||||||
# #nio = self._ethernet_adapters[adapter_number].get_nio(0)
|
|
||||||
# if self.manager.is_managed_vmnet(vmnet):
|
|
||||||
# raise VMwareError("Network adapter {} is already associated with VMnet interface {} which is managed by GNS3, please remove".format(adapter_number, vmnet))
|
|
||||||
|
|
||||||
# then configure VMware network adapters
|
# then configure VMware network adapters
|
||||||
self.manager.refresh_vmnet_list(ubridge=self._use_ubridge)
|
self.manager.refresh_vmnet_list(ubridge=self._use_ubridge)
|
||||||
for adapter_number in range(0, self._adapters):
|
for adapter_number in range(0, self._adapters):
|
||||||
|
|
||||||
# add/update the interface
|
# add/update the interface
|
||||||
|
if self._adapter_type == "default":
|
||||||
|
# force default to e1000 because some guest OS don't detect the adapter (i.e. Windows 2012 server)
|
||||||
|
# when 'virtualdev' is not set in the VMX file.
|
||||||
|
adapter_type = "e1000"
|
||||||
|
else:
|
||||||
|
adapter_type = self._adapter_type
|
||||||
ethernet_adapter = {"ethernet{}.present".format(adapter_number): "TRUE",
|
ethernet_adapter = {"ethernet{}.present".format(adapter_number): "TRUE",
|
||||||
"ethernet{}.addresstype".format(adapter_number): "generated",
|
"ethernet{}.addresstype".format(adapter_number): "generated",
|
||||||
"ethernet{}.generatedaddressoffset".format(adapter_number): "0"}
|
"ethernet{}.generatedaddressoffset".format(adapter_number): "0",
|
||||||
|
"ethernet{}.virtualdev".format(adapter_number): adapter_type}
|
||||||
self._vmx_pairs.update(ethernet_adapter)
|
self._vmx_pairs.update(ethernet_adapter)
|
||||||
if self._adapter_type != "default":
|
|
||||||
self._vmx_pairs["ethernet{}.virtualdev".format(adapter_number)] = self._adapter_type
|
|
||||||
|
|
||||||
connection_type = "ethernet{}.connectiontype".format(adapter_number)
|
connection_type = "ethernet{}.connectiontype".format(adapter_number)
|
||||||
if not self._use_any_adapter and connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("nat", "bridged", "hostonly"):
|
if not self._use_any_adapter and connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("nat", "bridged", "hostonly"):
|
||||||
continue
|
continue
|
||||||
self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
|
|
||||||
|
|
||||||
|
self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
|
||||||
if self._use_ubridge:
|
if self._use_ubridge:
|
||||||
# make sure we have a vmnet per adapter if we use uBridge
|
# make sure we have a vmnet per adapter if we use uBridge
|
||||||
allocate_vmnet = False
|
allocate_vmnet = False
|
||||||
@ -270,7 +261,7 @@ class VMwareVM(BaseVM):
|
|||||||
vnet = "ethernet{}.vnet".format(adapter_number)
|
vnet = "ethernet{}.vnet".format(adapter_number)
|
||||||
if vnet in self._vmx_pairs:
|
if vnet in self._vmx_pairs:
|
||||||
vmnet = os.path.basename(self._vmx_pairs[vnet])
|
vmnet = os.path.basename(self._vmx_pairs[vnet])
|
||||||
if self.manager.is_managed_vmnet(vmnet) or vmnet == "vmnet0":
|
if self.manager.is_managed_vmnet(vmnet) or vmnet in ("vmnet0", "vmnet1", "vmnet8"):
|
||||||
# vmnet already managed, try to allocate a new one
|
# vmnet already managed, try to allocate a new one
|
||||||
allocate_vmnet = True
|
allocate_vmnet = True
|
||||||
else:
|
else:
|
||||||
@ -310,6 +301,7 @@ class VMwareVM(BaseVM):
|
|||||||
:param adapter_number: adapter number
|
:param adapter_number: adapter number
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
block_host_traffic = self.manager.config.get_section_config("VMware").getboolean("block_host_traffic", False)
|
||||||
vnet = "ethernet{}.vnet".format(adapter_number)
|
vnet = "ethernet{}.vnet".format(adapter_number)
|
||||||
if vnet not in self._vmx_pairs:
|
if vnet not in self._vmx_pairs:
|
||||||
raise VMwareError("vnet {} not in VMX file".format(vnet))
|
raise VMwareError("vnet {} not in VMX file".format(vnet))
|
||||||
@ -319,18 +311,29 @@ class VMwareVM(BaseVM):
|
|||||||
yield from self._ubridge_hypervisor.send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=vnet,
|
yield from self._ubridge_hypervisor.send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=vnet,
|
||||||
interface=vmnet_interface))
|
interface=vmnet_interface))
|
||||||
elif sys.platform.startswith("win"):
|
elif sys.platform.startswith("win"):
|
||||||
windows_interfaces = get_windows_interfaces()
|
windows_interfaces = interfaces()
|
||||||
npf = None
|
npf = None
|
||||||
|
source_mac = None
|
||||||
for interface in windows_interfaces:
|
for interface in windows_interfaces:
|
||||||
if "netcard" in interface and vmnet_interface in interface["netcard"]:
|
if "netcard" in interface and vmnet_interface in interface["netcard"]:
|
||||||
npf = interface["id"]
|
npf = interface["id"]
|
||||||
|
source_mac = interface["mac_address"]
|
||||||
elif vmnet_interface in interface["name"]:
|
elif vmnet_interface in interface["name"]:
|
||||||
npf = interface["id"]
|
npf = interface["id"]
|
||||||
|
source_mac = interface["mac_address"]
|
||||||
if npf:
|
if npf:
|
||||||
yield from self._ubridge_hypervisor.send('bridge add_nio_ethernet {name} "{interface}"'.format(name=vnet,
|
yield from self._ubridge_hypervisor.send('bridge add_nio_ethernet {name} "{interface}"'.format(name=vnet,
|
||||||
interface=npf))
|
interface=npf))
|
||||||
else:
|
else:
|
||||||
raise VMwareError("Could not find NPF id for VMnet interface {}".format(vmnet_interface))
|
raise VMwareError("Could not find NPF id for VMnet interface {}".format(vmnet_interface))
|
||||||
|
|
||||||
|
if block_host_traffic:
|
||||||
|
if source_mac:
|
||||||
|
yield from self._ubridge_hypervisor.send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=vnet,
|
||||||
|
mac=source_mac))
|
||||||
|
else:
|
||||||
|
log.warn("Could not block host network traffic on {} (no MAC address found)".format(vmnet_interface))
|
||||||
|
|
||||||
elif sys.platform.startswith("darwin"):
|
elif sys.platform.startswith("darwin"):
|
||||||
yield from self._ubridge_hypervisor.send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=vnet,
|
yield from self._ubridge_hypervisor.send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=vnet,
|
||||||
interface=vmnet_interface))
|
interface=vmnet_interface))
|
||||||
@ -343,6 +346,8 @@ class VMwareVM(BaseVM):
|
|||||||
lport=nio.lport,
|
lport=nio.lport,
|
||||||
rhost=nio.rhost,
|
rhost=nio.rhost,
|
||||||
rport=nio.rport))
|
rport=nio.rport))
|
||||||
|
elif isinstance(nio, NIOTAP):
|
||||||
|
yield from self._ubridge_hypervisor.send('bridge add_nio_tap {name} {tap}'.format(name=vnet, tap=nio.tap_device))
|
||||||
|
|
||||||
if nio.capturing:
|
if nio.capturing:
|
||||||
yield from self._ubridge_hypervisor.send('bridge start_capture {name} "{pcap_file}"'.format(name=vnet,
|
yield from self._ubridge_hypervisor.send('bridge start_capture {name} "{pcap_file}"'.format(name=vnet,
|
||||||
@ -350,6 +355,14 @@ class VMwareVM(BaseVM):
|
|||||||
|
|
||||||
yield from self._ubridge_hypervisor.send('bridge start {name}'.format(name=vnet))
|
yield from self._ubridge_hypervisor.send('bridge start {name}'.format(name=vnet))
|
||||||
|
|
||||||
|
# TODO: this only work when using PCAP (NIO Ethernet): current default on Linux is NIO RAW LINUX
|
||||||
|
# source_mac = None
|
||||||
|
# for interface in interfaces():
|
||||||
|
# if interface["name"] == vmnet_interface:
|
||||||
|
# source_mac = interface["mac_address"]
|
||||||
|
# if source_mac:
|
||||||
|
# yield from self._ubridge_hypervisor.send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=vnet, mac=source_mac))
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def _delete_ubridge_connection(self, adapter_number):
|
def _delete_ubridge_connection(self, adapter_number):
|
||||||
"""
|
"""
|
||||||
@ -436,21 +449,25 @@ class VMwareVM(BaseVM):
|
|||||||
else:
|
else:
|
||||||
yield from self._control_vm("start")
|
yield from self._control_vm("start")
|
||||||
|
|
||||||
if self._use_ubridge and self._ubridge_hypervisor:
|
try:
|
||||||
for adapter_number in range(0, self._adapters):
|
if self._use_ubridge and self._ubridge_hypervisor:
|
||||||
nio = self._ethernet_adapters[adapter_number].get_nio(0)
|
for adapter_number in range(0, self._adapters):
|
||||||
if nio:
|
nio = self._ethernet_adapters[adapter_number].get_nio(0)
|
||||||
yield from self._add_ubridge_connection(nio, adapter_number)
|
if nio:
|
||||||
|
yield from self._add_ubridge_connection(nio, adapter_number)
|
||||||
|
|
||||||
if self._enable_remote_console and self._console is not None:
|
if self._enable_remote_console and self._console is not None:
|
||||||
try:
|
try:
|
||||||
if sys.platform.startswith("win"):
|
if sys.platform.startswith("win"):
|
||||||
yield from wait_for_named_pipe_creation(self._get_pipe_name())
|
yield from wait_for_named_pipe_creation(self._get_pipe_name())
|
||||||
else:
|
else:
|
||||||
yield from wait_for_file_creation(self._get_pipe_name()) # wait for VMware to create the pipe file.
|
yield from wait_for_file_creation(self._get_pipe_name()) # wait for VMware to create the pipe file.
|
||||||
except asyncio.TimeoutError:
|
except asyncio.TimeoutError:
|
||||||
raise VMwareError('Pipe file "{}" for remote console has not been created by VMware'.format(self._get_pipe_name()))
|
raise VMwareError('Pipe file "{}" for remote console has not been created by VMware'.format(self._get_pipe_name()))
|
||||||
self._start_remote_console()
|
self._start_remote_console()
|
||||||
|
except VMwareError:
|
||||||
|
yield from self.stop()
|
||||||
|
raise
|
||||||
|
|
||||||
if self._get_vmx_setting("vhv.enable", "TRUE"):
|
if self._get_vmx_setting("vhv.enable", "TRUE"):
|
||||||
self._hw_virtualization = True
|
self._hw_virtualization = True
|
||||||
@ -470,11 +487,12 @@ class VMwareVM(BaseVM):
|
|||||||
yield from self._ubridge_hypervisor.stop()
|
yield from self._ubridge_hypervisor.stop()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if self.acpi_shutdown:
|
if (yield from self.is_running()):
|
||||||
# use ACPI to shutdown the VM
|
if self.acpi_shutdown:
|
||||||
yield from self._control_vm("stop", "soft")
|
# use ACPI to shutdown the VM
|
||||||
else:
|
yield from self._control_vm("stop", "soft")
|
||||||
yield from self._control_vm("stop")
|
else:
|
||||||
|
yield from self._control_vm("stop")
|
||||||
finally:
|
finally:
|
||||||
self._started = False
|
self._started = False
|
||||||
|
|
||||||
@ -483,17 +501,15 @@ class VMwareVM(BaseVM):
|
|||||||
self._vmnets.clear()
|
self._vmnets.clear()
|
||||||
# remove the adapters managed by GNS3
|
# remove the adapters managed by GNS3
|
||||||
for adapter_number in range(0, self._adapters):
|
for adapter_number in range(0, self._adapters):
|
||||||
if self._get_vmx_setting("ethernet{}.vnet".format(adapter_number)) or \
|
vnet = "ethernet{}.vnet".format(adapter_number)
|
||||||
self._get_vmx_setting("ethernet{}.connectiontype".format(adapter_number)) is None:
|
if self._get_vmx_setting(vnet) or self._get_vmx_setting("ethernet{}.connectiontype".format(adapter_number)) is None:
|
||||||
vnet = "ethernet{}.vnet".format(adapter_number)
|
|
||||||
if vnet in self._vmx_pairs:
|
if vnet in self._vmx_pairs:
|
||||||
vmnet = os.path.basename(self._vmx_pairs[vnet])
|
vmnet = os.path.basename(self._vmx_pairs[vnet])
|
||||||
if not self.manager.is_managed_vmnet(vmnet):
|
if not self.manager.is_managed_vmnet(vmnet):
|
||||||
continue
|
continue
|
||||||
log.debug("removing adapter {}".format(adapter_number))
|
log.debug("removing adapter {}".format(adapter_number))
|
||||||
for key in list(self._vmx_pairs.keys()):
|
self._vmx_pairs[vnet] = "vmnet1"
|
||||||
if key.startswith("ethernet{}.".format(adapter_number)):
|
self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
|
||||||
del self._vmx_pairs[key]
|
|
||||||
|
|
||||||
# re-enable any remaining network adapters
|
# re-enable any remaining network adapters
|
||||||
for adapter_number in range(self._adapters, self._maximum_adapters):
|
for adapter_number in range(self._adapters, self._maximum_adapters):
|
||||||
@ -541,14 +557,8 @@ class VMwareVM(BaseVM):
|
|||||||
Closes this VMware VM.
|
Closes this VMware VM.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self._closed:
|
if not (yield from super().close()):
|
||||||
# VM is already closed
|
return False
|
||||||
return
|
|
||||||
|
|
||||||
log.debug("VMware VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
|
|
||||||
if self._console:
|
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
|
||||||
self._console = None
|
|
||||||
|
|
||||||
for adapter in self._ethernet_adapters.values():
|
for adapter in self._ethernet_adapters.values():
|
||||||
if adapter is not None:
|
if adapter is not None:
|
||||||
@ -566,9 +576,6 @@ class VMwareVM(BaseVM):
|
|||||||
if self._linked_clone:
|
if self._linked_clone:
|
||||||
yield from self.manager.remove_from_vmware_inventory(self._vmx_path)
|
yield from self.manager.remove_from_vmware_inventory(self._vmx_path)
|
||||||
|
|
||||||
log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
|
|
||||||
self._closed = True
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def headless(self):
|
def headless(self):
|
||||||
"""
|
"""
|
||||||
@ -975,8 +982,8 @@ class VMwareVM(BaseVM):
|
|||||||
try:
|
try:
|
||||||
adapter = self._ethernet_adapters[adapter_number]
|
adapter = self._ethernet_adapters[adapter_number]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
raise VMwareError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
|
raise VMwareError("Adapter {adapter_number} doesn't exist on VMware VM '{name}'".format(name=self.name,
|
||||||
adapter_number=adapter_number))
|
adapter_number=adapter_number))
|
||||||
|
|
||||||
nio = adapter.get_nio(0)
|
nio = adapter.get_nio(0)
|
||||||
|
|
||||||
|
@ -22,6 +22,7 @@ order to run a VPCS VM.
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
|
import socket
|
||||||
import subprocess
|
import subprocess
|
||||||
import signal
|
import signal
|
||||||
import re
|
import re
|
||||||
@ -31,7 +32,7 @@ import shutil
|
|||||||
from ...utils.asyncio import wait_for_process_termination
|
from ...utils.asyncio import wait_for_process_termination
|
||||||
from ...utils.asyncio import monitor_process
|
from ...utils.asyncio import monitor_process
|
||||||
from ...utils.asyncio import subprocess_check_output
|
from ...utils.asyncio import subprocess_check_output
|
||||||
from pkg_resources import parse_version
|
from gns3server.utils import parse_version
|
||||||
from .vpcs_error import VPCSError
|
from .vpcs_error import VPCSError
|
||||||
from ..adapters.ethernet_adapter import EthernetAdapter
|
from ..adapters.ethernet_adapter import EthernetAdapter
|
||||||
from ..nios.nio_udp import NIOUDP
|
from ..nios.nio_udp import NIOUDP
|
||||||
@ -59,7 +60,6 @@ class VPCSVM(BaseVM):
|
|||||||
def __init__(self, name, vm_id, project, manager, console=None, startup_script=None):
|
def __init__(self, name, vm_id, project, manager, console=None, startup_script=None):
|
||||||
|
|
||||||
super().__init__(name, vm_id, project, manager, console=console)
|
super().__init__(name, vm_id, project, manager, console=console)
|
||||||
self._command = []
|
|
||||||
self._process = None
|
self._process = None
|
||||||
self._vpcs_stdout_file = ""
|
self._vpcs_stdout_file = ""
|
||||||
self._vpcs_version = None
|
self._vpcs_version = None
|
||||||
@ -76,10 +76,8 @@ class VPCSVM(BaseVM):
|
|||||||
Closes this VPCS VM.
|
Closes this VPCS VM.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
log.debug('VPCS "{name}" [{id}] is closing'.format(name=self._name, id=self._id))
|
if not (yield from super().close()):
|
||||||
if self._console:
|
return False
|
||||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
|
||||||
self._console = None
|
|
||||||
|
|
||||||
nio = self._ethernet_adapter.get_nio(0)
|
nio = self._ethernet_adapter.get_nio(0)
|
||||||
if isinstance(nio, NIOUDP):
|
if isinstance(nio, NIOUDP):
|
||||||
@ -88,6 +86,8 @@ class VPCSVM(BaseVM):
|
|||||||
if self.is_running():
|
if self.is_running():
|
||||||
self._terminate_process()
|
self._terminate_process()
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def _check_requirements(self):
|
def _check_requirements(self):
|
||||||
"""
|
"""
|
||||||
@ -115,7 +115,8 @@ class VPCSVM(BaseVM):
|
|||||||
"console": self._console,
|
"console": self._console,
|
||||||
"project_id": self.project.id,
|
"project_id": self.project.id,
|
||||||
"startup_script": self.startup_script,
|
"startup_script": self.startup_script,
|
||||||
"startup_script_path": self.relative_startup_script}
|
"startup_script_path": self.relative_startup_script,
|
||||||
|
"command_line": self.command_line}
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def relative_startup_script(self):
|
def relative_startup_script(self):
|
||||||
@ -139,9 +140,11 @@ class VPCSVM(BaseVM):
|
|||||||
:returns: path to VPCS
|
:returns: path to VPCS
|
||||||
"""
|
"""
|
||||||
|
|
||||||
path = self._manager.config.get_section_config("VPCS").get("vpcs_path", "vpcs")
|
search_path = self._manager.config.get_section_config("VPCS").get("vpcs_path", "vpcs")
|
||||||
if path == "vpcs":
|
path = shutil.which(search_path)
|
||||||
path = shutil.which("vpcs")
|
# shutil.which return None if the path doesn't exists
|
||||||
|
if not path:
|
||||||
|
return search_path
|
||||||
return path
|
return path
|
||||||
|
|
||||||
@BaseVM.name.setter
|
@BaseVM.name.setter
|
||||||
@ -223,16 +226,17 @@ class VPCSVM(BaseVM):
|
|||||||
if not self._ethernet_adapter.get_nio(0):
|
if not self._ethernet_adapter.get_nio(0):
|
||||||
raise VPCSError("This VPCS instance must be connected in order to start")
|
raise VPCSError("This VPCS instance must be connected in order to start")
|
||||||
|
|
||||||
self._command = self._build_command()
|
command = self._build_command()
|
||||||
try:
|
try:
|
||||||
log.info("Starting VPCS: {}".format(self._command))
|
log.info("Starting VPCS: {}".format(command))
|
||||||
self._vpcs_stdout_file = os.path.join(self.working_dir, "vpcs.log")
|
self._vpcs_stdout_file = os.path.join(self.working_dir, "vpcs.log")
|
||||||
log.info("Logging to {}".format(self._vpcs_stdout_file))
|
log.info("Logging to {}".format(self._vpcs_stdout_file))
|
||||||
flags = 0
|
flags = 0
|
||||||
if sys.platform.startswith("win32"):
|
if sys.platform.startswith("win32"):
|
||||||
flags = subprocess.CREATE_NEW_PROCESS_GROUP
|
flags = subprocess.CREATE_NEW_PROCESS_GROUP
|
||||||
with open(self._vpcs_stdout_file, "w", encoding="utf-8") as fd:
|
with open(self._vpcs_stdout_file, "w", encoding="utf-8") as fd:
|
||||||
self._process = yield from asyncio.create_subprocess_exec(*self._command,
|
self.command_line = ' '.join(command)
|
||||||
|
self._process = yield from asyncio.create_subprocess_exec(*command,
|
||||||
stdout=fd,
|
stdout=fd,
|
||||||
stderr=subprocess.STDOUT,
|
stderr=subprocess.STDOUT,
|
||||||
cwd=self.working_dir,
|
cwd=self.working_dir,
|
||||||
@ -418,8 +422,10 @@ class VPCSVM(BaseVM):
|
|||||||
command.extend(["-m", str(self._manager.get_mac_id(self.id))]) # the unique ID is used to set the MAC address offset
|
command.extend(["-m", str(self._manager.get_mac_id(self.id))]) # the unique ID is used to set the MAC address offset
|
||||||
command.extend(["-i", "1"]) # option to start only one VPC instance
|
command.extend(["-i", "1"]) # option to start only one VPC instance
|
||||||
command.extend(["-F"]) # option to avoid the daemonization of VPCS
|
command.extend(["-F"]) # option to avoid the daemonization of VPCS
|
||||||
if self._vpcs_version > parse_version("0.8"):
|
if self._vpcs_version >= parse_version("0.8b"):
|
||||||
command.extend(["-R"]) # disable relay feature of VPCS (starting with VPCS 0.8)
|
command.extend(["-R"]) # disable the relay feature of VPCS (starting with VPCS 0.8)
|
||||||
|
else:
|
||||||
|
log.warn("The VPCS relay feature could not be disabled because the VPCS version is below 0.8b")
|
||||||
|
|
||||||
nio = self._ethernet_adapter.get_nio(0)
|
nio = self._ethernet_adapter.get_nio(0)
|
||||||
if nio:
|
if nio:
|
||||||
@ -427,7 +433,10 @@ class VPCSVM(BaseVM):
|
|||||||
# UDP tunnel
|
# UDP tunnel
|
||||||
command.extend(["-s", str(nio.lport)]) # source UDP port
|
command.extend(["-s", str(nio.lport)]) # source UDP port
|
||||||
command.extend(["-c", str(nio.rport)]) # destination UDP port
|
command.extend(["-c", str(nio.rport)]) # destination UDP port
|
||||||
command.extend(["-t", nio.rhost]) # destination host
|
try:
|
||||||
|
command.extend(["-t", socket.gethostbyname(nio.rhost)]) # destination host, we need to resolve the hostname because VPCS doesn't support it
|
||||||
|
except socket.gaierror as e:
|
||||||
|
raise VPCSError("Can't resolve hostname {}".format(nio.rhost))
|
||||||
|
|
||||||
elif isinstance(nio, NIOTAP):
|
elif isinstance(nio, NIOTAP):
|
||||||
# TAP interface
|
# TAP interface
|
||||||
|
@ -26,6 +26,7 @@ import datetime
|
|||||||
import sys
|
import sys
|
||||||
import locale
|
import locale
|
||||||
import argparse
|
import argparse
|
||||||
|
import psutil
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
from gns3server.server import Server
|
from gns3server.server import Server
|
||||||
@ -90,6 +91,7 @@ def parse_arguments(argv):
|
|||||||
parser.add_argument("--host", help="run on the given host/IP address")
|
parser.add_argument("--host", help="run on the given host/IP address")
|
||||||
parser.add_argument("--port", help="run on the given port", type=int)
|
parser.add_argument("--port", help="run on the given port", type=int)
|
||||||
parser.add_argument("--ssl", action="store_true", help="run in SSL mode")
|
parser.add_argument("--ssl", action="store_true", help="run in SSL mode")
|
||||||
|
parser.add_argument("--controller", action="store_true", help="start as a GNS3 controller")
|
||||||
parser.add_argument("--config", help="Configuration file")
|
parser.add_argument("--config", help="Configuration file")
|
||||||
parser.add_argument("--certfile", help="SSL cert file")
|
parser.add_argument("--certfile", help="SSL cert file")
|
||||||
parser.add_argument("--certkey", help="SSL key file")
|
parser.add_argument("--certkey", help="SSL key file")
|
||||||
@ -111,12 +113,13 @@ def parse_arguments(argv):
|
|||||||
config = Config.instance().get_section_config("Server")
|
config = Config.instance().get_section_config("Server")
|
||||||
defaults = {
|
defaults = {
|
||||||
"host": config.get("host", "0.0.0.0"),
|
"host": config.get("host", "0.0.0.0"),
|
||||||
"port": config.get("port", 8000),
|
"port": config.get("port", 3080),
|
||||||
"ssl": config.getboolean("ssl", False),
|
"ssl": config.getboolean("ssl", False),
|
||||||
"certfile": config.get("certfile", ""),
|
"certfile": config.get("certfile", ""),
|
||||||
"certkey": config.get("certkey", ""),
|
"certkey": config.get("certkey", ""),
|
||||||
"record": config.get("record", ""),
|
"record": config.get("record", ""),
|
||||||
"local": config.getboolean("local", False),
|
"local": config.getboolean("local", False),
|
||||||
|
"controller": config.getboolean("controller", False),
|
||||||
"allow": config.getboolean("allow_remote_console", False),
|
"allow": config.getboolean("allow_remote_console", False),
|
||||||
"quiet": config.getboolean("quiet", False),
|
"quiet": config.getboolean("quiet", False),
|
||||||
"debug": config.getboolean("debug", False),
|
"debug": config.getboolean("debug", False),
|
||||||
@ -133,6 +136,7 @@ def set_config(args):
|
|||||||
config = Config.instance()
|
config = Config.instance()
|
||||||
server_config = config.get_section_config("Server")
|
server_config = config.get_section_config("Server")
|
||||||
server_config["local"] = str(args.local)
|
server_config["local"] = str(args.local)
|
||||||
|
server_config["controller"] = str(args.controller)
|
||||||
server_config["allow_remote_console"] = str(args.allow)
|
server_config["allow_remote_console"] = str(args.allow)
|
||||||
server_config["host"] = args.host
|
server_config["host"] = args.host
|
||||||
server_config["port"] = str(args.port)
|
server_config["port"] = str(args.port)
|
||||||
@ -177,6 +181,21 @@ def pid_lock(path):
|
|||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def kill_ghosts():
|
||||||
|
"""
|
||||||
|
Kill process from previous GNS3 session
|
||||||
|
"""
|
||||||
|
detect_process = ["vpcs", "ubridge", "dynamips"]
|
||||||
|
for proc in psutil.process_iter():
|
||||||
|
try:
|
||||||
|
name = proc.name().lower().split(".")[0]
|
||||||
|
if name in detect_process:
|
||||||
|
proc.kill()
|
||||||
|
log.warning("Killed ghost process %s", name)
|
||||||
|
except (psutil.NoSuchProcess, psutil.AccessDenied):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
def run():
|
def run():
|
||||||
args = parse_arguments(sys.argv[1:])
|
args = parse_arguments(sys.argv[1:])
|
||||||
|
|
||||||
@ -186,6 +205,7 @@ def run():
|
|||||||
|
|
||||||
if args.pid:
|
if args.pid:
|
||||||
pid_lock(args.pid)
|
pid_lock(args.pid)
|
||||||
|
kill_ghosts()
|
||||||
|
|
||||||
level = logging.INFO
|
level = logging.INFO
|
||||||
if args.debug:
|
if args.debug:
|
||||||
@ -201,6 +221,9 @@ def run():
|
|||||||
|
|
||||||
set_config(args)
|
set_config(args)
|
||||||
server_config = Config.instance().get_section_config("Server")
|
server_config = Config.instance().get_section_config("Server")
|
||||||
|
if server_config.getboolean("controller"):
|
||||||
|
log.info("Controller mode is enabled.")
|
||||||
|
|
||||||
if server_config.getboolean("local"):
|
if server_config.getboolean("local"):
|
||||||
log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem")
|
log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem")
|
||||||
|
|
||||||
|
@ -23,58 +23,49 @@ DOCKER_CREATE_SCHEMA = {
|
|||||||
"properties": {
|
"properties": {
|
||||||
"vm_id": {
|
"vm_id": {
|
||||||
"description": "Docker VM instance identifier",
|
"description": "Docker VM instance identifier",
|
||||||
"oneOf": [
|
"type": "string",
|
||||||
{"type": "string",
|
"minLength": 36,
|
||||||
"minLength": 36,
|
"maxLength": 36,
|
||||||
"maxLength": 36,
|
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
|
||||||
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"},
|
|
||||||
{"type": "integer"} # for legacy projects
|
|
||||||
]
|
|
||||||
},
|
},
|
||||||
"name": {
|
"name": {
|
||||||
"description": "Docker container name",
|
"description": "Docker container name",
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"minLength": 1,
|
"minLength": 1,
|
||||||
},
|
},
|
||||||
"startcmd": {
|
|
||||||
"description": "Docker CMD entry",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
"imagename": {
|
|
||||||
"description": "Docker image name",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
"adapters": {
|
|
||||||
"description": "number of adapters",
|
|
||||||
"type": "integer",
|
|
||||||
"minimum": 0,
|
|
||||||
"maximum": 64,
|
|
||||||
},
|
|
||||||
"adapter_type": {
|
|
||||||
"description": "Docker adapter type",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
"console": {
|
"console": {
|
||||||
"description": "console name",
|
"description": "console TCP port",
|
||||||
"type": "string",
|
"minimum": 1,
|
||||||
"minLength": 1,
|
"maximum": 65535,
|
||||||
|
"type": ["integer", "null"]
|
||||||
},
|
},
|
||||||
},
|
"console_type": {
|
||||||
"additionalProperties": False,
|
"description": "console type",
|
||||||
}
|
"enum": ["telnet", "vnc", "http", "https"]
|
||||||
|
},
|
||||||
DOCKER_UPDATE_SCHEMA = {
|
"console_resolution": {
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
"description": "console resolution for VNC",
|
||||||
"description": "Request validation to update a Docker container",
|
"type": ["string", "null"],
|
||||||
"type": "object",
|
"pattern": "^[0-9]+x[0-9]+$"
|
||||||
"properties": {
|
},
|
||||||
"name": {
|
"console_http_port": {
|
||||||
"description": "Docker container name",
|
"description": "Internal port in the container of the HTTP server",
|
||||||
|
"type": "integer",
|
||||||
|
},
|
||||||
|
"console_http_path": {
|
||||||
|
"description": "Path of the web interface",
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"minLength": 1,
|
},
|
||||||
|
"aux": {
|
||||||
|
"description": "auxilary TCP port",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 65535,
|
||||||
|
"type": ["integer", "null"]
|
||||||
|
},
|
||||||
|
"start_command": {
|
||||||
|
"description": "Docker CMD entry",
|
||||||
|
"type": ["string", "null"],
|
||||||
|
"minLength": 0,
|
||||||
},
|
},
|
||||||
"image": {
|
"image": {
|
||||||
"description": "Docker image name",
|
"description": "Docker image name",
|
||||||
@ -83,32 +74,78 @@ DOCKER_UPDATE_SCHEMA = {
|
|||||||
},
|
},
|
||||||
"adapters": {
|
"adapters": {
|
||||||
"description": "number of adapters",
|
"description": "number of adapters",
|
||||||
"type": "integer",
|
"type": ["integer", "null"],
|
||||||
"minimum": 0,
|
"minimum": 0,
|
||||||
"maximum": 64,
|
"maximum": 99,
|
||||||
},
|
|
||||||
"adapter_type": {
|
|
||||||
"description": "Docker adapter type",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
},
|
||||||
|
"environment": {
|
||||||
|
"description": "Docker environment",
|
||||||
|
"type": ["string", "null"],
|
||||||
|
"minLength": 0,
|
||||||
|
}
|
||||||
|
|
||||||
},
|
},
|
||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
}
|
}
|
||||||
|
|
||||||
DOCKER_CAPTURE_SCHEMA = {
|
|
||||||
|
DOCKER_UPDATE_SCHEMA = {
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||||
"description": "Request validation to start a packet capture on a Docker container port",
|
"description": "Request validation to create a new Docker container",
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"capture_file_name": {
|
"name": {
|
||||||
"description": "Capture file name",
|
"description": "Docker container name",
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"minLength": 1,
|
"minLength": 1,
|
||||||
},
|
},
|
||||||
|
"console": {
|
||||||
|
"description": "console TCP port",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 65535,
|
||||||
|
"type": ["integer", "null"]
|
||||||
|
},
|
||||||
|
"console_resolution": {
|
||||||
|
"description": "console resolution for VNC",
|
||||||
|
"type": ["string", "null"],
|
||||||
|
"pattern": "^[0-9]+x[0-9]+$"
|
||||||
|
},
|
||||||
|
"console_type": {
|
||||||
|
"description": "console type",
|
||||||
|
"enum": ["telnet", "vnc", "http", "https"]
|
||||||
|
},
|
||||||
|
"console_http_port": {
|
||||||
|
"description": "Internal port in the container of the HTTP server",
|
||||||
|
"type": "integer",
|
||||||
|
},
|
||||||
|
"console_http_path": {
|
||||||
|
"description": "Path of the web interface",
|
||||||
|
"type": "string",
|
||||||
|
},
|
||||||
|
"aux": {
|
||||||
|
"description": "auxilary TCP port",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 65535,
|
||||||
|
"type": ["integer", "null"]
|
||||||
|
},
|
||||||
|
"start_command": {
|
||||||
|
"description": "Docker CMD entry",
|
||||||
|
"type": ["string", "null"],
|
||||||
|
"minLength": 0,
|
||||||
|
},
|
||||||
|
"environment": {
|
||||||
|
"description": "Docker environment",
|
||||||
|
"type": ["string", "null"],
|
||||||
|
"minLength": 0,
|
||||||
|
},
|
||||||
|
"adapters": {
|
||||||
|
"description": "number of adapters",
|
||||||
|
"type": ["integer", "null"],
|
||||||
|
"minimum": 0,
|
||||||
|
"maximum": 99,
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
"required": ["capture_file_name"]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
DOCKER_OBJECT_SCHEMA = {
|
DOCKER_OBJECT_SCHEMA = {
|
||||||
@ -128,12 +165,41 @@ DOCKER_OBJECT_SCHEMA = {
|
|||||||
"maxLength": 36,
|
"maxLength": 36,
|
||||||
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
|
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
|
||||||
},
|
},
|
||||||
"cid": {
|
"aux": {
|
||||||
|
"description": "auxilary TCP port",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 65535,
|
||||||
|
"type": "integer"
|
||||||
|
},
|
||||||
|
"console": {
|
||||||
|
"description": "console TCP port",
|
||||||
|
"minimum": 1,
|
||||||
|
"maximum": 65535,
|
||||||
|
"type": "integer"
|
||||||
|
},
|
||||||
|
"console_resolution": {
|
||||||
|
"description": "console resolution for VNC",
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "^[0-9]+x[0-9]+$"
|
||||||
|
},
|
||||||
|
"console_type": {
|
||||||
|
"description": "console type",
|
||||||
|
"enum": ["telnet", "vnc", "http", "https"]
|
||||||
|
},
|
||||||
|
"console_http_port": {
|
||||||
|
"description": "Internal port in the container of the HTTP server",
|
||||||
|
"type": "integer",
|
||||||
|
},
|
||||||
|
"console_http_path": {
|
||||||
|
"description": "Path of the web interface",
|
||||||
|
"type": "string",
|
||||||
|
},
|
||||||
|
"container_id": {
|
||||||
"description": "Docker container ID",
|
"description": "Docker container ID",
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"minLength": 64,
|
"minLength": 12,
|
||||||
"maxLength": 64,
|
"maxLength": 64,
|
||||||
"pattern": "^[a-zA-Z0-9_.-]{64}$"
|
"pattern": "^[a-f0-9]+$"
|
||||||
},
|
},
|
||||||
"project_id": {
|
"project_id": {
|
||||||
"description": "Project UUID",
|
"description": "Project UUID",
|
||||||
@ -149,16 +215,44 @@ DOCKER_OBJECT_SCHEMA = {
|
|||||||
},
|
},
|
||||||
"adapters": {
|
"adapters": {
|
||||||
"description": "number of adapters",
|
"description": "number of adapters",
|
||||||
"type": "integer",
|
"type": ["integer", "null"],
|
||||||
"minimum": 0,
|
"minimum": 0,
|
||||||
"maximum": 64,
|
"maximum": 99,
|
||||||
},
|
},
|
||||||
"adapter_type": {
|
"start_command": {
|
||||||
"description": "Docker adapter type",
|
"description": "Docker CMD entry",
|
||||||
"type": "string",
|
"type": ["string", "null"],
|
||||||
"minLength": 1,
|
"minLength": 0,
|
||||||
},
|
},
|
||||||
|
"environment": {
|
||||||
|
"description": "Docker environment",
|
||||||
|
"type": ["string", "null"],
|
||||||
|
"minLength": 0,
|
||||||
|
},
|
||||||
|
"vm_directory": {
|
||||||
|
"decription": "Path to the VM working directory",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
"required": ["vm_id", "project_id"]
|
"required": ["vm_id", "project_id", "image", "container_id", "adapters", "aux", "console", "console_type", "console_resolution", "start_command", "environment", "vm_directory"]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
DOCKER_LIST_IMAGES_SCHEMA = {
|
||||||
|
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||||
|
"description": "Docker list of images",
|
||||||
|
"type": "array",
|
||||||
|
"items": [
|
||||||
|
{
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"image": {
|
||||||
|
"description": "Docker image name",
|
||||||
|
"type": "string",
|
||||||
|
"minLength": 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
@ -346,23 +346,3 @@ DEVICE_NIO_SCHEMA = {
|
|||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
"required": ["nio"]
|
"required": ["nio"]
|
||||||
}
|
}
|
||||||
|
|
||||||
DEVICE_CAPTURE_SCHEMA = {
|
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
|
||||||
"description": "Request validation to start a packet capture on an Device instance port",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"capture_file_name": {
|
|
||||||
"description": "Capture file name",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
"data_link_type": {
|
|
||||||
"description": "PCAP data link type",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"additionalProperties": False,
|
|
||||||
"required": ["capture_file_name", "data_link_type"]
|
|
||||||
}
|
|
||||||
|
@ -491,25 +491,6 @@ VM_UPDATE_SCHEMA = {
|
|||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
}
|
}
|
||||||
|
|
||||||
VM_CAPTURE_SCHEMA = {
|
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
|
||||||
"description": "Request validation to start a packet capture on a Dynamips VM instance port",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"capture_file_name": {
|
|
||||||
"description": "Capture file name",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
"data_link_type": {
|
|
||||||
"description": "PCAP data link type",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"additionalProperties": False,
|
|
||||||
"required": ["capture_file_name", "data_link_type"]
|
|
||||||
}
|
|
||||||
|
|
||||||
VM_OBJECT_SCHEMA = {
|
VM_OBJECT_SCHEMA = {
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||||
|
@ -254,32 +254,18 @@ IOU_OBJECT_SCHEMA = {
|
|||||||
"iourc_path": {
|
"iourc_path": {
|
||||||
"description": "Path of the iourc file used by remote servers",
|
"description": "Path of the iourc file used by remote servers",
|
||||||
"type": ["string", "null"]
|
"type": ["string", "null"]
|
||||||
|
},
|
||||||
|
"command_line": {
|
||||||
|
"description": "Last command line used by GNS3 to start QEMU",
|
||||||
|
"type": "string"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
"required": ["name", "vm_id", "console", "project_id", "path", "md5sum", "serial_adapters", "ethernet_adapters",
|
"required": ["name", "vm_id", "console", "project_id", "path", "md5sum", "serial_adapters", "ethernet_adapters",
|
||||||
"ram", "nvram", "l1_keepalives", "startup_config", "private_config", "use_default_iou_values"]
|
"ram", "nvram", "l1_keepalives", "startup_config", "private_config", "use_default_iou_values",
|
||||||
|
"command_line"]
|
||||||
}
|
}
|
||||||
|
|
||||||
IOU_CAPTURE_SCHEMA = {
|
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
|
||||||
"description": "Request validation to start a packet capture on a IOU instance",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"capture_file_name": {
|
|
||||||
"description": "Capture file name",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
"data_link_type": {
|
|
||||||
"description": "PCAP data link type",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"additionalProperties": False,
|
|
||||||
"required": ["capture_file_name", "data_link_type"]
|
|
||||||
}
|
|
||||||
|
|
||||||
IOU_CONFIGS_SCHEMA = {
|
IOU_CONFIGS_SCHEMA = {
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||||
|
@ -558,6 +558,10 @@ QEMU_OBJECT_SCHEMA = {
|
|||||||
"description": "Additional QEMU options",
|
"description": "Additional QEMU options",
|
||||||
"type": "string",
|
"type": "string",
|
||||||
},
|
},
|
||||||
|
"command_line": {
|
||||||
|
"description": "Last command line used by GNS3 to start QEMU",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
"required": ["vm_id",
|
"required": ["vm_id",
|
||||||
@ -598,7 +602,8 @@ QEMU_OBJECT_SCHEMA = {
|
|||||||
"cpu_throttling",
|
"cpu_throttling",
|
||||||
"process_priority",
|
"process_priority",
|
||||||
"options",
|
"options",
|
||||||
"vm_directory"]
|
"vm_directory",
|
||||||
|
"command_line"]
|
||||||
}
|
}
|
||||||
|
|
||||||
QEMU_BINARY_FILTER_SCHEMA = {
|
QEMU_BINARY_FILTER_SCHEMA = {
|
||||||
|
@ -147,20 +147,6 @@ VBOX_UPDATE_SCHEMA = {
|
|||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
}
|
}
|
||||||
|
|
||||||
VBOX_CAPTURE_SCHEMA = {
|
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
|
||||||
"description": "Request validation to start a packet capture on a VirtualBox VM instance port",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"capture_file_name": {
|
|
||||||
"description": "Capture file name",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"additionalProperties": False,
|
|
||||||
"required": ["capture_file_name"]
|
|
||||||
}
|
|
||||||
|
|
||||||
VBOX_OBJECT_SCHEMA = {
|
VBOX_OBJECT_SCHEMA = {
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||||
|
@ -41,3 +41,24 @@ VM_LIST_IMAGES_SCHEMA = {
|
|||||||
],
|
],
|
||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
VM_CAPTURE_SCHEMA = {
|
||||||
|
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||||
|
"description": "Request validation to start a packet capture on a port",
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"capture_file_name": {
|
||||||
|
"description": "Capture file name",
|
||||||
|
"type": "string",
|
||||||
|
"minLength": 1,
|
||||||
|
},
|
||||||
|
"data_link_type": {
|
||||||
|
"description": "PCAP data link type",
|
||||||
|
"type": "string",
|
||||||
|
"minLength": 1,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"additionalProperties": False,
|
||||||
|
"required": ["capture_file_name"]
|
||||||
|
}
|
||||||
|
@ -140,20 +140,6 @@ VMWARE_UPDATE_SCHEMA = {
|
|||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
}
|
}
|
||||||
|
|
||||||
VMWARE_CAPTURE_SCHEMA = {
|
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
|
||||||
"description": "Request validation to start a packet capture on a VMware VM instance port",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"capture_file_name": {
|
|
||||||
"description": "Capture file name",
|
|
||||||
"type": "string",
|
|
||||||
"minLength": 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"additionalProperties": False,
|
|
||||||
"required": ["capture_file_name"]
|
|
||||||
}
|
|
||||||
|
|
||||||
VMWARE_OBJECT_SCHEMA = {
|
VMWARE_OBJECT_SCHEMA = {
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||||
|
@ -121,7 +121,11 @@ VPCS_OBJECT_SCHEMA = {
|
|||||||
"description": "Path of the VPCS startup script relative to project directory",
|
"description": "Path of the VPCS startup script relative to project directory",
|
||||||
"type": ["string", "null"]
|
"type": ["string", "null"]
|
||||||
},
|
},
|
||||||
|
"command_line": {
|
||||||
|
"description": "Last command line used by GNS3 to start QEMU",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"additionalProperties": False,
|
"additionalProperties": False,
|
||||||
"required": ["name", "vm_id", "status", "console", "project_id", "startup_script_path"]
|
"required": ["name", "vm_id", "status", "console", "project_id", "startup_script_path", "command_line"]
|
||||||
}
|
}
|
||||||
|
@ -30,7 +30,6 @@ import time
|
|||||||
import atexit
|
import atexit
|
||||||
|
|
||||||
from .web.route import Route
|
from .web.route import Route
|
||||||
from .web.request_handler import RequestHandler
|
|
||||||
from .config import Config
|
from .config import Config
|
||||||
from .modules import MODULES
|
from .modules import MODULES
|
||||||
from .modules.port_manager import PortManager
|
from .modules.port_manager import PortManager
|
||||||
@ -106,7 +105,7 @@ class Server:
|
|||||||
|
|
||||||
def _signal_handling(self):
|
def _signal_handling(self):
|
||||||
|
|
||||||
def signal_handler(signame):
|
def signal_handler(signame, *args):
|
||||||
log.warning("Server has got signal {}, exiting...".format(signame))
|
log.warning("Server has got signal {}, exiting...".format(signame))
|
||||||
asyncio.async(self.shutdown_server())
|
asyncio.async(self.shutdown_server())
|
||||||
|
|
||||||
@ -199,8 +198,13 @@ class Server:
|
|||||||
Starts the server.
|
Starts the server.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
server_logger = logging.getLogger('aiohttp.server')
|
||||||
|
# In debug mode we don't use the standard request log but a more complete in response.py
|
||||||
|
if log.getEffectiveLevel() == logging.DEBUG:
|
||||||
|
server_logger.setLevel(logging.CRITICAL)
|
||||||
|
|
||||||
logger = logging.getLogger("asyncio")
|
logger = logging.getLogger("asyncio")
|
||||||
logger.setLevel(logging.WARNING)
|
logger.setLevel(logging.ERROR)
|
||||||
|
|
||||||
server_config = Config.instance().get_section_config("Server")
|
server_config = Config.instance().get_section_config("Server")
|
||||||
if sys.platform.startswith("win"):
|
if sys.platform.startswith("win"):
|
||||||
@ -239,7 +243,7 @@ class Server:
|
|||||||
m.port_manager = self._port_manager
|
m.port_manager = self._port_manager
|
||||||
|
|
||||||
log.info("Starting server on {}:{}".format(self._host, self._port))
|
log.info("Starting server on {}:{}".format(self._host, self._port))
|
||||||
self._handler = app.make_handler(handler=RequestHandler)
|
self._handler = app.make_handler()
|
||||||
server = self._run_application(self._handler, ssl_context)
|
server = self._run_application(self._handler, ssl_context)
|
||||||
self._loop.run_until_complete(server)
|
self._loop.run_until_complete(server)
|
||||||
self._signal_handling()
|
self._signal_handling()
|
||||||
|
@ -19,13 +19,14 @@
|
|||||||
Represents a uBridge hypervisor and starts/stops the associated uBridge process.
|
Represents a uBridge hypervisor and starts/stops the associated uBridge process.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
import asyncio
|
import asyncio
|
||||||
import socket
|
import socket
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from pkg_resources import parse_version
|
from gns3server.utils import parse_version
|
||||||
from gns3server.utils.asyncio import wait_for_process_termination
|
from gns3server.utils.asyncio import wait_for_process_termination
|
||||||
from gns3server.utils.asyncio import subprocess_check_output
|
from gns3server.utils.asyncio import subprocess_check_output
|
||||||
from .ubridge_hypervisor import UBridgeHypervisor
|
from .ubridge_hypervisor import UBridgeHypervisor
|
||||||
@ -119,15 +120,15 @@ class Hypervisor(UBridgeHypervisor):
|
|||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def _check_ubridge_version(self):
|
def _check_ubridge_version(self):
|
||||||
"""
|
"""
|
||||||
Checks if the ubridge executable version is >= 0.9.1
|
Checks if the ubridge executable version is >= 0.9.4
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
output = yield from subprocess_check_output(self._path, "-v", cwd=self._working_dir)
|
output = yield from subprocess_check_output(self._path, "-v", cwd=self._working_dir)
|
||||||
match = re.search("ubridge version ([0-9a-z\.]+)", output)
|
match = re.search("ubridge version ([0-9a-z\.]+)", output)
|
||||||
if match:
|
if match:
|
||||||
version = match.group(1)
|
version = match.group(1)
|
||||||
if parse_version(version) < parse_version("0.9.1"):
|
if parse_version(version) < parse_version("0.9.4"):
|
||||||
raise UbridgeError("uBridge executable version must be >= 0.9.1")
|
raise UbridgeError("uBridge executable version must be >= 0.9.4")
|
||||||
else:
|
else:
|
||||||
raise UbridgeError("Could not determine uBridge version for {}".format(self._path))
|
raise UbridgeError("Could not determine uBridge version for {}".format(self._path))
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
except (OSError, subprocess.SubprocessError) as e:
|
||||||
@ -140,6 +141,12 @@ class Hypervisor(UBridgeHypervisor):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
yield from self._check_ubridge_version()
|
yield from self._check_ubridge_version()
|
||||||
|
env = os.environ.copy()
|
||||||
|
if sys.platform.startswith("win"):
|
||||||
|
# add the Npcap directory to $PATH to force Dynamips to use npcap DLL instead of Winpcap (if installed)
|
||||||
|
system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap")
|
||||||
|
if os.path.isdir(system_root):
|
||||||
|
env["PATH"] = system_root + ';' + env["PATH"]
|
||||||
try:
|
try:
|
||||||
command = self._build_command()
|
command = self._build_command()
|
||||||
log.info("starting ubridge: {}".format(command))
|
log.info("starting ubridge: {}".format(command))
|
||||||
@ -149,7 +156,8 @@ class Hypervisor(UBridgeHypervisor):
|
|||||||
self._process = yield from asyncio.create_subprocess_exec(*command,
|
self._process = yield from asyncio.create_subprocess_exec(*command,
|
||||||
stdout=fd,
|
stdout=fd,
|
||||||
stderr=subprocess.STDOUT,
|
stderr=subprocess.STDOUT,
|
||||||
cwd=self._working_dir)
|
cwd=self._working_dir,
|
||||||
|
env=env)
|
||||||
|
|
||||||
log.info("ubridge started PID={}".format(self._process.pid))
|
log.info("ubridge started PID={}".format(self._process.pid))
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
except (OSError, subprocess.SubprocessError) as e:
|
||||||
@ -225,4 +233,6 @@ class Hypervisor(UBridgeHypervisor):
|
|||||||
|
|
||||||
command = [self._path]
|
command = [self._path]
|
||||||
command.extend(["-H", "{}:{}".format(self._host, self._port)])
|
command.extend(["-H", "{}:{}".format(self._host, self._port)])
|
||||||
|
if log.getEffectiveLevel() == logging.DEBUG:
|
||||||
|
command.extend(["-d", "2"])
|
||||||
return command
|
return command
|
||||||
|
@ -24,3 +24,10 @@ class UbridgeError(Exception):
|
|||||||
|
|
||||||
def __init__(self, message):
|
def __init__(self, message):
|
||||||
Exception.__init__(self, message)
|
Exception.__init__(self, message)
|
||||||
|
|
||||||
|
|
||||||
|
class UbridgeNamespaceError(Exception):
|
||||||
|
"""
|
||||||
|
Raised if ubridge can not move a container to a namespace
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
@ -16,6 +16,8 @@
|
|||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
|
||||||
|
import re
|
||||||
|
import textwrap
|
||||||
import posixpath
|
import posixpath
|
||||||
|
|
||||||
|
|
||||||
@ -26,3 +28,62 @@ def force_unix_path(path):
|
|||||||
|
|
||||||
path = path.replace("\\", "/")
|
path = path.replace("\\", "/")
|
||||||
return posixpath.normpath(path)
|
return posixpath.normpath(path)
|
||||||
|
|
||||||
|
|
||||||
|
def macaddress_to_int(mac_address):
|
||||||
|
"""
|
||||||
|
Convert a macaddress with the format 00:0c:29:11:b0:0a to a int
|
||||||
|
|
||||||
|
:param mac_address: The mac address
|
||||||
|
|
||||||
|
:returns: Integer
|
||||||
|
"""
|
||||||
|
return int(mac_address.replace(":", ""), 16)
|
||||||
|
|
||||||
|
|
||||||
|
def int_to_macaddress(integer):
|
||||||
|
"""
|
||||||
|
Convert an integer to a mac address
|
||||||
|
"""
|
||||||
|
return ":".join(textwrap.wrap("%012x" % (integer), width=2))
|
||||||
|
|
||||||
|
|
||||||
|
def parse_version(version):
|
||||||
|
"""
|
||||||
|
Return a comparable tuple from a version string. We try to force tuple to semver with version like 1.2.0
|
||||||
|
|
||||||
|
Replace pkg_resources.parse_version which now display a warning when use for comparing version with tuple
|
||||||
|
|
||||||
|
:returns: Version string as comparable tuple
|
||||||
|
"""
|
||||||
|
|
||||||
|
release_type_found = False
|
||||||
|
version_infos = re.split('(\.|[a-z]+)', version)
|
||||||
|
version = []
|
||||||
|
for info in version_infos:
|
||||||
|
if info == '.' or len(info) == 0:
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
info = int(info)
|
||||||
|
# We pad with zero to compare only on string
|
||||||
|
# This avoid issue when comparing version with different length
|
||||||
|
version.append("%06d" % (info,))
|
||||||
|
except ValueError:
|
||||||
|
# Force to a version with three number
|
||||||
|
if len(version) == 1:
|
||||||
|
version.append("00000")
|
||||||
|
if len(version) == 2:
|
||||||
|
version.append("000000")
|
||||||
|
# We want rc to be at lower level than dev version
|
||||||
|
if info == 'rc':
|
||||||
|
info = 'c'
|
||||||
|
version.append(info)
|
||||||
|
release_type_found = True
|
||||||
|
if release_type_found is False:
|
||||||
|
# Force to a version with three number
|
||||||
|
if len(version) == 1:
|
||||||
|
version.append("00000")
|
||||||
|
if len(version) == 2:
|
||||||
|
version.append("000000")
|
||||||
|
version.append("final")
|
||||||
|
return tuple(version)
|
||||||
|
@ -16,24 +16,26 @@
|
|||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
|
||||||
|
import functools
|
||||||
import asyncio
|
import asyncio
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def wait_run_in_executor(func, *args):
|
def wait_run_in_executor(func, *args, **kwargs):
|
||||||
"""
|
"""
|
||||||
Run blocking code in a different thread and wait
|
Run blocking code in a different thread and wait
|
||||||
for the result.
|
for the result.
|
||||||
|
|
||||||
:param func: Run this function in a different thread
|
:param func: Run this function in a different thread
|
||||||
:param args: Parameters of the function
|
:param args: Parameters of the function
|
||||||
|
:param kwargs: Keyword parameters of the function
|
||||||
:returns: Return the result of the function
|
:returns: Return the result of the function
|
||||||
"""
|
"""
|
||||||
|
|
||||||
loop = asyncio.get_event_loop()
|
loop = asyncio.get_event_loop()
|
||||||
future = loop.run_in_executor(None, func, *args)
|
future = loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
|
||||||
yield from asyncio.wait([future])
|
yield from asyncio.wait([future])
|
||||||
return future.result()
|
return future.result()
|
||||||
|
|
||||||
@ -54,7 +56,7 @@ def subprocess_check_output(*args, cwd=None, env=None):
|
|||||||
if output is None:
|
if output is None:
|
||||||
return ""
|
return ""
|
||||||
# If we received garbage we ignore invalid characters
|
# If we received garbage we ignore invalid characters
|
||||||
# it should happend only when user try to use another binary
|
# it should happend only when user try to use another binary
|
||||||
# and the code of VPCS, dynamips... Will detect it's not the correct binary
|
# and the code of VPCS, dynamips... Will detect it's not the correct binary
|
||||||
return output.decode("utf-8", errors="ignore")
|
return output.decode("utf-8", errors="ignore")
|
||||||
|
|
||||||
@ -125,3 +127,24 @@ def wait_for_named_pipe_creation(pipe_path, timeout=60):
|
|||||||
else:
|
else:
|
||||||
return
|
return
|
||||||
raise asyncio.TimeoutError()
|
raise asyncio.TimeoutError()
|
||||||
|
|
||||||
|
|
||||||
|
def locked_coroutine(f):
|
||||||
|
"""
|
||||||
|
Method decorator that replace asyncio.coroutine that warranty
|
||||||
|
that this specific method of this class instance will not we
|
||||||
|
executed twice at the same time
|
||||||
|
"""
|
||||||
|
@asyncio.coroutine
|
||||||
|
def new_function(*args, **kwargs):
|
||||||
|
|
||||||
|
# In the instance of the class we will store
|
||||||
|
# a lock has an attribute.
|
||||||
|
lock_var_name = "__" + f.__name__ + "_lock"
|
||||||
|
if not hasattr(args[0], lock_var_name):
|
||||||
|
setattr(args[0], lock_var_name, asyncio.Lock())
|
||||||
|
|
||||||
|
with (yield from getattr(args[0], lock_var_name)):
|
||||||
|
return (yield from f(*args, **kwargs))
|
||||||
|
|
||||||
|
return new_function
|
116
gns3server/utils/asyncio/raw_command_server.py
Normal file
116
gns3server/utils/asyncio/raw_command_server.py
Normal file
@ -0,0 +1,116 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright (C) 2014 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import re
|
||||||
|
import asyncio
|
||||||
|
import asyncio.subprocess
|
||||||
|
|
||||||
|
import logging
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
READ_SIZE = 4096
|
||||||
|
|
||||||
|
|
||||||
|
class AsyncioRawCommandServer:
|
||||||
|
"""
|
||||||
|
Expose a process on the network his stdoud and stdin will be forward
|
||||||
|
on network
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, command, replaces=[]):
|
||||||
|
"""
|
||||||
|
:param command: Command to run
|
||||||
|
:param replaces: List of tuple to replace in the output ex: [(b":8080", b":6000")]
|
||||||
|
"""
|
||||||
|
self._command = command
|
||||||
|
self._replaces = replaces
|
||||||
|
# We limit number of process
|
||||||
|
self._lock = asyncio.Semaphore(value=4)
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def run(self, network_reader, network_writer):
|
||||||
|
yield from self._lock.acquire()
|
||||||
|
process = yield from asyncio.subprocess.create_subprocess_exec(*self._command,
|
||||||
|
stdout=asyncio.subprocess.PIPE,
|
||||||
|
stderr=asyncio.subprocess.STDOUT,
|
||||||
|
stdin=asyncio.subprocess.PIPE)
|
||||||
|
try:
|
||||||
|
yield from self._process(network_reader, network_writer, process.stdout, process.stdin)
|
||||||
|
except ConnectionResetError:
|
||||||
|
network_writer.close()
|
||||||
|
if process.returncode is None:
|
||||||
|
process.kill()
|
||||||
|
yield from process.wait()
|
||||||
|
self._lock.release()
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def _process(self, network_reader, network_writer, process_reader, process_writer):
|
||||||
|
network_read = asyncio.async(network_reader.read(READ_SIZE))
|
||||||
|
reader_read = asyncio.async(process_reader.read(READ_SIZE))
|
||||||
|
timeout = 30
|
||||||
|
|
||||||
|
while True:
|
||||||
|
done, pending = yield from asyncio.wait(
|
||||||
|
[
|
||||||
|
network_read,
|
||||||
|
reader_read
|
||||||
|
],
|
||||||
|
timeout=timeout,
|
||||||
|
return_when=asyncio.FIRST_COMPLETED)
|
||||||
|
if len(done) == 0:
|
||||||
|
raise ConnectionResetError()
|
||||||
|
for coro in done:
|
||||||
|
data = coro.result()
|
||||||
|
if coro == network_read:
|
||||||
|
if network_reader.at_eof():
|
||||||
|
raise ConnectionResetError()
|
||||||
|
|
||||||
|
network_read = asyncio.async(network_reader.read(READ_SIZE))
|
||||||
|
|
||||||
|
process_writer.write(data)
|
||||||
|
yield from process_writer.drain()
|
||||||
|
elif coro == reader_read:
|
||||||
|
if process_reader.at_eof():
|
||||||
|
raise ConnectionResetError()
|
||||||
|
|
||||||
|
reader_read = asyncio.async(process_reader.read(READ_SIZE))
|
||||||
|
|
||||||
|
for replace in self._replaces:
|
||||||
|
data = data.replace(replace[0], replace[1])
|
||||||
|
timeout = 2 # We reduce the timeout when the process start to return stuff to avoid problem with server not closing the connection
|
||||||
|
|
||||||
|
network_writer.write(data)
|
||||||
|
yield from network_writer.drain()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
logging.basicConfig(level=logging.DEBUG)
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
|
||||||
|
command = ["nc", "localhost", "80"]
|
||||||
|
server = AsyncioRawCommandServer(command)
|
||||||
|
coro = asyncio.start_server(server.run, '127.0.0.1', 4444, loop=loop)
|
||||||
|
s = loop.run_until_complete(coro)
|
||||||
|
|
||||||
|
try:
|
||||||
|
loop.run_forever()
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
pass
|
||||||
|
# Close the server
|
||||||
|
s.close()
|
||||||
|
loop.run_until_complete(s.wait_closed())
|
||||||
|
loop.close()
|
285
gns3server/utils/asyncio/telnet_server.py
Normal file
285
gns3server/utils/asyncio/telnet_server.py
Normal file
@ -0,0 +1,285 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright (C) 2014 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import re
|
||||||
|
import asyncio
|
||||||
|
import asyncio.subprocess
|
||||||
|
|
||||||
|
import logging
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Mostly from https://code.google.com/p/miniboa/source/browse/trunk/miniboa/telnet.py
|
||||||
|
|
||||||
|
# Telnet Commands
|
||||||
|
SE = 240 # End of sub-negotiation parameters
|
||||||
|
NOP = 241 # No operation
|
||||||
|
DATMK = 242 # Data stream portion of a sync.
|
||||||
|
BREAK = 243 # NVT Character BRK
|
||||||
|
IP = 244 # Interrupt Process
|
||||||
|
AO = 245 # Abort Output
|
||||||
|
AYT = 246 # Are you there
|
||||||
|
EC = 247 # Erase Character
|
||||||
|
EL = 248 # Erase Line
|
||||||
|
GA = 249 # The Go Ahead Signal
|
||||||
|
SB = 250 # Sub-option to follow
|
||||||
|
WILL = 251 # Will; request or confirm option begin
|
||||||
|
WONT = 252 # Wont; deny option request
|
||||||
|
DO = 253 # Do = Request or confirm remote option
|
||||||
|
DONT = 254 # Don't = Demand or confirm option halt
|
||||||
|
IAC = 255 # Interpret as Command
|
||||||
|
SEND = 1 # Sub-process negotiation SEND command
|
||||||
|
IS = 0 # Sub-process negotiation IS command
|
||||||
|
|
||||||
|
# Telnet Options
|
||||||
|
BINARY = 0 # Transmit Binary
|
||||||
|
ECHO = 1 # Echo characters back to sender
|
||||||
|
RECON = 2 # Reconnection
|
||||||
|
SGA = 3 # Suppress Go-Ahead
|
||||||
|
TMARK = 6 # Timing Mark
|
||||||
|
TTYPE = 24 # Terminal Type
|
||||||
|
NAWS = 31 # Negotiate About Window Size
|
||||||
|
LINEMO = 34 # Line Mode
|
||||||
|
|
||||||
|
READ_SIZE = 1024
|
||||||
|
|
||||||
|
|
||||||
|
class AsyncioTelnetServer:
|
||||||
|
|
||||||
|
def __init__(self, reader=None, writer=None, binary=True, echo=False):
|
||||||
|
self._reader = reader
|
||||||
|
self._writer = writer
|
||||||
|
self._clients = set()
|
||||||
|
self._lock = asyncio.Lock()
|
||||||
|
self._reader_process = None
|
||||||
|
self._current_read = None
|
||||||
|
|
||||||
|
self._binary = binary
|
||||||
|
# If echo is true when the client send data
|
||||||
|
# the data is echo on his terminal by telnet otherwise
|
||||||
|
# it's our job (or the wrapped app) to send back the data
|
||||||
|
self._echo = echo
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def run(self, network_reader, network_writer):
|
||||||
|
# Keep track of connected clients
|
||||||
|
self._clients.add(network_writer)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Send initial telnet session opening
|
||||||
|
if self._echo:
|
||||||
|
network_writer.write(bytes([IAC, WILL, ECHO]))
|
||||||
|
else:
|
||||||
|
network_writer.write(bytes([
|
||||||
|
IAC, WONT, ECHO,
|
||||||
|
IAC, DONT, ECHO]))
|
||||||
|
|
||||||
|
if self._binary:
|
||||||
|
network_writer.write(bytes([
|
||||||
|
IAC, WILL, SGA,
|
||||||
|
IAC, WILL, BINARY,
|
||||||
|
IAC, DO, BINARY]))
|
||||||
|
else:
|
||||||
|
network_writer.write(bytes([
|
||||||
|
IAC, WONT, SGA,
|
||||||
|
IAC, DONT, SGA,
|
||||||
|
IAC, WONT, BINARY,
|
||||||
|
IAC, DONT, BINARY]))
|
||||||
|
yield from network_writer.drain()
|
||||||
|
|
||||||
|
yield from self._process(network_reader, network_writer)
|
||||||
|
except ConnectionResetError:
|
||||||
|
with (yield from self._lock):
|
||||||
|
|
||||||
|
network_writer.close()
|
||||||
|
|
||||||
|
if self._reader_process == network_reader:
|
||||||
|
self._reader_process = None
|
||||||
|
# Cancel current read from this reader
|
||||||
|
self._current_read.cancel()
|
||||||
|
self._clients.remove(network_writer)
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def _get_reader(self, network_reader):
|
||||||
|
"""
|
||||||
|
Get a reader or None if another reader is already reading.
|
||||||
|
"""
|
||||||
|
with (yield from self._lock):
|
||||||
|
if self._reader_process is None:
|
||||||
|
self._reader_process = network_reader
|
||||||
|
if self._reader_process == network_reader:
|
||||||
|
self._current_read = asyncio.async(self._reader.read(READ_SIZE))
|
||||||
|
return self._current_read
|
||||||
|
return None
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def _process(self, network_reader, network_writer):
|
||||||
|
network_read = asyncio.async(network_reader.read(READ_SIZE))
|
||||||
|
reader_read = yield from self._get_reader(network_reader)
|
||||||
|
|
||||||
|
while True:
|
||||||
|
if reader_read is None:
|
||||||
|
reader_read = yield from self._get_reader(network_reader)
|
||||||
|
if reader_read is None:
|
||||||
|
done, pending = yield from asyncio.wait(
|
||||||
|
[
|
||||||
|
network_read,
|
||||||
|
],
|
||||||
|
timeout=1,
|
||||||
|
return_when=asyncio.FIRST_COMPLETED)
|
||||||
|
else:
|
||||||
|
done, pending = yield from asyncio.wait(
|
||||||
|
[
|
||||||
|
network_read,
|
||||||
|
reader_read
|
||||||
|
],
|
||||||
|
return_when=asyncio.FIRST_COMPLETED)
|
||||||
|
for coro in done:
|
||||||
|
data = coro.result()
|
||||||
|
|
||||||
|
if coro == network_read:
|
||||||
|
if network_reader.at_eof():
|
||||||
|
raise ConnectionResetError()
|
||||||
|
|
||||||
|
network_read = asyncio.async(network_reader.read(READ_SIZE))
|
||||||
|
|
||||||
|
if IAC in data:
|
||||||
|
data = yield from self._IAC_parser(data, network_reader, network_writer)
|
||||||
|
if len(data) == 0:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not self._binary:
|
||||||
|
data = data.replace(b"\r\n", b"\n")
|
||||||
|
|
||||||
|
if self._writer:
|
||||||
|
self._writer.write(data)
|
||||||
|
yield from self._writer.drain()
|
||||||
|
elif coro == reader_read:
|
||||||
|
if self._reader.at_eof():
|
||||||
|
raise ConnectionResetError()
|
||||||
|
|
||||||
|
reader_read = yield from self._get_reader(network_reader)
|
||||||
|
|
||||||
|
# Replicate the output on all clients
|
||||||
|
for writer in self._clients:
|
||||||
|
writer.write(data)
|
||||||
|
yield from writer.drain()
|
||||||
|
|
||||||
|
def _IAC_parser(self, buf, network_reader, network_writer):
|
||||||
|
"""
|
||||||
|
Processes and removes any Telnet commands from the buffer.
|
||||||
|
|
||||||
|
:param buf: buffer
|
||||||
|
:returns: buffer minus Telnet commands
|
||||||
|
"""
|
||||||
|
|
||||||
|
skip_to = 0
|
||||||
|
while True:
|
||||||
|
# Locate an IAC to process
|
||||||
|
iac_loc = buf.find(IAC, skip_to)
|
||||||
|
if iac_loc < 0:
|
||||||
|
break
|
||||||
|
|
||||||
|
# Get the TELNET command
|
||||||
|
iac_cmd = bytearray([IAC])
|
||||||
|
try:
|
||||||
|
iac_cmd.append(buf[iac_loc + 1])
|
||||||
|
except IndexError:
|
||||||
|
d = yield from network_reader.read(1)
|
||||||
|
buf.extend(d)
|
||||||
|
iac_cmd.append(buf[iac_loc + 1])
|
||||||
|
|
||||||
|
# Is this just a 2-byte TELNET command?
|
||||||
|
if iac_cmd[1] not in [WILL, WONT, DO, DONT]:
|
||||||
|
if iac_cmd[1] == AYT:
|
||||||
|
log.debug("Telnet server received Are-You-There (AYT)")
|
||||||
|
network_writer.write(b'\r\nYour Are-You-There received. I am here.\r\n')
|
||||||
|
elif iac_cmd[1] == IAC:
|
||||||
|
# It's data, not an IAC
|
||||||
|
iac_cmd.pop()
|
||||||
|
# This prevents the 0xff from being
|
||||||
|
# interrupted as yet another IAC
|
||||||
|
skip_to = iac_loc + 1
|
||||||
|
log.debug("Received IAC IAC")
|
||||||
|
elif iac_cmd[1] == NOP:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
log.debug("Unhandled telnet command: "
|
||||||
|
"{0:#x} {1:#x}".format(*iac_cmd))
|
||||||
|
|
||||||
|
# This must be a 3-byte TELNET command
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
iac_cmd.append(buf[iac_loc + 2])
|
||||||
|
except IndexError:
|
||||||
|
d = yield from network_reader.read(1)
|
||||||
|
buf.extend(d)
|
||||||
|
iac_cmd.append(buf[iac_loc + 2])
|
||||||
|
# We do ECHO, SGA, and BINARY. Period.
|
||||||
|
if iac_cmd[1] == DO:
|
||||||
|
if iac_cmd[2] not in [ECHO, SGA, BINARY]:
|
||||||
|
network_writer.write(bytes([IAC, WONT, iac_cmd[2]]))
|
||||||
|
log.debug("Telnet WON'T {:#x}".format(iac_cmd[2]))
|
||||||
|
else:
|
||||||
|
if iac_cmd[2] == SGA:
|
||||||
|
if self._binary:
|
||||||
|
network_writer.write(bytes([IAC, WILL, iac_cmd[2]]))
|
||||||
|
else:
|
||||||
|
network_writer.write(bytes([IAC, WONT, iac_cmd[2]]))
|
||||||
|
log.debug("Telnet WON'T {:#x}".format(iac_cmd[2]))
|
||||||
|
|
||||||
|
elif iac_cmd[1] == DONT:
|
||||||
|
log.debug("Unhandled DONT telnet command: "
|
||||||
|
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
|
||||||
|
elif iac_cmd[1] == WILL:
|
||||||
|
log.debug("Unhandled WILL telnet command: "
|
||||||
|
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
|
||||||
|
elif iac_cmd[1] == WONT:
|
||||||
|
log.debug("Unhandled WONT telnet command: "
|
||||||
|
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
|
||||||
|
else:
|
||||||
|
log.debug("Unhandled telnet command: "
|
||||||
|
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
|
||||||
|
|
||||||
|
# Remove the entire TELNET command from the buffer
|
||||||
|
buf = buf.replace(iac_cmd, b'', 1)
|
||||||
|
|
||||||
|
yield from network_writer.drain()
|
||||||
|
|
||||||
|
# Return the new copy of the buffer, minus telnet commands
|
||||||
|
return buf
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
logging.basicConfig(level=logging.DEBUG)
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
|
||||||
|
process = loop.run_until_complete(asyncio.async(asyncio.subprocess.create_subprocess_exec("/bin/sh", "-i",
|
||||||
|
stdout=asyncio.subprocess.PIPE,
|
||||||
|
stderr=asyncio.subprocess.STDOUT,
|
||||||
|
stdin=asyncio.subprocess.PIPE)))
|
||||||
|
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=False, echo=False)
|
||||||
|
|
||||||
|
coro = asyncio.start_server(server.run, '127.0.0.1', 4444, loop=loop)
|
||||||
|
s = loop.run_until_complete(coro)
|
||||||
|
|
||||||
|
try:
|
||||||
|
loop.run_forever()
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
pass
|
||||||
|
# Close the server
|
||||||
|
s.close()
|
||||||
|
loop.run_until_complete(s.wait_closed())
|
||||||
|
loop.close()
|
@ -36,7 +36,8 @@ def md5sum(path):
|
|||||||
try:
|
try:
|
||||||
with open(path + '.md5sum') as f:
|
with open(path + '.md5sum') as f:
|
||||||
return f.read()
|
return f.read()
|
||||||
except OSError:
|
# Unicode error is when user rename an image to .md5sum ....
|
||||||
|
except (OSError, UnicodeDecodeError):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -23,7 +23,7 @@ import struct
|
|||||||
import psutil
|
import psutil
|
||||||
|
|
||||||
if psutil.version_info < (3, 0, 0):
|
if psutil.version_info < (3, 0, 0):
|
||||||
raise Exception("psutil version should >= 3.0.0. If you are under ubuntu/debian install gns3 via apt instead of pip")
|
raise Exception("psutil version should >= 3.0.0. If you are under Ubuntu/Debian install gns3 via apt instead of pip")
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
@ -59,6 +59,7 @@ def _get_windows_interfaces_from_registry():
|
|||||||
interfaces.append({"id": npf_interface,
|
interfaces.append({"id": npf_interface,
|
||||||
"name": name,
|
"name": name,
|
||||||
"ip_address": ip_address,
|
"ip_address": ip_address,
|
||||||
|
"mac_address": "", # TODO: find MAC address in registry
|
||||||
"netcard": netcard})
|
"netcard": netcard})
|
||||||
winreg.CloseKey(hkeyinterface)
|
winreg.CloseKey(hkeyinterface)
|
||||||
winreg.CloseKey(hkeycon)
|
winreg.CloseKey(hkeycon)
|
||||||
@ -99,6 +100,7 @@ def get_windows_interfaces():
|
|||||||
interfaces.append({"id": npf_interface,
|
interfaces.append({"id": npf_interface,
|
||||||
"name": adapter.NetConnectionID,
|
"name": adapter.NetConnectionID,
|
||||||
"ip_address": ip_address,
|
"ip_address": ip_address,
|
||||||
|
"mac_address": adapter.MACAddress,
|
||||||
"netcard": adapter.name})
|
"netcard": adapter.name})
|
||||||
except (AttributeError, pywintypes.com_error):
|
except (AttributeError, pywintypes.com_error):
|
||||||
log.warn("Could not use the COM service to retrieve interface info, trying using the registry...")
|
log.warn("Could not use the COM service to retrieve interface info, trying using the registry...")
|
||||||
@ -137,6 +139,23 @@ def is_interface_up(interface):
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def _check_windows_service(service_name):
|
||||||
|
|
||||||
|
import pywintypes
|
||||||
|
import win32service
|
||||||
|
import win32serviceutil
|
||||||
|
|
||||||
|
try:
|
||||||
|
if win32serviceutil.QueryServiceStatus(service_name, None)[1] != win32service.SERVICE_RUNNING:
|
||||||
|
return False
|
||||||
|
except pywintypes.error as e:
|
||||||
|
if e.winerror == 1060:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
raise aiohttp.web.HTTPInternalServerError(text="Could not check if the {} service is running: {}".format(service_name, e.strerror))
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
def interfaces():
|
def interfaces():
|
||||||
"""
|
"""
|
||||||
Gets the network interfaces on this server.
|
Gets the network interfaces on this server.
|
||||||
@ -148,21 +167,32 @@ def interfaces():
|
|||||||
if not sys.platform.startswith("win"):
|
if not sys.platform.startswith("win"):
|
||||||
for interface in sorted(psutil.net_if_addrs().keys()):
|
for interface in sorted(psutil.net_if_addrs().keys()):
|
||||||
ip_address = ""
|
ip_address = ""
|
||||||
|
mac_address = ""
|
||||||
for addr in psutil.net_if_addrs()[interface]:
|
for addr in psutil.net_if_addrs()[interface]:
|
||||||
# get the first available IPv4 address only
|
# get the first available IPv4 address only
|
||||||
if addr.family == socket.AF_INET:
|
if addr.family == socket.AF_INET:
|
||||||
ip_address = addr.address
|
ip_address = addr.address
|
||||||
break
|
if addr.family == psutil.AF_LINK:
|
||||||
|
mac_address = addr.address
|
||||||
results.append({"id": interface,
|
results.append({"id": interface,
|
||||||
"name": interface,
|
"name": interface,
|
||||||
"ip_address": ip_address})
|
"ip_address": ip_address,
|
||||||
|
"mac_address": mac_address})
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
results = get_windows_interfaces()
|
service_installed = True
|
||||||
|
if not _check_windows_service("npf") and not _check_windows_service("npcap"):
|
||||||
|
service_installed = False
|
||||||
|
else:
|
||||||
|
results = get_windows_interfaces()
|
||||||
except ImportError:
|
except ImportError:
|
||||||
message = "pywin32 module is not installed, please install it on the server to get the available interface names"
|
message = "pywin32 module is not installed, please install it on the server to get the available interface names"
|
||||||
raise aiohttp.web.HTTPInternalServerError(text=message)
|
raise aiohttp.web.HTTPInternalServerError(text=message)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log.error("uncaught exception {type}".format(type=type(e)), exc_info=1)
|
log.error("uncaught exception {type}".format(type=type(e)), exc_info=1)
|
||||||
raise aiohttp.web.HTTPInternalServerError(text="uncaught exception: {}".format(e))
|
raise aiohttp.web.HTTPInternalServerError(text="uncaught exception: {}".format(e))
|
||||||
|
|
||||||
|
if service_installed is False:
|
||||||
|
raise aiohttp.web.HTTPInternalServerError(text="The Winpcap or Npcap is not installed or running")
|
||||||
|
|
||||||
return results
|
return results
|
||||||
|
@ -166,7 +166,10 @@ def vmnet_windows(args, vmnet_range_start, vmnet_range_end):
|
|||||||
continue
|
continue
|
||||||
print("Adding vmnet{}...".format(vmnet_number))
|
print("Adding vmnet{}...".format(vmnet_number))
|
||||||
os.system('"{}" -- add adapter vmnet{}'.format(vnetlib_path, vmnet_number))
|
os.system('"{}" -- add adapter vmnet{}'.format(vnetlib_path, vmnet_number))
|
||||||
|
os.system("net stop npf")
|
||||||
|
os.system("net start npf")
|
||||||
|
os.system("net stop npcap")
|
||||||
|
os.system("net start npcap")
|
||||||
|
|
||||||
def vmnet_unix(args, vmnet_range_start, vmnet_range_end):
|
def vmnet_unix(args, vmnet_range_start, vmnet_range_end):
|
||||||
"""
|
"""
|
@ -23,5 +23,5 @@
|
|||||||
# or negative for a release candidate or beta (after the base version
|
# or negative for a release candidate or beta (after the base version
|
||||||
# number has been incremented)
|
# number has been incremented)
|
||||||
|
|
||||||
__version__ = "1.4.1"
|
__version__ = "1.5.3"
|
||||||
__version_info__ = (1, 4, 1, 0)
|
__version_info__ = (1, 5, 3, 0)
|
||||||
|
@ -74,7 +74,7 @@ class ColouredStreamHandler(logging.StreamHandler):
|
|||||||
stream.write(msg)
|
stream.write(msg)
|
||||||
stream.write(self.terminator)
|
stream.write(self.terminator)
|
||||||
self.flush()
|
self.flush()
|
||||||
# On OSX when frozen flush raise a BrokenPipeError
|
# On OSX when frozen flush raise a BrokenPipeError
|
||||||
except BrokenPipeError:
|
except BrokenPipeError:
|
||||||
pass
|
pass
|
||||||
except Exception:
|
except Exception:
|
||||||
|
@ -18,6 +18,7 @@
|
|||||||
import json
|
import json
|
||||||
import jsonschema
|
import jsonschema
|
||||||
import aiohttp.web
|
import aiohttp.web
|
||||||
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
import sys
|
import sys
|
||||||
import jinja2
|
import jinja2
|
||||||
@ -41,7 +42,8 @@ class Response(aiohttp.web.Response):
|
|||||||
headers['Server'] = "Python/{0[0]}.{0[1]} GNS3/{1}".format(sys.version_info, __version__)
|
headers['Server'] = "Python/{0[0]}.{0[1]} GNS3/{1}".format(sys.version_info, __version__)
|
||||||
super().__init__(headers=headers, **kwargs)
|
super().__init__(headers=headers, **kwargs)
|
||||||
|
|
||||||
def start(self, request):
|
@asyncio.coroutine
|
||||||
|
def prepare(self, request):
|
||||||
if log.getEffectiveLevel() == logging.DEBUG:
|
if log.getEffectiveLevel() == logging.DEBUG:
|
||||||
log.info("%s %s", request.method, request.path_qs)
|
log.info("%s %s", request.method, request.path_qs)
|
||||||
log.debug("%s", dict(request.headers))
|
log.debug("%s", dict(request.headers))
|
||||||
@ -51,7 +53,7 @@ class Response(aiohttp.web.Response):
|
|||||||
log.debug(dict(self.headers))
|
log.debug(dict(self.headers))
|
||||||
if hasattr(self, 'body') and self.body is not None and self.headers["CONTENT-TYPE"] == "application/json":
|
if hasattr(self, 'body') and self.body is not None and self.headers["CONTENT-TYPE"] == "application/json":
|
||||||
log.debug(json.loads(self.body.decode('utf-8')))
|
log.debug(json.loads(self.body.decode('utf-8')))
|
||||||
return super().start(request)
|
return (yield from super().prepare(request))
|
||||||
|
|
||||||
def html(self, answer):
|
def html(self, answer):
|
||||||
"""
|
"""
|
||||||
|
@ -17,11 +17,13 @@
|
|||||||
|
|
||||||
import sys
|
import sys
|
||||||
import json
|
import json
|
||||||
import jsonschema
|
import urllib
|
||||||
import asyncio
|
import asyncio
|
||||||
import aiohttp
|
import aiohttp
|
||||||
import logging
|
import logging
|
||||||
import traceback
|
import traceback
|
||||||
|
import jsonschema
|
||||||
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
@ -33,10 +35,11 @@ from ..config import Config
|
|||||||
|
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def parse_request(request, input_schema):
|
def parse_request(request, input_schema, raw):
|
||||||
"""Parse body of request and raise HTTP errors in case of problems"""
|
"""Parse body of request and raise HTTP errors in case of problems"""
|
||||||
|
|
||||||
content_length = request.content_length
|
content_length = request.content_length
|
||||||
if content_length is not None and content_length > 0:
|
if content_length is not None and content_length > 0 and not raw:
|
||||||
body = yield from request.read()
|
body = yield from request.read()
|
||||||
try:
|
try:
|
||||||
request.json = json.loads(body.decode('utf-8'))
|
request.json = json.loads(body.decode('utf-8'))
|
||||||
@ -45,13 +48,21 @@ def parse_request(request, input_schema):
|
|||||||
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON {}".format(e))
|
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON {}".format(e))
|
||||||
else:
|
else:
|
||||||
request.json = {}
|
request.json = {}
|
||||||
try:
|
|
||||||
jsonschema.validate(request.json, input_schema)
|
# Parse the query string
|
||||||
except jsonschema.ValidationError as e:
|
if len(request.query_string) > 0:
|
||||||
log.error("Invalid input query. JSON schema error: {}".format(e.message))
|
for (k, v) in urllib.parse.parse_qs(request.query_string).items():
|
||||||
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON: {} in schema: {}".format(
|
request.json[k] = v[0]
|
||||||
e.message,
|
|
||||||
json.dumps(e.schema)))
|
if input_schema:
|
||||||
|
try:
|
||||||
|
jsonschema.validate(request.json, input_schema)
|
||||||
|
except jsonschema.ValidationError as e:
|
||||||
|
log.error("Invalid input query. JSON schema error: {}".format(e.message))
|
||||||
|
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON: {} in schema: {}".format(
|
||||||
|
e.message,
|
||||||
|
json.dumps(e.schema)))
|
||||||
|
|
||||||
return request
|
return request
|
||||||
|
|
||||||
|
|
||||||
@ -161,12 +172,13 @@ class Route(object):
|
|||||||
if api_version is None or raw is True:
|
if api_version is None or raw is True:
|
||||||
response = Response(request=request, route=route, output_schema=output_schema)
|
response = Response(request=request, route=route, output_schema=output_schema)
|
||||||
|
|
||||||
|
request = yield from parse_request(request, None, raw)
|
||||||
yield from func(request, response)
|
yield from func(request, response)
|
||||||
return response
|
return response
|
||||||
|
|
||||||
# API call
|
# API call
|
||||||
try:
|
try:
|
||||||
request = yield from parse_request(request, input_schema)
|
request = yield from parse_request(request, input_schema, raw)
|
||||||
record_file = server_config.get("record")
|
record_file = server_config.get("record")
|
||||||
if record_file:
|
if record_file:
|
||||||
try:
|
try:
|
||||||
@ -180,7 +192,7 @@ class Route(object):
|
|||||||
except aiohttp.web.HTTPBadRequest as e:
|
except aiohttp.web.HTTPBadRequest as e:
|
||||||
response = Response(request=request, route=route)
|
response = Response(request=request, route=route)
|
||||||
response.set_status(e.status)
|
response.set_status(e.status)
|
||||||
response.json({"message": e.text, "status": e.status, "path": route, "request": request.json})
|
response.json({"message": e.text, "status": e.status, "path": route, "request": request.json, "method": request.method})
|
||||||
except aiohttp.web.HTTPException as e:
|
except aiohttp.web.HTTPException as e:
|
||||||
response = Response(request=request, route=route)
|
response = Response(request=request, route=route)
|
||||||
response.set_status(e.status)
|
response.set_status(e.status)
|
||||||
|
@ -1,14 +1,19 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=GNS3 server
|
Description=GNS3 server
|
||||||
|
Wants=network-online.target
|
||||||
|
After=network.target network-online.target
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
Type=forking
|
Type=forking
|
||||||
Environment=statedir=/var/cache/gns3
|
|
||||||
PIDFile=/var/run/gns3.pid
|
|
||||||
ExecStart=/usr/local/bin/gns3server --log /var/log/gns3.log \
|
|
||||||
--pid /var/run/gns3.pid --daemon
|
|
||||||
Restart=on-abort
|
|
||||||
User=gns3
|
User=gns3
|
||||||
|
Group=gns3
|
||||||
|
PermissionsStartOnly=true
|
||||||
|
ExecStartPre=/bin/mkdir -p /var/log/gns3 /var/run/gns3
|
||||||
|
ExecStartPre=/bin/chown -R gns3:gns3 /var/log/gns3 /var/run/gns3
|
||||||
|
ExecStart=/usr/local/bin/gns3server --log /var/log/gns3/gns3.log \
|
||||||
|
--pid /var/run/gns3/gns3.pid --daemon
|
||||||
|
Restart=on-abort
|
||||||
|
PIDFile=/var/run/gns3/gns3.pid
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
jsonschema>=2.4.0
|
jsonschema>=2.4.0
|
||||||
aiohttp==0.17.4
|
aiohttp>=1.2.0
|
||||||
|
aiohttp_cors>=0.4.0
|
||||||
|
yarl>=0.7.0
|
||||||
Jinja2>=2.7.3
|
Jinja2>=2.7.3
|
||||||
raven>=5.2.0
|
raven>=5.2.0
|
||||||
docker-py==1.4.0
|
|
||||||
psutil>=3.0.0
|
psutil>=3.0.0
|
||||||
|
zipstream>=1.1.3
|
||||||
|
355
scripts/remote-install.sh
Normal file
355
scripts/remote-install.sh
Normal file
@ -0,0 +1,355 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
#
|
||||||
|
# Install GNS3 on a remote Ubuntu 14.04 LTS server
|
||||||
|
# This create a dedicated user and setup all the package
|
||||||
|
# and optionnaly a VPN
|
||||||
|
#
|
||||||
|
|
||||||
|
function help {
|
||||||
|
echo "Usage:" >&2
|
||||||
|
echo "--with-openvpn: Install Open VPN" >&2
|
||||||
|
echo "--with-iou: Install IOU" >&2
|
||||||
|
echo "--with-i386-repository: Add i386 repositories require by IOU if they are not available on the system. Warning this will replace your source.list in order to use official ubuntu mirror" >&2
|
||||||
|
echo "--unstable: Use the GNS3 unstable repository"
|
||||||
|
echo "--help: This help" >&2
|
||||||
|
}
|
||||||
|
|
||||||
|
function log {
|
||||||
|
tput setaf 2
|
||||||
|
echo "=> $1" >&2
|
||||||
|
tput sgr0
|
||||||
|
}
|
||||||
|
|
||||||
|
lsb_release -d | grep "Ubuntu 14.04" > /dev/null
|
||||||
|
if [ $? != 0 ]
|
||||||
|
then
|
||||||
|
echo "You can use this script on Ubuntu 14.04 LTS only"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Read the options
|
||||||
|
USE_VPN=0
|
||||||
|
USE_IOU=0
|
||||||
|
I386_REPO=0
|
||||||
|
UNSTABLE=0
|
||||||
|
|
||||||
|
TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,unstable,help -n 'gns3-remote-install.sh' -- "$@"`
|
||||||
|
if [ $? != 0 ]
|
||||||
|
then
|
||||||
|
help
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
eval set -- "$TEMP"
|
||||||
|
|
||||||
|
# extract options and their arguments into variables.
|
||||||
|
while true ; do
|
||||||
|
case "$1" in
|
||||||
|
--with-openvpn)
|
||||||
|
USE_VPN=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--with-iou)
|
||||||
|
USE_IOU=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--with-i386-repository)
|
||||||
|
I386_REPO=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--unstable)
|
||||||
|
UNSTABLE=1
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
help
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
--) shift ; break ;;
|
||||||
|
*) echo "Internal error! $1" ; exit 1 ;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Exit in case of error
|
||||||
|
set -e
|
||||||
|
|
||||||
|
export DEBIAN_FRONTEND="noninteractive"
|
||||||
|
|
||||||
|
log "Add GNS3 repository"
|
||||||
|
|
||||||
|
if [ $UNSTABLE == 1 ]
|
||||||
|
then
|
||||||
|
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
||||||
|
deb http://ppa.launchpad.net/gns3/unstable/ubuntu trusty main
|
||||||
|
deb-src http://ppa.launchpad.net/gns3/unstable/ubuntu trusty main
|
||||||
|
deb http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
|
||||||
|
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
|
||||||
|
EOFLIST
|
||||||
|
else
|
||||||
|
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
||||||
|
deb http://ppa.launchpad.net/gns3/ppa/ubuntu trusty main
|
||||||
|
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu trusty main
|
||||||
|
deb http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
|
||||||
|
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
|
||||||
|
EOFLIST
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $I386_REPO == 1 ]
|
||||||
|
then
|
||||||
|
cat <<EOFLIST2 >> /etc/apt/sources.list
|
||||||
|
###### Ubuntu Main Repos
|
||||||
|
deb http://archive.ubuntu.com/ubuntu/ trusty main universe multiverse
|
||||||
|
deb-src http://archive.ubuntu.com/ubuntu/ trusty main universe multiverse
|
||||||
|
|
||||||
|
###### Ubuntu Update Repos
|
||||||
|
deb http://archive.ubuntu.com/ubuntu/ trusty-security main universe multiverse
|
||||||
|
deb http://archive.ubuntu.com/ubuntu/ trusty-updates main universe multiverse
|
||||||
|
deb-src http://archive.ubuntu.com/ubuntu/ trusty-security main universe multiverse
|
||||||
|
deb-src http://archive.ubuntu.com/ubuntu/ trusty-updates main universe multiverse
|
||||||
|
EOFLIST2
|
||||||
|
fi
|
||||||
|
|
||||||
|
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A2E3EF7B
|
||||||
|
|
||||||
|
log "Update system packages"
|
||||||
|
apt-get update
|
||||||
|
|
||||||
|
log "Upgrade packages"
|
||||||
|
apt-get upgrade -y
|
||||||
|
|
||||||
|
log " Install GNS3 packages"
|
||||||
|
apt-get install -y gns3-server
|
||||||
|
|
||||||
|
log "Create user GNS3 with /opt/gns3 as home directory"
|
||||||
|
if [ ! -d "/opt/gns3/" ]
|
||||||
|
then
|
||||||
|
useradd -d /opt/gns3/ -m gns3
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Install docker"
|
||||||
|
if [ ! -f "/usr/bin/docker" ]
|
||||||
|
then
|
||||||
|
curl -sSL https://get.docker.com | bash
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Add GNS3 to the docker group"
|
||||||
|
usermod -aG docker gns3
|
||||||
|
|
||||||
|
if [ $USE_IOU == 1 ]
|
||||||
|
then
|
||||||
|
log "IOU setup"
|
||||||
|
dpkg --add-architecture i386
|
||||||
|
apt-get update
|
||||||
|
|
||||||
|
apt-get install -y gns3-iou
|
||||||
|
|
||||||
|
# Force the host name to gns3vm
|
||||||
|
echo gns3vm > /etc/hostname
|
||||||
|
|
||||||
|
# Force hostid for IOU
|
||||||
|
dd if=/dev/zero bs=4 count=1 of=/etc/hostid
|
||||||
|
|
||||||
|
# Block iou call. The server is down
|
||||||
|
echo "127.0.0.254 xml.cisco.com" | tee --append /etc/hosts
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "Add gns3 to the kvm group"
|
||||||
|
usermod -aG kvm gns3
|
||||||
|
|
||||||
|
log "Setup GNS3 server"
|
||||||
|
|
||||||
|
mkdir -p /etc/gns3
|
||||||
|
cat <<EOFC > /etc/gns3/gns3_server.conf
|
||||||
|
[Server]
|
||||||
|
host = 0.0.0.0
|
||||||
|
port = 3080
|
||||||
|
images_path = /opt/gns3/images
|
||||||
|
projects_path = /opt/gns3/projects
|
||||||
|
report_errors = True
|
||||||
|
|
||||||
|
[Qemu]
|
||||||
|
enable_kvm = True
|
||||||
|
EOFC
|
||||||
|
|
||||||
|
chown -R gns3:gns3 /etc/gns3
|
||||||
|
chmod -R 700 /etc/gns3
|
||||||
|
|
||||||
|
cat <<EOFI > /etc/init/gns3.conf
|
||||||
|
description "GNS3 server"
|
||||||
|
author "GNS3 Team"
|
||||||
|
|
||||||
|
start on filesystem or runlevel [2345]
|
||||||
|
stop on runlevel [016]
|
||||||
|
respawn
|
||||||
|
console log
|
||||||
|
|
||||||
|
|
||||||
|
script
|
||||||
|
exec start-stop-daemon --start --make-pidfile --pidfile /var/run/gns3.pid --chuid gns3 --exec "/usr/bin/gns3server"
|
||||||
|
end script
|
||||||
|
|
||||||
|
pre-start script
|
||||||
|
echo "" > /var/log/upstart/gns3.log
|
||||||
|
echo "[`date`] GNS3 Starting"
|
||||||
|
end script
|
||||||
|
|
||||||
|
pre-stop script
|
||||||
|
echo "[`date`] GNS3 Stopping"
|
||||||
|
end script
|
||||||
|
EOFI
|
||||||
|
|
||||||
|
chown root:root /etc/init/gns3.conf
|
||||||
|
chmod 644 /etc/init/gns3.conf
|
||||||
|
|
||||||
|
|
||||||
|
log "Start GNS3 service"
|
||||||
|
set +e
|
||||||
|
service gns3 stop
|
||||||
|
set -e
|
||||||
|
service gns3 start
|
||||||
|
|
||||||
|
log "GNS3 installed with success"
|
||||||
|
|
||||||
|
if [ $USE_VPN == 1 ]
|
||||||
|
then
|
||||||
|
log "Setup VPN"
|
||||||
|
|
||||||
|
cat <<EOFSERVER > /etc/gns3/gns3_server.conf
|
||||||
|
[Server]
|
||||||
|
host = 172.16.253.1
|
||||||
|
port = 3080
|
||||||
|
images_path = /opt/gns3/images
|
||||||
|
projects_path = /opt/gns3/projects
|
||||||
|
report_errors = True
|
||||||
|
|
||||||
|
[Qemu]
|
||||||
|
enable_kvm = True
|
||||||
|
EOFSERVER
|
||||||
|
|
||||||
|
log "Install packages for Open VPN"
|
||||||
|
|
||||||
|
apt-get install -y \
|
||||||
|
openvpn \
|
||||||
|
uuid \
|
||||||
|
dnsutils \
|
||||||
|
nginx-light
|
||||||
|
|
||||||
|
MY_IP_ADDR=$(dig @ns1.google.com -t txt o-o.myaddr.l.google.com +short | sed 's/"//g')
|
||||||
|
|
||||||
|
log "IP detected: $MY_IP_ADDR"
|
||||||
|
|
||||||
|
UUID=$(uuid)
|
||||||
|
|
||||||
|
log "Update motd"
|
||||||
|
|
||||||
|
cat <<EOFMOTD > /etc/update-motd.d/70-openvpn
|
||||||
|
#!/bin/sh
|
||||||
|
echo ""
|
||||||
|
echo "_______________________________________________________________________________________________"
|
||||||
|
echo "Download the VPN configuration here:"
|
||||||
|
echo "http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn"
|
||||||
|
echo ""
|
||||||
|
echo "And add it to your openvpn client."
|
||||||
|
echo ""
|
||||||
|
echo "apt-get remove nginx-light to disable the HTTP server."
|
||||||
|
echo "And remove this file with rm /etc/update-motd.d/70-openvpn"
|
||||||
|
EOFMOTD
|
||||||
|
chmod 755 /etc/update-motd.d/70-openvpn
|
||||||
|
|
||||||
|
|
||||||
|
mkdir -p /etc/openvpn/
|
||||||
|
|
||||||
|
[ -d /dev/net ] || mkdir -p /dev/net
|
||||||
|
[ -c /dev/net/tun ] || mknod /dev/net/tun c 10 200
|
||||||
|
|
||||||
|
log "Create keys"
|
||||||
|
|
||||||
|
[ -f /etc/openvpn/dh.pem ] || openssl dhparam -out /etc/openvpn/dh.pem 2048
|
||||||
|
[ -f /etc/openvpn/key.pem ] || openssl genrsa -out /etc/openvpn/key.pem 2048
|
||||||
|
chmod 600 /etc/openvpn/key.pem
|
||||||
|
[ -f /etc/openvpn/csr.pem ] || openssl req -new -key /etc/openvpn/key.pem -out /etc/openvpn/csr.pem -subj /CN=OpenVPN/
|
||||||
|
[ -f /etc/openvpn/cert.pem ] || openssl x509 -req -in /etc/openvpn/csr.pem -out /etc/openvpn/cert.pem -signkey /etc/openvpn/key.pem -days 24855
|
||||||
|
|
||||||
|
log "Create client configuration"
|
||||||
|
cat <<EOFCLIENT > /root/client.ovpn
|
||||||
|
client
|
||||||
|
nobind
|
||||||
|
comp-lzo
|
||||||
|
dev tun
|
||||||
|
<key>
|
||||||
|
`cat /etc/openvpn/key.pem`
|
||||||
|
</key>
|
||||||
|
<cert>
|
||||||
|
`cat /etc/openvpn/cert.pem`
|
||||||
|
</cert>
|
||||||
|
<ca>
|
||||||
|
`cat /etc/openvpn/cert.pem`
|
||||||
|
</ca>
|
||||||
|
<dh>
|
||||||
|
`cat /etc/openvpn/dh.pem`
|
||||||
|
</dh>
|
||||||
|
<connection>
|
||||||
|
remote $MY_IP_ADDR 1194 udp
|
||||||
|
</connection>
|
||||||
|
EOFCLIENT
|
||||||
|
|
||||||
|
cat <<EOFUDP > /etc/openvpn/udp1194.conf
|
||||||
|
server 172.16.253.0 255.255.255.0
|
||||||
|
verb 3
|
||||||
|
duplicate-cn
|
||||||
|
comp-lzo
|
||||||
|
key key.pem
|
||||||
|
ca cert.pem
|
||||||
|
cert cert.pem
|
||||||
|
dh dh.pem
|
||||||
|
keepalive 10 60
|
||||||
|
persist-key
|
||||||
|
persist-tun
|
||||||
|
proto udp
|
||||||
|
port 1194
|
||||||
|
dev tun1194
|
||||||
|
status openvpn-status-1194.log
|
||||||
|
log-append /var/log/openvpn-udp1194.log
|
||||||
|
EOFUDP
|
||||||
|
|
||||||
|
echo "Setup HTTP server for serving client certificate"
|
||||||
|
mkdir -p /usr/share/nginx/openvpn/$UUID
|
||||||
|
cp /root/client.ovpn /usr/share/nginx/openvpn/$UUID/$HOSTNAME.ovpn
|
||||||
|
touch /usr/share/nginx/openvpn/$UUID/index.html
|
||||||
|
touch /usr/share/nginx/openvpn/index.html
|
||||||
|
|
||||||
|
cat <<EOFNGINX > /etc/nginx/sites-available/openvpn
|
||||||
|
server {
|
||||||
|
listen 8003;
|
||||||
|
root /usr/share/nginx/openvpn;
|
||||||
|
}
|
||||||
|
EOFNGINX
|
||||||
|
|
||||||
|
[ -f /etc/nginx/sites-enabled/openvpn ] || ln -s /etc/nginx/sites-available/openvpn /etc/nginx/sites-enabled/
|
||||||
|
service nginx stop
|
||||||
|
service nginx start
|
||||||
|
|
||||||
|
log "Restart OpenVPN"
|
||||||
|
|
||||||
|
set +e
|
||||||
|
service openvpn stop
|
||||||
|
service openvpn start
|
||||||
|
|
||||||
|
log "Download http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn to setup your OpenVPN client after rebooting the server"
|
||||||
|
|
||||||
|
fi
|
14
setup.py
14
setup.py
@ -38,17 +38,7 @@ class PyTest(TestCommand):
|
|||||||
errcode = pytest.main(self.test_args)
|
errcode = pytest.main(self.test_args)
|
||||||
sys.exit(errcode)
|
sys.exit(errcode)
|
||||||
|
|
||||||
dependencies = [
|
dependencies = open("requirements.txt", "r").read().splitlines()
|
||||||
"jsonschema>=2.4.0",
|
|
||||||
"aiohttp>=0.15.1",
|
|
||||||
"Jinja2>=2.7.3",
|
|
||||||
"raven>=5.2.0",
|
|
||||||
"docker-py>=1.4.0",
|
|
||||||
"psutil>=3.0.0"
|
|
||||||
]
|
|
||||||
|
|
||||||
if sys.platform.startswith("win"):
|
|
||||||
dependencies.append("pywin32>=219")
|
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name="gns3-server",
|
name="gns3-server",
|
||||||
@ -63,7 +53,7 @@ setup(
|
|||||||
entry_points={
|
entry_points={
|
||||||
"console_scripts": [
|
"console_scripts": [
|
||||||
"gns3server = gns3server.main:main",
|
"gns3server = gns3server.main:main",
|
||||||
"gns3vmnet = utils.vmnet:main",
|
"gns3vmnet = gns3server.utils.vmnet:main",
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
packages=find_packages(".", exclude=["docs", "tests"]),
|
packages=find_packages(".", exclude=["docs", "tests"]),
|
||||||
|
@ -81,16 +81,25 @@ def _get_unused_port():
|
|||||||
def server(request, loop, port_manager, monkeypatch):
|
def server(request, loop, port_manager, monkeypatch):
|
||||||
"""A GNS3 server"""
|
"""A GNS3 server"""
|
||||||
|
|
||||||
port = _get_unused_port()
|
|
||||||
host = "localhost"
|
|
||||||
app = web.Application()
|
app = web.Application()
|
||||||
for method, route, handler in Route.get_routes():
|
for method, route, handler in Route.get_routes():
|
||||||
app.router.add_route(method, route, handler)
|
app.router.add_route(method, route, handler)
|
||||||
for module in MODULES:
|
for module in MODULES:
|
||||||
instance = module.instance()
|
instance = module.instance()
|
||||||
instance.port_manager = port_manager
|
instance.port_manager = port_manager
|
||||||
srv = loop.create_server(app.make_handler(), host, port)
|
|
||||||
srv = loop.run_until_complete(srv)
|
host = "localhost"
|
||||||
|
|
||||||
|
# We try multiple time. Because on Travis test can fail when because the port is taken by someone else
|
||||||
|
for i in range(0, 5):
|
||||||
|
port = _get_unused_port()
|
||||||
|
try:
|
||||||
|
srv = loop.create_server(app.make_handler(), host, port)
|
||||||
|
srv = loop.run_until_complete(srv)
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
def tear_down():
|
def tear_down():
|
||||||
for module in MODULES:
|
for module in MODULES:
|
||||||
@ -151,6 +160,7 @@ def run_around_tests(monkeypatch, port_manager):
|
|||||||
config.set("Server", "project_directory", os.path.join(tmppath, 'projects'))
|
config.set("Server", "project_directory", os.path.join(tmppath, 'projects'))
|
||||||
config.set("Server", "images_path", os.path.join(tmppath, 'images'))
|
config.set("Server", "images_path", os.path.join(tmppath, 'images'))
|
||||||
config.set("Server", "auth", False)
|
config.set("Server", "auth", False)
|
||||||
|
config.set("Server", "controller", False)
|
||||||
|
|
||||||
# Prevent executions of the VM if we forgot to mock something
|
# Prevent executions of the VM if we forgot to mock something
|
||||||
config.set("VirtualBox", "vboxmanage_path", tmppath)
|
config.set("VirtualBox", "vboxmanage_path", tmppath)
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||||
#
|
#
|
||||||
# This program is free software: you can redistribute it and/or modify
|
# This program is free software: you can redistribute it and/or modify
|
||||||
# it under the terms of the GNU General Public License as published by
|
# it under the terms of the GNU General Public License as published by
|
||||||
@ -15,14 +15,22 @@
|
|||||||
# You should have received a copy of the GNU General Public License
|
# You should have received a copy of the GNU General Public License
|
||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import aiohttp.web
|
import pytest
|
||||||
import logging
|
|
||||||
|
from gns3server.controller import Controller
|
||||||
|
from gns3server.config import Config
|
||||||
|
|
||||||
|
|
||||||
class RequestHandler(aiohttp.web.RequestHandler):
|
@pytest.fixture
|
||||||
|
def controller():
|
||||||
|
Controller._instance = None
|
||||||
|
return Controller.instance()
|
||||||
|
|
||||||
def log_access(self, message, environ, response, time):
|
|
||||||
|
|
||||||
# In debug mode we don't use the standard request log but a more complete in response.py
|
|
||||||
if self.logger.getEffectiveLevel() != logging.DEBUG:
|
def test_isEnabled(controller):
|
||||||
super().log_access(message, environ, response, time)
|
Config.instance().set("Server", "controller", False)
|
||||||
|
assert not controller.isEnabled()
|
||||||
|
Config.instance().set("Server", "controller", True)
|
||||||
|
assert controller.isEnabled()
|
||||||
|
|
@ -61,20 +61,20 @@ class Query:
|
|||||||
body = json.dumps(body)
|
body = json.dumps(body)
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def go(future):
|
def go_request(future):
|
||||||
response = yield from aiohttp.request(method, self.get_url(path, api_version), data=body)
|
response = yield from aiohttp.request(method, self.get_url(path, api_version), data=body)
|
||||||
future.set_result(response)
|
future.set_result(response)
|
||||||
future = asyncio.Future()
|
future = asyncio.Future()
|
||||||
asyncio.async(go(future))
|
asyncio.async(go_request(future))
|
||||||
self._loop.run_until_complete(future)
|
self._loop.run_until_complete(future)
|
||||||
response = future.result()
|
response = future.result()
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def go(future, response):
|
def go_read(future, response):
|
||||||
response = yield from response.read()
|
response = yield from response.read()
|
||||||
future.set_result(response)
|
future.set_result(response)
|
||||||
future = asyncio.Future()
|
future = asyncio.Future()
|
||||||
asyncio.async(go(future, response))
|
asyncio.async(go_read(future, response))
|
||||||
self._loop.run_until_complete(future)
|
self._loop.run_until_complete(future)
|
||||||
response.body = future.result()
|
response.body = future.result()
|
||||||
x_route = response.headers.get('X-Route', None)
|
x_route = response.headers.get('X-Route', None)
|
||||||
@ -104,7 +104,7 @@ class Query:
|
|||||||
if path is None:
|
if path is None:
|
||||||
return
|
return
|
||||||
with open(self._example_file_path(method, route), 'w+') as f:
|
with open(self._example_file_path(method, route), 'w+') as f:
|
||||||
f.write("curl -i -X {} 'http://localhost:8000/v{}{}'".format(method, api_version, path))
|
f.write("curl -i -X {} 'http://localhost:3080/v{}{}'".format(method, api_version, path))
|
||||||
if body:
|
if body:
|
||||||
f.write(" -d '{}'".format(re.sub(r"\n", "", json.dumps(json.loads(body), sort_keys=True))))
|
f.write(" -d '{}'".format(re.sub(r"\n", "", json.dumps(json.loads(body), sort_keys=True))))
|
||||||
f.write("\n\n")
|
f.write("\n\n")
|
||||||
|
185
tests/handlers/api/test_docker.py
Normal file
185
tests/handlers/api/test_docker.py
Normal file
@ -0,0 +1,185 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
import os
|
||||||
|
import stat
|
||||||
|
import sys
|
||||||
|
import uuid
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
from tests.utils import asyncio_patch
|
||||||
|
from unittest.mock import patch, MagicMock, PropertyMock
|
||||||
|
from gns3server.modules.docker import Docker
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def base_params():
|
||||||
|
"""Return standard parameters"""
|
||||||
|
return {"name": "PC TEST 1", "image": "nginx", "start_command": "nginx-daemon", "adapters": 2, "environment": "YES=1\nNO=0", "console_type": "telnet", "console_resolution": "1280x1024"}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.yield_fixture(autouse=True)
|
||||||
|
def mock_connection():
|
||||||
|
docker = Docker.instance()
|
||||||
|
docker._connected = True
|
||||||
|
docker._connector = MagicMock()
|
||||||
|
yield
|
||||||
|
Docker._instance = None
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def vm(server, project, base_params):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "nginx"}]) as mock_list:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value={"Id": "8bd8153ea8f5"}) as mock:
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms".format(project_id=project.id), base_params)
|
||||||
|
if response.status != 201:
|
||||||
|
print(response.body)
|
||||||
|
assert response.status == 201
|
||||||
|
return response.json
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_create(server, project, base_params):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "nginx"}]) as mock_list:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value={"Id": "8bd8153ea8f5"}) as mock:
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms".format(project_id=project.id), base_params)
|
||||||
|
assert response.status == 201
|
||||||
|
assert response.route == "/projects/{project_id}/docker/vms"
|
||||||
|
assert response.json["name"] == "PC TEST 1"
|
||||||
|
assert response.json["project_id"] == project.id
|
||||||
|
assert response.json["container_id"] == "8bd8153ea8f5"
|
||||||
|
assert response.json["image"] == "nginx:latest"
|
||||||
|
assert response.json["adapters"] == 2
|
||||||
|
assert response.json["environment"] == "YES=1\nNO=0"
|
||||||
|
assert response.json["console_resolution"] == "1280x1024"
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_start(server, vm):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.start", return_value=True) as mock:
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||||
|
assert mock.called
|
||||||
|
assert response.status == 204
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_stop(server, vm):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.stop", return_value=True) as mock:
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||||
|
assert mock.called
|
||||||
|
assert response.status == 204
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_reload(server, vm):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.restart", return_value=True) as mock:
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/reload".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||||
|
assert mock.called
|
||||||
|
assert response.status == 204
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_delete(server, vm):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.delete", return_value=True) as mock:
|
||||||
|
response = server.delete("/projects/{project_id}/docker/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||||
|
assert mock.called
|
||||||
|
assert response.status == 204
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_reload(server, vm):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.pause", return_value=True) as mock:
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/suspend".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||||
|
assert mock.called
|
||||||
|
assert response.status == 204
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_nio_create_udp(server, vm):
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
|
||||||
|
"lport": 4242,
|
||||||
|
"rport": 4343,
|
||||||
|
"rhost": "127.0.0.1"},
|
||||||
|
example=True)
|
||||||
|
assert response.status == 201
|
||||||
|
assert response.route == "/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
||||||
|
assert response.json["type"] == "nio_udp"
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_delete_nio(server, vm):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.adapter_remove_nio_binding") as mock:
|
||||||
|
response = server.delete("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
||||||
|
assert response.status == 204
|
||||||
|
assert response.route == "/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_update(server, vm, tmpdir, free_console_port):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.update") as mock:
|
||||||
|
response = server.put("/projects/{project_id}/docker/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"name": "test",
|
||||||
|
"console": free_console_port,
|
||||||
|
"start_command": "yes",
|
||||||
|
"environment": "GNS3=1\nGNS4=0"},
|
||||||
|
example=True)
|
||||||
|
assert mock.called
|
||||||
|
assert response.status == 200
|
||||||
|
assert response.json["name"] == "test"
|
||||||
|
assert response.json["console"] == free_console_port
|
||||||
|
assert response.json["start_command"] == "yes"
|
||||||
|
assert response.json["environment"] == "GNS3=1\nGNS4=0"
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_start_capture(server, vm, tmpdir, project):
|
||||||
|
|
||||||
|
with patch("gns3server.modules.docker.docker_vm.DockerVM.is_running", return_value=True) as mock:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.start_capture") as start_capture:
|
||||||
|
|
||||||
|
params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=params, example=True)
|
||||||
|
|
||||||
|
assert response.status == 200
|
||||||
|
|
||||||
|
assert start_capture.called
|
||||||
|
assert "test.pcap" in response.json["pcap_file_path"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_start_capture_not_started(server, vm, tmpdir):
|
||||||
|
|
||||||
|
with patch("gns3server.modules.docker.docker_vm.DockerVM.is_running", return_value=False) as mock:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.start_capture") as start_capture:
|
||||||
|
|
||||||
|
params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=params)
|
||||||
|
|
||||||
|
assert not start_capture.called
|
||||||
|
assert response.status == 409
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_stop_capture(server, vm, tmpdir, project):
|
||||||
|
|
||||||
|
with patch("gns3server.modules.docker.docker_vm.DockerVM.is_running", return_value=True) as mock:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.stop_capture") as stop_capture:
|
||||||
|
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
||||||
|
|
||||||
|
assert response.status == 204
|
||||||
|
|
||||||
|
assert stop_capture.called
|
||||||
|
|
||||||
|
|
||||||
|
def test_docker_stop_capture_not_started(server, vm, tmpdir):
|
||||||
|
|
||||||
|
with patch("gns3server.modules.docker.docker_vm.DockerVM.is_running", return_value=False) as mock:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.stop_capture") as stop_capture:
|
||||||
|
|
||||||
|
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||||
|
|
||||||
|
assert not stop_capture.called
|
||||||
|
assert response.status == 409
|
@ -173,9 +173,9 @@ def test_upload_vm(server, tmpdir):
|
|||||||
|
|
||||||
|
|
||||||
def test_upload_vm_permission_denied(server, tmpdir):
|
def test_upload_vm_permission_denied(server, tmpdir):
|
||||||
with open(str(tmpdir / "test2"), "w+") as f:
|
with open(str(tmpdir / "test2.tmp"), "w+") as f:
|
||||||
f.write("")
|
f.write("")
|
||||||
os.chmod(str(tmpdir / "test2"), 0)
|
os.chmod(str(tmpdir / "test2.tmp"), 0)
|
||||||
|
|
||||||
with patch("gns3server.modules.Dynamips.get_images_directory", return_value=str(tmpdir),):
|
with patch("gns3server.modules.Dynamips.get_images_directory", return_value=str(tmpdir),):
|
||||||
response = server.post("/dynamips/vms/test2", body="TEST", raw=True)
|
response = server.post("/dynamips/vms/test2", body="TEST", raw=True)
|
||||||
|
@ -27,15 +27,15 @@ from gns3server.version import __version__
|
|||||||
|
|
||||||
|
|
||||||
def test_stream(server, tmpdir, loop):
|
def test_stream(server, tmpdir, loop):
|
||||||
with open(str(tmpdir / "test"), 'w+') as f:
|
with open(str(tmpdir / "test.pcap"), 'w+') as f:
|
||||||
f.write("hello")
|
f.write("hello")
|
||||||
|
|
||||||
def go(future):
|
def go(future):
|
||||||
query = json.dumps({"location": str(tmpdir / "test")})
|
query = json.dumps({"location": str(tmpdir / "test.pcap")})
|
||||||
headers = {'content-type': 'application/json'}
|
headers = {'content-type': 'application/json'}
|
||||||
response = yield from aiohttp.request("GET", server.get_url("/files/stream", 1), data=query, headers=headers)
|
response = yield from aiohttp.request("GET", server.get_url("/files/stream", 1), data=query, headers=headers)
|
||||||
response.body = yield from response.content.read(5)
|
response.body = yield from response.content.read(5)
|
||||||
with open(str(tmpdir / "test"), 'a') as f:
|
with open(str(tmpdir / "test.pcap"), 'a') as f:
|
||||||
f.write("world")
|
f.write("world")
|
||||||
response.body += yield from response.content.read(5)
|
response.body += yield from response.content.read(5)
|
||||||
response.close()
|
response.close()
|
||||||
@ -48,7 +48,7 @@ def test_stream(server, tmpdir, loop):
|
|||||||
assert response.body == b'helloworld'
|
assert response.body == b'helloworld'
|
||||||
|
|
||||||
|
|
||||||
def test_stream_file_not_found(server, tmpdir, loop):
|
def test_stream_file_not_pcap(server, tmpdir, loop):
|
||||||
def go(future):
|
def go(future):
|
||||||
query = json.dumps({"location": str(tmpdir / "test")})
|
query = json.dumps({"location": str(tmpdir / "test")})
|
||||||
headers = {'content-type': 'application/json'}
|
headers = {'content-type': 'application/json'}
|
||||||
@ -56,6 +56,20 @@ def test_stream_file_not_found(server, tmpdir, loop):
|
|||||||
response.close()
|
response.close()
|
||||||
future.set_result(response)
|
future.set_result(response)
|
||||||
|
|
||||||
|
future = asyncio.Future()
|
||||||
|
asyncio.async(go(future))
|
||||||
|
response = loop.run_until_complete(future)
|
||||||
|
assert response.status == 403
|
||||||
|
|
||||||
|
|
||||||
|
def test_stream_file_not_found(server, tmpdir, loop):
|
||||||
|
def go(future):
|
||||||
|
query = json.dumps({"location": str(tmpdir / "test.pcap")})
|
||||||
|
headers = {'content-type': 'application/json'}
|
||||||
|
response = yield from aiohttp.request("GET", server.get_url("/files/stream", 1), data=query, headers=headers)
|
||||||
|
response.close()
|
||||||
|
future.set_result(response)
|
||||||
|
|
||||||
future = asyncio.Future()
|
future = asyncio.Future()
|
||||||
asyncio.async(go(future))
|
asyncio.async(go(future))
|
||||||
response = loop.run_until_complete(future)
|
response = loop.run_until_complete(future)
|
||||||
|
@ -139,7 +139,8 @@ def test_iou_start(server, vm):
|
|||||||
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM.start", return_value=True) as mock:
|
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM.start", return_value=True) as mock:
|
||||||
response = server.post("/projects/{project_id}/iou/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
response = server.post("/projects/{project_id}/iou/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status == 204
|
assert response.status == 200
|
||||||
|
assert response.json["name"] == "PC TEST 1"
|
||||||
|
|
||||||
|
|
||||||
def test_iou_start_with_iourc(server, vm, tmpdir):
|
def test_iou_start_with_iourc(server, vm, tmpdir):
|
||||||
@ -148,7 +149,7 @@ def test_iou_start_with_iourc(server, vm, tmpdir):
|
|||||||
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM.start", return_value=True) as mock:
|
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM.start", return_value=True) as mock:
|
||||||
response = server.post("/projects/{project_id}/iou/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=body, example=True)
|
response = server.post("/projects/{project_id}/iou/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=body, example=True)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status == 204
|
assert response.status == 200
|
||||||
|
|
||||||
response = server.get("/projects/{project_id}/iou/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
response = server.get("/projects/{project_id}/iou/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||||
assert response.status == 200
|
assert response.status == 200
|
||||||
@ -348,11 +349,3 @@ def test_upload_vm(server, tmpdir):
|
|||||||
assert checksum == "033bd94b1168d7e4f0d644c3c95e35bf"
|
assert checksum == "033bd94b1168d7e4f0d644c3c95e35bf"
|
||||||
|
|
||||||
|
|
||||||
def test_upload_vm_permission_denied(server, tmpdir):
|
|
||||||
with open(str(tmpdir / "test2"), "w+") as f:
|
|
||||||
f.write("")
|
|
||||||
os.chmod(str(tmpdir / "test2"), 0)
|
|
||||||
|
|
||||||
with patch("gns3server.modules.IOU.get_images_directory", return_value=str(tmpdir),):
|
|
||||||
response = server.post("/iou/vms/test2", body="TEST", raw=True)
|
|
||||||
assert response.status == 409
|
|
||||||
|
@ -23,8 +23,9 @@ import uuid
|
|||||||
import os
|
import os
|
||||||
import asyncio
|
import asyncio
|
||||||
import aiohttp
|
import aiohttp
|
||||||
|
import zipfile
|
||||||
|
|
||||||
from unittest.mock import patch
|
from unittest.mock import patch, MagicMock
|
||||||
from tests.utils import asyncio_patch
|
from tests.utils import asyncio_patch
|
||||||
|
|
||||||
from gns3server.handlers.api.project_handler import ProjectHandler
|
from gns3server.handlers.api.project_handler import ProjectHandler
|
||||||
@ -261,3 +262,75 @@ def test_get_file(server, tmpdir):
|
|||||||
|
|
||||||
response = server.get("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
|
response = server.get("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
|
||||||
assert response.status == 403
|
assert response.status == 403
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_file(server, tmpdir):
|
||||||
|
|
||||||
|
with patch("gns3server.config.Config.get_section_config", return_value={"project_directory": str(tmpdir)}):
|
||||||
|
project = ProjectManager.instance().create_project()
|
||||||
|
|
||||||
|
with open(os.path.join(project.path, "hello"), "w+") as f:
|
||||||
|
f.write("world")
|
||||||
|
|
||||||
|
response = server.post("/projects/{project_id}/files/hello".format(project_id=project.id), body="universe", raw=True)
|
||||||
|
assert response.status == 200
|
||||||
|
|
||||||
|
with open(os.path.join(project.path, "hello")) as f:
|
||||||
|
content = f.read()
|
||||||
|
assert content == "universe"
|
||||||
|
|
||||||
|
response = server.post("/projects/{project_id}/files/test/false".format(project_id=project.id), body="universe", raw=True)
|
||||||
|
assert response.status == 404
|
||||||
|
|
||||||
|
response = server.post("/projects/{project_id}/files/../hello".format(project_id=project.id), body="universe", raw=True)
|
||||||
|
assert response.status == 403
|
||||||
|
|
||||||
|
|
||||||
|
def test_export(server, tmpdir, loop, project):
|
||||||
|
|
||||||
|
os.makedirs(project.path, exist_ok=True)
|
||||||
|
with open(os.path.join(project.path, 'a'), 'w+') as f:
|
||||||
|
f.write('hello')
|
||||||
|
|
||||||
|
response = server.get("/projects/{project_id}/export".format(project_id=project.id), raw=True)
|
||||||
|
assert response.status == 200
|
||||||
|
assert response.headers['CONTENT-TYPE'] == 'application/gns3project'
|
||||||
|
assert response.headers['CONTENT-DISPOSITION'] == 'attachment; filename="{}.gns3project"'.format(project.name)
|
||||||
|
|
||||||
|
with open(str(tmpdir / 'project.zip'), 'wb+') as f:
|
||||||
|
f.write(response.body)
|
||||||
|
|
||||||
|
with zipfile.ZipFile(str(tmpdir / 'project.zip')) as myzip:
|
||||||
|
with myzip.open("a") as myfile:
|
||||||
|
content = myfile.read()
|
||||||
|
assert content == b"hello"
|
||||||
|
|
||||||
|
|
||||||
|
def test_export_include_image(server, tmpdir, loop, project):
|
||||||
|
|
||||||
|
project.export = MagicMock()
|
||||||
|
response = server.get("/projects/{project_id}/export".format(project_id=project.id), raw=True)
|
||||||
|
project.export.assert_called_with(include_images=False)
|
||||||
|
|
||||||
|
response = server.get("/projects/{project_id}/export?include_images=0".format(project_id=project.id), raw=True)
|
||||||
|
project.export.assert_called_with(include_images=False)
|
||||||
|
|
||||||
|
response = server.get("/projects/{project_id}/export?include_images=1".format(project_id=project.id), raw=True)
|
||||||
|
project.export.assert_called_with(include_images=True)
|
||||||
|
|
||||||
|
|
||||||
|
def test_import(server, tmpdir, loop, project):
|
||||||
|
|
||||||
|
with zipfile.ZipFile(str(tmpdir / "test.zip"), 'w') as myzip:
|
||||||
|
myzip.writestr("demo", b"hello")
|
||||||
|
|
||||||
|
project_id = project.id
|
||||||
|
|
||||||
|
with open(str(tmpdir / "test.zip"), "rb") as f:
|
||||||
|
response = server.post("/projects/{project_id}/import".format(project_id=project_id), body=f.read(), raw=True)
|
||||||
|
assert response.status == 201
|
||||||
|
|
||||||
|
project = ProjectManager.instance().get_project(project_id=project_id)
|
||||||
|
with open(os.path.join(project.path, "demo")) as f:
|
||||||
|
content = f.read()
|
||||||
|
assert content == "hello"
|
||||||
|
@ -114,7 +114,8 @@ def test_qemu_start(server, vm):
|
|||||||
with asyncio_patch("gns3server.modules.qemu.qemu_vm.QemuVM.start", return_value=True) as mock:
|
with asyncio_patch("gns3server.modules.qemu.qemu_vm.QemuVM.start", return_value=True) as mock:
|
||||||
response = server.post("/projects/{project_id}/qemu/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
response = server.post("/projects/{project_id}/qemu/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status == 204
|
assert response.status == 200
|
||||||
|
assert response.json["name"] == "PC TEST 1"
|
||||||
|
|
||||||
|
|
||||||
def test_qemu_stop(server, vm):
|
def test_qemu_stop(server, vm):
|
||||||
@ -262,9 +263,9 @@ def test_upload_vm_forbiden_location(server, tmpdir):
|
|||||||
|
|
||||||
|
|
||||||
def test_upload_vm_permission_denied(server, tmpdir):
|
def test_upload_vm_permission_denied(server, tmpdir):
|
||||||
with open(str(tmpdir / "test2"), "w+") as f:
|
with open(str(tmpdir / "test2.tmp"), "w+") as f:
|
||||||
f.write("")
|
f.write("")
|
||||||
os.chmod(str(tmpdir / "test2"), 0)
|
os.chmod(str(tmpdir / "test2.tmp"), 0)
|
||||||
|
|
||||||
with patch("gns3server.modules.Qemu.get_images_directory", return_value=str(tmpdir),):
|
with patch("gns3server.modules.Qemu.get_images_directory", return_value=str(tmpdir),):
|
||||||
response = server.post("/qemu/vms/test2", body="TEST", raw=True)
|
response = server.post("/qemu/vms/test2", body="TEST", raw=True)
|
||||||
|
@ -102,7 +102,8 @@ def test_vpcs_start(server, vm):
|
|||||||
with asyncio_patch("gns3server.modules.vpcs.vpcs_vm.VPCSVM.start", return_value=True) as mock:
|
with asyncio_patch("gns3server.modules.vpcs.vpcs_vm.VPCSVM.start", return_value=True) as mock:
|
||||||
response = server.post("/projects/{project_id}/vpcs/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
response = server.post("/projects/{project_id}/vpcs/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status == 204
|
assert response.status == 200
|
||||||
|
assert response.json["name"] == "PC TEST 1"
|
||||||
|
|
||||||
|
|
||||||
def test_vpcs_stop(server, vm):
|
def test_vpcs_stop(server, vm):
|
||||||
|
@ -219,7 +219,6 @@ def test_backup_projects(server, tmpdir, loop):
|
|||||||
assert response.headers['CONTENT-TYPE'] == 'application/x-gtar'
|
assert response.headers['CONTENT-TYPE'] == 'application/x-gtar'
|
||||||
|
|
||||||
with open(str(tmpdir / 'projects.tar'), 'wb+') as f:
|
with open(str(tmpdir / 'projects.tar'), 'wb+') as f:
|
||||||
print(len(response.body))
|
|
||||||
f.write(response.body)
|
f.write(response.body)
|
||||||
|
|
||||||
tar = tarfile.open(str(tmpdir / 'projects.tar'), 'r')
|
tar = tarfile.open(str(tmpdir / 'projects.tar'), 'r')
|
||||||
|
136
tests/modules/docker/test_docker.py
Normal file
136
tests/modules/docker/test_docker.py
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
#
|
||||||
|
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
import asyncio
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
from tests.utils import asyncio_patch
|
||||||
|
from gns3server.modules.docker import Docker
|
||||||
|
from gns3server.modules.docker.docker_error import DockerError
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def vm():
|
||||||
|
vm = Docker()
|
||||||
|
vm._connected = True
|
||||||
|
vm._connector = MagicMock()
|
||||||
|
vm._connector.closed = False
|
||||||
|
return vm
|
||||||
|
|
||||||
|
|
||||||
|
def test_query_success(loop, vm):
|
||||||
|
|
||||||
|
response = MagicMock()
|
||||||
|
response.status = 200
|
||||||
|
response.headers = {'CONTENT-TYPE': 'application/json'}
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def read():
|
||||||
|
return b'{"c": false}'
|
||||||
|
|
||||||
|
response.read.side_effect = read
|
||||||
|
with asyncio_patch("aiohttp.request", return_value=response) as mock:
|
||||||
|
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
|
||||||
|
mock.assert_called_with('POST',
|
||||||
|
'http://docker/test',
|
||||||
|
connector=vm._connector,
|
||||||
|
data='{"a": true}',
|
||||||
|
headers={'content-type': 'application/json'},
|
||||||
|
params={'b': 1})
|
||||||
|
|
||||||
|
assert data == {"c": False}
|
||||||
|
|
||||||
|
|
||||||
|
def test_query_error(loop, vm):
|
||||||
|
|
||||||
|
response = MagicMock()
|
||||||
|
response.status = 404
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def read():
|
||||||
|
return b"NOT FOUND"
|
||||||
|
|
||||||
|
response.read.side_effect = read
|
||||||
|
with asyncio_patch("aiohttp.request", return_value=response) as mock:
|
||||||
|
with pytest.raises(DockerError):
|
||||||
|
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
|
||||||
|
mock.assert_called_with('POST',
|
||||||
|
'http://docker/test',
|
||||||
|
connector=vm._connector,
|
||||||
|
data='{"a": true}',
|
||||||
|
headers={'content-type': 'application/json'},
|
||||||
|
params={'b': 1})
|
||||||
|
|
||||||
|
|
||||||
|
def test_query_error_json(loop, vm):
|
||||||
|
|
||||||
|
response = MagicMock()
|
||||||
|
response.status = 404
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def read():
|
||||||
|
return b'{"message": "Error"}'
|
||||||
|
|
||||||
|
response.read.side_effect = read
|
||||||
|
with asyncio_patch("aiohttp.request", return_value=response) as mock:
|
||||||
|
with pytest.raises(DockerError):
|
||||||
|
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
|
||||||
|
mock.assert_called_with('POST',
|
||||||
|
'http://docker/test',
|
||||||
|
connector=vm._connector,
|
||||||
|
data='{"a": true}',
|
||||||
|
headers={'content-type': 'application/json'},
|
||||||
|
params={'b': 1})
|
||||||
|
|
||||||
|
|
||||||
|
def test_list_images(loop):
|
||||||
|
response = [
|
||||||
|
{
|
||||||
|
"RepoTags": [
|
||||||
|
"ubuntu:12.04",
|
||||||
|
"ubuntu:precise",
|
||||||
|
"ubuntu:latest"
|
||||||
|
],
|
||||||
|
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||||
|
"Created": 1365714795,
|
||||||
|
"Size": 131506275,
|
||||||
|
"VirtualSize": 131506275
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"RepoTags": [
|
||||||
|
"ubuntu:12.10",
|
||||||
|
"ubuntu:quantal",
|
||||||
|
"<none>:<none>"
|
||||||
|
],
|
||||||
|
"ParentId": "27cf784147099545",
|
||||||
|
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
|
||||||
|
"Created": 1364102658,
|
||||||
|
"Size": 24653,
|
||||||
|
"VirtualSize": 180116135
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
images = loop.run_until_complete(asyncio.async(Docker.instance().list_images()))
|
||||||
|
mock.assert_called_with("GET", "images/json", params={"all": 0})
|
||||||
|
assert len(images) == 5
|
||||||
|
assert {"image": "ubuntu:12.04"} in images
|
||||||
|
assert {"image": "ubuntu:precise"} in images
|
||||||
|
assert {"image": "ubuntu:latest"} in images
|
||||||
|
assert {"image": "ubuntu:12.10"} in images
|
||||||
|
assert {"image": "ubuntu:quantal"} in images
|
939
tests/modules/docker/test_docker_vm.py
Normal file
939
tests/modules/docker/test_docker_vm.py
Normal file
@ -0,0 +1,939 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
import uuid
|
||||||
|
import asyncio
|
||||||
|
import os
|
||||||
|
from tests.utils import asyncio_patch, AsyncioMagicMock
|
||||||
|
|
||||||
|
from gns3server.ubridge.ubridge_error import UbridgeNamespaceError
|
||||||
|
from gns3server.modules.docker.docker_vm import DockerVM
|
||||||
|
from gns3server.modules.docker.docker_error import *
|
||||||
|
from gns3server.modules.docker import Docker
|
||||||
|
from gns3server.utils.get_resource import get_resource
|
||||||
|
|
||||||
|
|
||||||
|
from unittest.mock import patch, MagicMock, PropertyMock, call
|
||||||
|
from gns3server.config import Config
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="module")
|
||||||
|
def manager(port_manager):
|
||||||
|
m = Docker.instance()
|
||||||
|
m.port_manager = port_manager
|
||||||
|
return m
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="function")
|
||||||
|
def vm(project, manager):
|
||||||
|
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||||
|
vm._cid = "e90e34656842"
|
||||||
|
vm.allocate_aux = False
|
||||||
|
return vm
|
||||||
|
|
||||||
|
|
||||||
|
def test_json(vm, project):
|
||||||
|
assert vm.__json__() == {
|
||||||
|
'container_id': 'e90e34656842',
|
||||||
|
'image': 'ubuntu:latest',
|
||||||
|
'name': 'test',
|
||||||
|
'project_id': project.id,
|
||||||
|
'vm_id': vm.id,
|
||||||
|
'adapters': 1,
|
||||||
|
'console': vm.console,
|
||||||
|
'console_type': 'telnet',
|
||||||
|
'console_resolution': '1024x768',
|
||||||
|
'console_http_port': 80,
|
||||||
|
'console_http_path': '/',
|
||||||
|
'aux': vm.aux,
|
||||||
|
'start_command': vm.start_command,
|
||||||
|
'environment': vm.environment,
|
||||||
|
'vm_directory': vm.working_dir
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_command(vm):
|
||||||
|
|
||||||
|
vm.start_command = "hello"
|
||||||
|
assert vm.start_command == "hello"
|
||||||
|
vm.start_command = " "
|
||||||
|
assert vm.start_command is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_create(loop, project, manager):
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||||
|
loop.run_until_complete(asyncio.async(vm.create()))
|
||||||
|
mock.assert_called_with("POST", "containers/create", data={
|
||||||
|
"Tty": True,
|
||||||
|
"OpenStdin": True,
|
||||||
|
"StdinOnce": False,
|
||||||
|
"HostConfig":
|
||||||
|
{
|
||||||
|
"CapAdd": ["ALL"],
|
||||||
|
"Binds": [
|
||||||
|
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||||
|
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||||
|
],
|
||||||
|
"Privileged": True
|
||||||
|
},
|
||||||
|
"Volumes": {},
|
||||||
|
"NetworkDisabled": True,
|
||||||
|
"Name": "test",
|
||||||
|
"Hostname": "test",
|
||||||
|
"Image": "ubuntu:latest",
|
||||||
|
"Env": [
|
||||||
|
"container=docker",
|
||||||
|
"GNS3_MAX_ETHERNET=eth0",
|
||||||
|
"GNS3_VOLUMES=/etc/network"
|
||||||
|
],
|
||||||
|
"Entrypoint": ["/gns3/init.sh"],
|
||||||
|
"Cmd": ["/bin/sh"]
|
||||||
|
})
|
||||||
|
assert vm._cid == "e90e34656806"
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_with_tag(loop, project, manager):
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest:16.04")
|
||||||
|
loop.run_until_complete(asyncio.async(vm.create()))
|
||||||
|
mock.assert_called_with("POST", "containers/create", data={
|
||||||
|
"Tty": True,
|
||||||
|
"OpenStdin": True,
|
||||||
|
"StdinOnce": False,
|
||||||
|
"HostConfig":
|
||||||
|
{
|
||||||
|
"CapAdd": ["ALL"],
|
||||||
|
"Binds": [
|
||||||
|
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||||
|
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||||
|
],
|
||||||
|
"Privileged": True
|
||||||
|
},
|
||||||
|
"Volumes": {},
|
||||||
|
"NetworkDisabled": True,
|
||||||
|
"Name": "test",
|
||||||
|
"Hostname": "test",
|
||||||
|
"Image": "ubuntu:latest:16.04",
|
||||||
|
"Env": [
|
||||||
|
"container=docker",
|
||||||
|
"GNS3_MAX_ETHERNET=eth0",
|
||||||
|
"GNS3_VOLUMES=/etc/network"
|
||||||
|
],
|
||||||
|
"Entrypoint": ["/gns3/init.sh"],
|
||||||
|
"Cmd": ["/bin/sh"]
|
||||||
|
})
|
||||||
|
assert vm._cid == "e90e34656806"
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_vnc(loop, project, manager):
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest", console_type="vnc", console=5900)
|
||||||
|
vm._start_vnc = MagicMock()
|
||||||
|
vm._display = 42
|
||||||
|
loop.run_until_complete(asyncio.async(vm.create()))
|
||||||
|
mock.assert_called_with("POST", "containers/create", data={
|
||||||
|
"Tty": True,
|
||||||
|
"OpenStdin": True,
|
||||||
|
"StdinOnce": False,
|
||||||
|
"HostConfig":
|
||||||
|
{
|
||||||
|
"CapAdd": ["ALL"],
|
||||||
|
"Binds": [
|
||||||
|
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||||
|
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
|
||||||
|
'/tmp/.X11-unix/:/tmp/.X11-unix/'
|
||||||
|
],
|
||||||
|
"Privileged": True
|
||||||
|
},
|
||||||
|
"Volumes": {},
|
||||||
|
"NetworkDisabled": True,
|
||||||
|
"Name": "test",
|
||||||
|
"Hostname": "test",
|
||||||
|
"Image": "ubuntu:latest",
|
||||||
|
"Env": [
|
||||||
|
"container=docker",
|
||||||
|
"GNS3_MAX_ETHERNET=eth0",
|
||||||
|
"GNS3_VOLUMES=/etc/network",
|
||||||
|
"QT_GRAPHICSSYSTEM=native",
|
||||||
|
"DISPLAY=:42"
|
||||||
|
],
|
||||||
|
"Entrypoint": ["/gns3/init.sh"],
|
||||||
|
"Cmd": ["/bin/sh"]
|
||||||
|
})
|
||||||
|
assert vm._start_vnc.called
|
||||||
|
assert vm._cid == "e90e34656806"
|
||||||
|
assert vm._console_type == "vnc"
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_start_cmd(loop, project, manager):
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||||
|
vm._start_command = "/bin/ls"
|
||||||
|
loop.run_until_complete(asyncio.async(vm.create()))
|
||||||
|
mock.assert_called_with("POST", "containers/create", data={
|
||||||
|
"Tty": True,
|
||||||
|
"OpenStdin": True,
|
||||||
|
"StdinOnce": False,
|
||||||
|
"HostConfig":
|
||||||
|
{
|
||||||
|
"CapAdd": ["ALL"],
|
||||||
|
"Binds": [
|
||||||
|
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||||
|
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||||
|
],
|
||||||
|
"Privileged": True
|
||||||
|
},
|
||||||
|
"Volumes": {},
|
||||||
|
"Entrypoint": ["/gns3/init.sh"],
|
||||||
|
"Cmd": ["/bin/ls"],
|
||||||
|
"NetworkDisabled": True,
|
||||||
|
"Name": "test",
|
||||||
|
"Hostname": "test",
|
||||||
|
"Image": "ubuntu:latest",
|
||||||
|
"Env": [
|
||||||
|
"container=docker",
|
||||||
|
"GNS3_MAX_ETHERNET=eth0",
|
||||||
|
"GNS3_VOLUMES=/etc/network"
|
||||||
|
]
|
||||||
|
})
|
||||||
|
assert vm._cid == "e90e34656806"
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_environment(loop, project, manager):
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||||
|
vm.environment = "YES=1\nNO=0"
|
||||||
|
loop.run_until_complete(asyncio.async(vm.create()))
|
||||||
|
mock.assert_called_with("POST", "containers/create", data={
|
||||||
|
"Tty": True,
|
||||||
|
"OpenStdin": True,
|
||||||
|
"StdinOnce": False,
|
||||||
|
"HostConfig":
|
||||||
|
{
|
||||||
|
"CapAdd": ["ALL"],
|
||||||
|
"Binds": [
|
||||||
|
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||||
|
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||||
|
],
|
||||||
|
"Privileged": True
|
||||||
|
},
|
||||||
|
"Env": [
|
||||||
|
"container=docker",
|
||||||
|
"GNS3_MAX_ETHERNET=eth0",
|
||||||
|
"GNS3_VOLUMES=/etc/network",
|
||||||
|
"YES=1",
|
||||||
|
"NO=0"
|
||||||
|
],
|
||||||
|
"Volumes": {},
|
||||||
|
"NetworkDisabled": True,
|
||||||
|
"Name": "test",
|
||||||
|
"Hostname": "test",
|
||||||
|
"Image": "ubuntu:latest",
|
||||||
|
"Entrypoint": ["/gns3/init.sh"],
|
||||||
|
"Cmd": ["/bin/sh"]
|
||||||
|
})
|
||||||
|
assert vm._cid == "e90e34656806"
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_image_not_available(loop, project, manager):
|
||||||
|
|
||||||
|
call = 0
|
||||||
|
|
||||||
|
@asyncio.coroutine
|
||||||
|
def informations():
|
||||||
|
nonlocal call
|
||||||
|
if call == 0:
|
||||||
|
call += 1
|
||||||
|
raise DockerHttp404Error("missing")
|
||||||
|
else:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
|
||||||
|
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||||
|
vm._get_image_informations = MagicMock()
|
||||||
|
vm._get_image_informations.side_effect = informations
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM.pull_image", return_value=True) as mock_pull:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.create()))
|
||||||
|
mock.assert_called_with("POST", "containers/create", data={
|
||||||
|
"Tty": True,
|
||||||
|
"OpenStdin": True,
|
||||||
|
"StdinOnce": False,
|
||||||
|
"HostConfig":
|
||||||
|
{
|
||||||
|
"CapAdd": ["ALL"],
|
||||||
|
"Binds": [
|
||||||
|
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||||
|
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||||
|
],
|
||||||
|
"Privileged": True
|
||||||
|
},
|
||||||
|
"Volumes": {},
|
||||||
|
"NetworkDisabled": True,
|
||||||
|
"Name": "test",
|
||||||
|
"Hostname": "test",
|
||||||
|
"Image": "ubuntu:latest",
|
||||||
|
"Env": [
|
||||||
|
"container=docker",
|
||||||
|
"GNS3_MAX_ETHERNET=eth0",
|
||||||
|
"GNS3_VOLUMES=/etc/network"
|
||||||
|
],
|
||||||
|
"Entrypoint": ["/gns3/init.sh"],
|
||||||
|
"Cmd": ["/bin/sh"]
|
||||||
|
})
|
||||||
|
assert vm._cid == "e90e34656806"
|
||||||
|
mock_pull.assert_called_with("ubuntu:latest")
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_container_state(loop, vm):
|
||||||
|
response = {
|
||||||
|
"State": {
|
||||||
|
"Error": "",
|
||||||
|
"ExitCode": 9,
|
||||||
|
"FinishedAt": "2015-01-06T15:47:32.080254511Z",
|
||||||
|
"OOMKilled": False,
|
||||||
|
"Paused": False,
|
||||||
|
"Pid": 0,
|
||||||
|
"Restarting": False,
|
||||||
|
"Running": True,
|
||||||
|
"StartedAt": "2015-01-06T15:47:32.072697474Z"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "running"
|
||||||
|
|
||||||
|
response["State"]["Running"] = False
|
||||||
|
response["State"]["Paused"] = True
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "paused"
|
||||||
|
|
||||||
|
response["State"]["Running"] = False
|
||||||
|
response["State"]["Paused"] = False
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "exited"
|
||||||
|
|
||||||
|
|
||||||
|
def test_is_running(loop, vm):
|
||||||
|
response = {
|
||||||
|
"State": {
|
||||||
|
"Running": False,
|
||||||
|
"Paused": False
|
||||||
|
}
|
||||||
|
}
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
assert loop.run_until_complete(asyncio.async(vm.is_running())) is False
|
||||||
|
|
||||||
|
response["State"]["Running"] = True
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||||
|
assert loop.run_until_complete(asyncio.async(vm.is_running())) is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_pause(loop, vm):
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.pause()))
|
||||||
|
|
||||||
|
mock.assert_called_with("POST", "containers/e90e34656842/pause")
|
||||||
|
assert vm.status == "paused"
|
||||||
|
|
||||||
|
|
||||||
|
def test_unpause(loop, vm):
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.unpause()))
|
||||||
|
|
||||||
|
mock.assert_called_with("POST", "containers/e90e34656842/unpause")
|
||||||
|
|
||||||
|
|
||||||
|
def test_start(loop, vm, manager, free_console_port):
|
||||||
|
|
||||||
|
assert vm.status != "started"
|
||||||
|
vm.adapters = 1
|
||||||
|
|
||||||
|
vm.allocate_aux = True
|
||||||
|
vm._start_aux = AsyncioMagicMock()
|
||||||
|
|
||||||
|
vm._get_container_state = AsyncioMagicMock(return_value="stopped")
|
||||||
|
vm._start_ubridge = AsyncioMagicMock()
|
||||||
|
vm._get_namespace = AsyncioMagicMock(return_value=42)
|
||||||
|
vm._add_ubridge_connection = AsyncioMagicMock()
|
||||||
|
vm._start_console = AsyncioMagicMock()
|
||||||
|
|
||||||
|
nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
||||||
|
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.start()))
|
||||||
|
|
||||||
|
mock_query.assert_called_with("POST", "containers/e90e34656842/start")
|
||||||
|
vm._add_ubridge_connection.assert_called_once_with(nio, 0, 42)
|
||||||
|
assert vm._start_ubridge.called
|
||||||
|
assert vm._start_console.called
|
||||||
|
assert vm._start_aux.called
|
||||||
|
assert vm.status == "started"
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_namespace_failed(loop, vm, manager, free_console_port):
|
||||||
|
|
||||||
|
assert vm.status != "started"
|
||||||
|
vm.adapters = 1
|
||||||
|
|
||||||
|
nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
||||||
|
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._start_ubridge") as mock_start_ubridge:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_namespace", return_value=42) as mock_namespace:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._add_ubridge_connection", side_effect=UbridgeNamespaceError()) as mock_add_ubridge_connection:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_log", return_value='Hello not available') as mock_log:
|
||||||
|
|
||||||
|
with pytest.raises(DockerError):
|
||||||
|
loop.run_until_complete(asyncio.async(vm.start()))
|
||||||
|
|
||||||
|
mock_query.assert_any_call("POST", "containers/e90e34656842/start")
|
||||||
|
mock_add_ubridge_connection.assert_called_once_with(nio, 0, 42)
|
||||||
|
assert mock_start_ubridge.called
|
||||||
|
assert vm.status == "stopped"
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_without_nio(loop, vm, manager, free_console_port):
|
||||||
|
"""
|
||||||
|
If no nio exists we will create one.
|
||||||
|
"""
|
||||||
|
|
||||||
|
assert vm.status != "started"
|
||||||
|
vm.adapters = 1
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._start_ubridge") as mock_start_ubridge:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_namespace", return_value=42) as mock_namespace:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._add_ubridge_connection") as mock_add_ubridge_connection:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._start_console") as mock_start_console:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.start()))
|
||||||
|
|
||||||
|
mock_query.assert_called_with("POST", "containers/e90e34656842/start")
|
||||||
|
assert mock_add_ubridge_connection.called
|
||||||
|
assert mock_start_ubridge.called
|
||||||
|
assert mock_start_console.called
|
||||||
|
assert vm.status == "started"
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_unpause(loop, vm, manager, free_console_port):
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="paused"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM.unpause", return_value="paused") as mock:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.start()))
|
||||||
|
assert mock.called
|
||||||
|
assert vm.status == "started"
|
||||||
|
|
||||||
|
|
||||||
|
def test_restart(loop, vm):
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.restart()))
|
||||||
|
|
||||||
|
mock.assert_called_with("POST", "containers/e90e34656842/restart")
|
||||||
|
|
||||||
|
|
||||||
|
def test_stop(loop, vm):
|
||||||
|
vm._ubridge_hypervisor = MagicMock()
|
||||||
|
vm._ubridge_hypervisor.is_running.return_value = True
|
||||||
|
vm._fix_permissions = MagicMock()
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="running"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.stop()))
|
||||||
|
mock_query.assert_called_with("POST", "containers/e90e34656842/stop", params={"t": 5})
|
||||||
|
assert vm._ubridge_hypervisor.stop.called
|
||||||
|
assert vm._fix_permissions.called
|
||||||
|
|
||||||
|
|
||||||
|
def test_stop_paused_container(loop, vm):
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="paused"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM.unpause") as mock_unpause:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.stop()))
|
||||||
|
mock_query.assert_called_with("POST", "containers/e90e34656842/stop", params={"t": 5})
|
||||||
|
assert mock_unpause.called
|
||||||
|
|
||||||
|
|
||||||
|
def test_update(loop, vm):
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
|
||||||
|
original_console = vm.console
|
||||||
|
original_aux = vm.aux
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.update()))
|
||||||
|
|
||||||
|
mock_query.assert_any_call("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})
|
||||||
|
mock_query.assert_any_call("POST", "containers/create", data={
|
||||||
|
"Tty": True,
|
||||||
|
"OpenStdin": True,
|
||||||
|
"StdinOnce": False,
|
||||||
|
"HostConfig":
|
||||||
|
{
|
||||||
|
"CapAdd": ["ALL"],
|
||||||
|
"Binds": [
|
||||||
|
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||||
|
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||||
|
],
|
||||||
|
"Privileged": True
|
||||||
|
},
|
||||||
|
"Volumes": {},
|
||||||
|
"NetworkDisabled": True,
|
||||||
|
"Name": "test",
|
||||||
|
"Hostname": "test",
|
||||||
|
"Image": "ubuntu:latest",
|
||||||
|
"Env": [
|
||||||
|
"container=docker",
|
||||||
|
"GNS3_MAX_ETHERNET=eth0",
|
||||||
|
"GNS3_VOLUMES=/etc/network"
|
||||||
|
],
|
||||||
|
"Entrypoint": ["/gns3/init.sh"],
|
||||||
|
"Cmd": ["/bin/sh"]
|
||||||
|
})
|
||||||
|
assert vm.console == original_console
|
||||||
|
assert vm.aux == original_aux
|
||||||
|
|
||||||
|
|
||||||
|
def test_update_vnc(loop, vm):
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
|
||||||
|
vm.console_type = "vnc"
|
||||||
|
vm.console = 5900
|
||||||
|
vm._display = "display"
|
||||||
|
original_console = vm.console
|
||||||
|
original_aux = vm.aux
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._start_vnc"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.update()))
|
||||||
|
|
||||||
|
assert vm.console == original_console
|
||||||
|
assert vm.aux == original_aux
|
||||||
|
|
||||||
|
|
||||||
|
def test_update_running(loop, vm):
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"Id": "e90e34656806",
|
||||||
|
"Warnings": []
|
||||||
|
}
|
||||||
|
|
||||||
|
original_console = vm.console
|
||||||
|
vm.start = MagicMock()
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="running"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.update()))
|
||||||
|
|
||||||
|
mock_query.assert_any_call("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})
|
||||||
|
mock_query.assert_any_call("POST", "containers/create", data={
|
||||||
|
"Tty": True,
|
||||||
|
"OpenStdin": True,
|
||||||
|
"StdinOnce": False,
|
||||||
|
"HostConfig":
|
||||||
|
{
|
||||||
|
"CapAdd": ["ALL"],
|
||||||
|
"Binds": [
|
||||||
|
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||||
|
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||||
|
],
|
||||||
|
"Privileged": True
|
||||||
|
},
|
||||||
|
"Volumes": {},
|
||||||
|
"NetworkDisabled": True,
|
||||||
|
"Name": "test",
|
||||||
|
"Hostname": "test",
|
||||||
|
"Image": "ubuntu:latest",
|
||||||
|
"Env": [
|
||||||
|
"container=docker",
|
||||||
|
"GNS3_MAX_ETHERNET=eth0",
|
||||||
|
"GNS3_VOLUMES=/etc/network"
|
||||||
|
],
|
||||||
|
"Entrypoint": ["/gns3/init.sh"],
|
||||||
|
"Cmd": ["/bin/sh"]
|
||||||
|
})
|
||||||
|
|
||||||
|
assert vm.console == original_console
|
||||||
|
assert vm.start.called
|
||||||
|
|
||||||
|
|
||||||
|
def test_delete(loop, vm):
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.delete()))
|
||||||
|
mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})
|
||||||
|
|
||||||
|
|
||||||
|
def test_close(loop, vm, port_manager):
|
||||||
|
nio = {"type": "nio_udp",
|
||||||
|
"lport": 4242,
|
||||||
|
"rport": 4343,
|
||||||
|
"rhost": "127.0.0.1"}
|
||||||
|
nio = vm.manager.create_nio(0, nio)
|
||||||
|
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.close()))
|
||||||
|
mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})
|
||||||
|
|
||||||
|
assert vm._closed is True
|
||||||
|
assert "4242" not in port_manager.udp_ports
|
||||||
|
|
||||||
|
|
||||||
|
def test_close_vnc(loop, vm, port_manager):
|
||||||
|
|
||||||
|
vm._console_type = "vnc"
|
||||||
|
vm._x11vnc_process = MagicMock()
|
||||||
|
vm._xvfb_process = MagicMock()
|
||||||
|
|
||||||
|
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||||
|
loop.run_until_complete(asyncio.async(vm.close()))
|
||||||
|
mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})
|
||||||
|
|
||||||
|
assert vm._closed is True
|
||||||
|
assert vm._xvfb_process.terminate.called
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_namespace(loop, vm):
|
||||||
|
response = {
|
||||||
|
"State": {
|
||||||
|
"Pid": 42
|
||||||
|
}
|
||||||
|
}
|
||||||
|
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
|
||||||
|
assert loop.run_until_complete(asyncio.async(vm._get_namespace())) == 42
|
||||||
|
mock_query.assert_called_with("GET", "containers/e90e34656842/json")
|
||||||
|
|
||||||
|
|
||||||
|
def test_add_ubridge_connection(loop, vm):
|
||||||
|
|
||||||
|
nio = {"type": "nio_udp",
|
||||||
|
"lport": 4242,
|
||||||
|
"rport": 4343,
|
||||||
|
"rhost": "127.0.0.1"}
|
||||||
|
nio = vm.manager.create_nio(0, nio)
|
||||||
|
nio.startPacketCapture("/tmp/capture.pcap")
|
||||||
|
vm._ubridge_hypervisor = MagicMock()
|
||||||
|
|
||||||
|
loop.run_until_complete(asyncio.async(vm._add_ubridge_connection(nio, 0, 42)))
|
||||||
|
|
||||||
|
calls = [
|
||||||
|
call.send("docker create_veth veth-gns3-e0 veth-gns3-i0"),
|
||||||
|
call.send('docker move_to_ns veth-gns3-i0 42 eth0'),
|
||||||
|
call.send('bridge create bridge0'),
|
||||||
|
call.send('bridge add_nio_linux_raw bridge0 veth-gns3-e0'),
|
||||||
|
call.send('bridge add_nio_udp bridge0 4242 127.0.0.1 4343'),
|
||||||
|
call.send('bridge start_capture bridge0 "/tmp/capture.pcap"'),
|
||||||
|
call.send('bridge start bridge0')
|
||||||
|
]
|
||||||
|
# We need to check any_order ortherwise mock is confused by asyncio
|
||||||
|
vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)
|
||||||
|
|
||||||
|
|
||||||
|
def test_add_ubridge_connection_none_nio(loop, vm):
    """With no NIO the veth pair is still created and moved into the namespace, but no bridge is built."""

    nio = None
    vm._ubridge_hypervisor = MagicMock()

    loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 0, 42)))

    calls = [
        call.send("docker create_veth veth-gns3-e0 veth-gns3-i0"),
        call.send('docker move_to_ns veth-gns3-i0 42 eth0'),
    ]
    # We need to check any_order otherwise mock is confused by asyncio
    vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)
|
||||||
|
|
||||||
|
|
||||||
|
def test_add_ubridge_connection_invalid_adapter_number(loop, vm):
    """An out-of-range adapter number must raise DockerError."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    with pytest.raises(DockerError):
        loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 12, 42)))
|
||||||
|
|
||||||
|
|
||||||
|
def test_add_ubridge_connection_no_free_interface(loop, vm):
    """When every veth interface name is already taken, _add_ubridge_connection must raise DockerError."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    with pytest.raises(DockerError):

        # We create fake ethernet interfaces for docker
        interfaces = ["veth-gns3-e{}".format(index) for index in range(4096)]

        with patch("psutil.net_if_addrs", return_value=interfaces):
            loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 0, 42)))
|
||||||
|
|
||||||
|
|
||||||
|
def test_delete_ubridge_connection(loop, vm):
    """Deleting a connection must tear down the bridge and the veth pair."""

    vm._ubridge_hypervisor = MagicMock()
    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)

    loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 0, 42)))
    loop.run_until_complete(asyncio.ensure_future(vm._delete_ubridge_connection(0)))

    calls = [
        call.send("bridge delete bridge0"),
        call.send('docker delete_veth veth-gns3-e0')
    ]
    vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)
|
||||||
|
|
||||||
|
|
||||||
|
def test_adapter_add_nio_binding(vm, loop):
    """Binding a NIO to adapter 0 makes it retrievable from the adapter."""
    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))
    assert vm._ethernet_adapters[0].get_nio(0) == nio
|
||||||
|
|
||||||
|
|
||||||
|
def test_adapter_add_nio_binding_invalid_adapter(vm, loop):
    """Binding to a non-existent adapter number must raise DockerError."""
    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    with pytest.raises(DockerError):
        loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(12, nio)))
|
||||||
|
|
||||||
|
|
||||||
|
def test_adapter_remove_nio_binding(vm, loop):
    """Removing a bound NIO clears the adapter slot and deletes the uBridge connection."""
    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))
    with asyncio_patch("gns3server.modules.docker.DockerVM._delete_ubridge_connection") as delete_ubridge_mock:
        loop.run_until_complete(asyncio.ensure_future(vm.adapter_remove_nio_binding(0)))
        assert vm._ethernet_adapters[0].get_nio(0) is None
        delete_ubridge_mock.assert_called_with(0)
|
||||||
|
|
||||||
|
|
||||||
|
def test_adapter_remove_nio_binding_invalid_adapter(vm, loop):
    """Removing from a non-existent adapter number must raise DockerError."""
    with pytest.raises(DockerError):
        loop.run_until_complete(asyncio.ensure_future(vm.adapter_remove_nio_binding(12)))
|
||||||
|
|
||||||
|
|
||||||
|
def test_pull_image(loop, vm):
    """Pull an image and check the Docker API call while the streamed JSON progress is consumed chunk by chunk."""
    class Response:
        """
        Simulate a response splitted in multiple packets
        """

        def __init__(self):
            # index of the next chunk to serve; -1 means nothing read yet
            self._read = -1

        @asyncio.coroutine
        def read(self, size):
            self._read += 1
            if self._read == 0:
                return b'{"progress": "0/100",'
            elif self._read == 1:
                return '"id": 42}'
            else:
                # fix: the original had a bare `None` expression here;
                # make the end-of-stream sentinel an explicit return
                return None

    mock_query = MagicMock()
    mock_query.content.return_value = Response()

    with asyncio_patch("gns3server.modules.docker.Docker.http_query", return_value=mock_query) as mock:
        loop.run_until_complete(asyncio.ensure_future(vm.pull_image("ubuntu:latest")))
        mock.assert_called_with("POST", "images/create", params={"fromImage": "ubuntu:latest"})
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
    """Starting a capture flips the NIO capturing flag on."""

    output_file = str(tmpdir / "test.pcap")
    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))
    loop.run_until_complete(asyncio.ensure_future(vm.start_capture(0, output_file)))
    assert vm._ethernet_adapters[0].get_nio(0).capturing
|
||||||
|
|
||||||
|
|
||||||
|
def test_stop_capture(vm, tmpdir, manager, free_console_port, loop):
    """Start then stop a capture and check the NIO capturing flag toggles."""

    output_file = str(tmpdir / "test.pcap")
    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))
    loop.run_until_complete(vm.start_capture(0, output_file))
    assert vm._ethernet_adapters[0].get_nio(0).capturing
    loop.run_until_complete(asyncio.ensure_future(vm.stop_capture(0)))
    assert vm._ethernet_adapters[0].get_nio(0).capturing is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_log(loop, vm):
    """Container logs must be requested with the right endpoint and parameters."""
    @asyncio.coroutine
    def read():
        return b'Hello\nWorld'

    mock_query = MagicMock()
    mock_query.read = read

    with asyncio_patch("gns3server.modules.docker.Docker.http_query", return_value=mock_query) as mock:
        # the return value is not asserted, only the query; the original
        # bound it to an unused `images` local, dropped here
        loop.run_until_complete(asyncio.ensure_future(vm._get_log()))
        mock.assert_called_with("GET", "containers/e90e34656842/logs", params={"stderr": 1, "stdout": 1}, data={})
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_image_informations(project, manager, loop):
    """The image inspect endpoint must be queried for the VM's image."""
    response = {}
    with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
        vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
        loop.run_until_complete(asyncio.ensure_future(vm._get_image_informations()))
        mock.assert_called_with("GET", "images/ubuntu:latest/json")
|
||||||
|
|
||||||
|
|
||||||
|
def test_mount_binds(vm, tmpdir):
    """Volumes declared by the image are bound under /gns3volumes and mirrored in the working directory."""
    infos = {
        "ContainerConfig": {
            "Volumes": {
                "/test/experimental": {}
            }
        }
    }

    dst = os.path.join(vm.working_dir, "test/experimental")
    expected_binds = [
        "{}:/gns3:ro".format(get_resource("modules/docker/resources")),
        "{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
        "{}:/gns3volumes{}".format(dst, "/test/experimental")
    ]
    assert vm._mount_binds(infos) == expected_binds

    assert vm._volumes == ["/etc/network", "/test/experimental"]
    # the host-side directory backing the volume must have been created
    assert os.path.exists(dst)
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_vnc(vm, loop):
    """Starting VNC must allocate a display, spawn Xvfb and x11vnc, and wait for the X socket."""
    vm.console_resolution = "1280x1024"
    with patch("shutil.which", return_value="/bin/x"):
        with asyncio_patch("gns3server.modules.docker.docker_vm.wait_for_file_creation") as mock_wait:
            with asyncio_patch("asyncio.create_subprocess_exec") as mock_exec:
                loop.run_until_complete(asyncio.ensure_future(vm._start_vnc()))
                assert vm._display is not None
                mock_exec.assert_any_call("Xvfb", "-nolisten", "tcp", ":{}".format(vm._display), "-screen", "0", "1280x1024x16")
                mock_exec.assert_any_call("x11vnc", "-forever", "-nopw", "-shared", "-geometry", "1280x1024", "-display", "WAIT:{}".format(vm._display), "-rfbport", str(vm.console), "-rfbportv6", str(vm.console), "-noncache", "-listen", "127.0.0.1")
                mock_wait.assert_called_with("/tmp/.X11-unix/X{}".format(vm._display))
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_vnc_xvfb_missing(vm, loop):
    """If Xvfb is not installed, starting VNC must raise DockerError."""
    with pytest.raises(DockerError):
        loop.run_until_complete(asyncio.ensure_future(vm._start_vnc()))
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_aux(vm, loop):
    """The auxiliary console must exec a persistent busybox shell inside the container."""

    with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=MagicMock()) as mock_exec:
        loop.run_until_complete(asyncio.ensure_future(vm._start_aux()))
        mock_exec.assert_called_with('docker', 'exec', '-i', 'e90e34656842', '/gns3/bin/busybox', 'script', '-qfc', 'while true; do /gns3/bin/busybox sh; done', '/dev/null', stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)
|
||||||
|
|
||||||
|
|
||||||
|
def test_create_network_interfaces(vm):
    """The generated network config covers exactly the configured number of adapters."""

    vm.adapters = 5
    config_dir = vm._create_network_config()
    assert os.path.exists(os.path.join(config_dir, "interfaces"))
    assert os.path.exists(os.path.join(config_dir, "if-up.d"))

    with open(os.path.join(config_dir, "interfaces")) as f:
        content = f.read()

    # adapters 0..4 must be present, adapter 5 must not
    for present in ("eth0", "eth4"):
        assert present in content
    assert "eth5" not in content
|
||||||
|
|
||||||
|
|
||||||
|
def test_fix_permission(vm, loop):
    """_fix_permissions must record and rewrite ownership of every registered volume via busybox, then wait."""
    vm._volumes = ["/etc"]
    proc = MagicMock()
    with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=proc) as exec_mock:
        loop.run_until_complete(vm._fix_permissions())
        exec_mock.assert_called_with('docker', 'exec', 'e90e34656842', '/gns3/bin/busybox', 'sh', '-c', '(/gns3/bin/busybox find "/etc" -depth -print0 | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c \'%a:%u:%g:%n\' > "/etc/.gns3_perms") && /gns3/bin/busybox chmod -R u+rX "/etc" && /gns3/bin/busybox chown {}:{} -R "/etc"'.format(os.getuid(), os.getgid()))
    assert proc.wait.called
|
@ -112,10 +112,11 @@ def test_start(loop, vm, monkeypatch):
|
|||||||
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM._check_iou_licence", return_value=True):
|
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM._check_iou_licence", return_value=True):
|
||||||
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM._start_ioucon", return_value=True):
|
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM._start_ioucon", return_value=True):
|
||||||
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM._start_iouyap", return_value=True):
|
with asyncio_patch("gns3server.modules.iou.iou_vm.IOUVM._start_iouyap", return_value=True):
|
||||||
with asyncio_patch("asyncio.create_subprocess_exec", return_value=mock_process):
|
with asyncio_patch("asyncio.create_subprocess_exec", return_value=mock_process) as mock_exec:
|
||||||
mock_process.returncode = None
|
mock_process.returncode = None
|
||||||
loop.run_until_complete(asyncio.async(vm.start()))
|
loop.run_until_complete(asyncio.async(vm.start()))
|
||||||
assert vm.is_running()
|
assert vm.is_running()
|
||||||
|
assert vm.command_line == ' '.join(mock_exec.call_args[0])
|
||||||
|
|
||||||
|
|
||||||
def test_start_with_iourc(loop, vm, monkeypatch, tmpdir):
|
def test_start_with_iourc(loop, vm, monkeypatch, tmpdir):
|
||||||
@ -215,7 +216,7 @@ def test_path(vm, fake_iou_bin):
|
|||||||
def test_path_12_location(vm, fake_iou_bin):
|
def test_path_12_location(vm, fake_iou_bin):
|
||||||
|
|
||||||
# In 1.2 users uploaded images to the images roots
|
# In 1.2 users uploaded images to the images roots
|
||||||
# after the migration their images are inside images/IOU
|
# after the migration their images are inside images/IOU
|
||||||
# but old topologies use old path
|
# but old topologies use old path
|
||||||
with patch("gns3server.config.Config.get_section_config", return_value={"local": True}):
|
with patch("gns3server.config.Config.get_section_config", return_value={"local": True}):
|
||||||
vm.path = fake_iou_bin.replace("/IOU", "")
|
vm.path = fake_iou_bin.replace("/IOU", "")
|
||||||
@ -341,7 +342,7 @@ def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
|
|||||||
|
|
||||||
output_file = str(tmpdir / "test.pcap")
|
output_file = str(tmpdir / "test.pcap")
|
||||||
nio = manager.create_nio(vm.iouyap_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
nio = manager.create_nio(vm.iouyap_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
||||||
vm.adapter_add_nio_binding(0, 0, nio)
|
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
|
||||||
loop.run_until_complete(asyncio.async(vm.start_capture(0, 0, output_file)))
|
loop.run_until_complete(asyncio.async(vm.start_capture(0, 0, output_file)))
|
||||||
assert vm._adapters[0].get_nio(0).capturing
|
assert vm._adapters[0].get_nio(0).capturing
|
||||||
|
|
||||||
@ -350,7 +351,7 @@ def test_stop_capture(vm, tmpdir, manager, free_console_port, loop):
|
|||||||
|
|
||||||
output_file = str(tmpdir / "test.pcap")
|
output_file = str(tmpdir / "test.pcap")
|
||||||
nio = manager.create_nio(vm.iouyap_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
nio = manager.create_nio(vm.iouyap_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
||||||
vm.adapter_add_nio_binding(0, 0, nio)
|
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
|
||||||
loop.run_until_complete(vm.start_capture(0, 0, output_file))
|
loop.run_until_complete(vm.start_capture(0, 0, output_file))
|
||||||
assert vm._adapters[0].get_nio(0).capturing
|
assert vm._adapters[0].get_nio(0).capturing
|
||||||
loop.run_until_complete(asyncio.async(vm.stop_capture(0, 0)))
|
loop.run_until_complete(asyncio.async(vm.stop_capture(0, 0)))
|
||||||
@ -412,6 +413,14 @@ def test_iourc_content(vm):
|
|||||||
assert f.read() == "test"
|
assert f.read() == "test"
|
||||||
|
|
||||||
|
|
||||||
|
def test_iourc_content_fix_carriage_return(vm):
    """CRLF line endings in the iourc content are normalized to LF when written to disk."""

    vm.iourc_content = "test\r\n12"

    iourc_path = os.path.join(vm.temporary_directory, "iourc")
    with open(iourc_path) as f:
        assert f.read() == "test\n12"
|
||||||
|
|
||||||
|
|
||||||
def test_extract_configs(vm):
|
def test_extract_configs(vm):
|
||||||
assert vm.extract_configs() == (None, None)
|
assert vm.extract_configs() == (None, None)
|
||||||
|
|
||||||
|
73
tests/modules/qemu/test_qcow2.py
Normal file
73
tests/modules/qemu/test_qcow2.py
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
#
|
||||||
|
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||||
|
#
|
||||||
|
# This program is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import pytest
|
||||||
|
import shutil
|
||||||
|
import asyncio
|
||||||
|
|
||||||
|
from gns3server.modules.qemu.qcow2 import Qcow2, Qcow2Error
|
||||||
|
|
||||||
|
|
||||||
|
def qemu_img():
    """
    Return the path of qemu-img on system.

    We can't use shutil.which because for safety reason we break
    the PATH to avoid test interacting with real binaries
    """
    for candidate in ("/usr/bin/qemu-img", "/usr/local/bin/qemu-img"):
        if os.path.exists(candidate):
            return candidate
    return None
|
||||||
|
|
||||||
|
|
||||||
|
def test_valid_base_file():
    """A standalone qcow2 image reports version 3 and no backing file."""
    image = Qcow2("tests/resources/empty8G.qcow2")
    assert image.version == 3
    assert image.backing_file is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_valid_linked_file():
    """A linked qcow2 image exposes the name of its backing file."""
    image = Qcow2("tests/resources/linked.qcow2")
    assert image.version == 3
    assert image.backing_file == "empty8G.qcow2"
|
||||||
|
|
||||||
|
|
||||||
|
def test_invalid_file():
    """Opening a file that is not qcow2 must raise Qcow2Error."""
    with pytest.raises(Qcow2Error):
        Qcow2("tests/resources/nvram_iou")
|
||||||
|
|
||||||
|
|
||||||
|
def test_invalid_empty_file(tmpdir):
    """A zero-byte file must be rejected with Qcow2Error."""
    empty_path = str(tmpdir / 'a')
    open(empty_path, 'w+').close()
    with pytest.raises(Qcow2Error):
        Qcow2(empty_path)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.skipif(qemu_img() is None, reason="qemu-img is not available")
def test_rebase(tmpdir, loop):
    """Rebasing a linked image onto a new base updates its backing file."""
    shutil.copy("tests/resources/empty8G.qcow2", str(tmpdir / "empty16G.qcow2"))
    shutil.copy("tests/resources/linked.qcow2", str(tmpdir / "linked.qcow2"))
    qcow2 = Qcow2(str(tmpdir / "linked.qcow2"))
    assert qcow2.version == 3
    assert qcow2.backing_file == "empty8G.qcow2"
    # asyncio.ensure_future replaces the deprecated asyncio.async alias
    loop.run_until_complete(asyncio.ensure_future(qcow2.rebase(qemu_img(), str(tmpdir / "empty16G.qcow2"))))
    assert qcow2.backing_file == str(tmpdir / "empty16G.qcow2")
|
@ -50,7 +50,7 @@ def test_get_qemu_version(loop):
|
|||||||
|
|
||||||
def test_binary_list(loop):
|
def test_binary_list(loop):
|
||||||
|
|
||||||
files_to_create = ["qemu-system-x86", "qemu-system-x42", "qemu-kvm", "hello"]
|
files_to_create = ["qemu-system-x86", "qemu-system-x42", "qemu-kvm", "hello", "qemu-system-x86_64-spice"]
|
||||||
|
|
||||||
for file_to_create in files_to_create:
|
for file_to_create in files_to_create:
|
||||||
path = os.path.join(os.environ["PATH"], file_to_create)
|
path = os.path.join(os.environ["PATH"], file_to_create)
|
||||||
@ -70,6 +70,7 @@ def test_binary_list(loop):
|
|||||||
assert {"path": os.path.join(os.environ["PATH"], "qemu-kvm"), "version": version} in qemus
|
assert {"path": os.path.join(os.environ["PATH"], "qemu-kvm"), "version": version} in qemus
|
||||||
assert {"path": os.path.join(os.environ["PATH"], "qemu-system-x42"), "version": version} in qemus
|
assert {"path": os.path.join(os.environ["PATH"], "qemu-system-x42"), "version": version} in qemus
|
||||||
assert {"path": os.path.join(os.environ["PATH"], "hello"), "version": version} not in qemus
|
assert {"path": os.path.join(os.environ["PATH"], "hello"), "version": version} not in qemus
|
||||||
|
assert {"path": os.path.join(os.environ["PATH"], "qemu-system-x86_64-spice"), "version": version} not in qemus
|
||||||
|
|
||||||
qemus = loop.run_until_complete(asyncio.async(Qemu.binary_list(["x86"])))
|
qemus = loop.run_until_complete(asyncio.async(Qemu.binary_list(["x86"])))
|
||||||
|
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user