mirror of
https://github.com/GNS3/gns3-server.git
synced 2025-06-24 17:55:15 +00:00
Compare commits
309 Commits
Author | SHA1 | Date | |
---|---|---|---|
19c4ec1867 | |||
8019374ed0 | |||
af530be346 | |||
9c3cfc4f4e | |||
c7d878ed9e | |||
49f1ee2e32 | |||
bd4de862c8 | |||
f038735595 | |||
a4f8675c93 | |||
da71f29208 | |||
b53b34d485 | |||
e63da227d0 | |||
c7d9af121f | |||
15babb137d | |||
eccee6b629 | |||
ef95ba1ed8 | |||
2bbdbeaa82 | |||
de2dad20d5 | |||
84c0a17572 | |||
f0edf799b7 | |||
a7be4681d5 | |||
07b982d4db | |||
da1cd9a3e7 | |||
0eafb6f06c | |||
042a69eecf | |||
1885fe62a6 | |||
e481ffa94c | |||
937bbf0131 | |||
d58a6ccda9 | |||
84fb108abb | |||
4455499e00 | |||
763f258465 | |||
d447a04c6a | |||
f358cb45a2 | |||
6b8e93f847 | |||
db95cb5c46 | |||
d6f63d3b7d | |||
7d90a73ed2 | |||
b1b2bbd581 | |||
da7074ea74 | |||
44307b43b9 | |||
febf0f7839 | |||
26d49f19c1 | |||
e1a7efad60 | |||
08956e438c | |||
e175650fb0 | |||
5f88db93ff | |||
e0a2553be4 | |||
b905760635 | |||
f9bc745ddb | |||
ead8a6caa2 | |||
5fab48ba75 | |||
2739483751 | |||
1f0fe6d304 | |||
5898b86dfc | |||
fa769cd41d | |||
379c216109 | |||
7422d06c1d | |||
5daff17911 | |||
8296621320 | |||
42ff398f05 | |||
d1de665939 | |||
f151181409 | |||
07395c9cf2 | |||
ae076c7ca9 | |||
6982e118c0 | |||
38a41a1cfd | |||
8fbfae6a98 | |||
5ca9becdf9 | |||
bb0db7846f | |||
e82862c69f | |||
d98a7aac19 | |||
43a3584a5c | |||
049c348709 | |||
343f2b574d | |||
016c3e515b | |||
4f03c3011e | |||
e183e7da27 | |||
313226786f | |||
df7d26e418 | |||
c829a250a9 | |||
775ee4259e | |||
e335015520 | |||
469187c584 | |||
ad7ecb8850 | |||
9b99a03cbf | |||
3aed651062 | |||
435dc82873 | |||
ae687346f1 | |||
d3436756b2 | |||
6f7b06e66f | |||
009ffcb4ef | |||
3f61fbeb0e | |||
e65648398b | |||
94a709cb42 | |||
ea7754f1c8 | |||
914fe7e750 | |||
8e3e3c08f8 | |||
e497e98ca1 | |||
3fd0a6d638 | |||
851ba074e7 | |||
cdd54b951a | |||
1ddb16eca0 | |||
7bc0570735 | |||
1ae17b74df | |||
cb6df28f59 | |||
3e89543ab9 | |||
a2ebbaa322 | |||
67e346ba92 | |||
acdc1e5ebb | |||
2bb062c61e | |||
a741662636 | |||
871fea33e0 | |||
8cb5cedb5d | |||
bc33683e47 | |||
fa140e991c | |||
f4b79af45f | |||
a1bf9cdfd3 | |||
f414ce459a | |||
366e9620dc | |||
ab13f628f7 | |||
e108a9e136 | |||
9e2043bfa8 | |||
28f7c2a1cd | |||
c7d58eb3b3 | |||
41f02dad54 | |||
76cc1d2750 | |||
2cdfd6c0d7 | |||
57cf57b408 | |||
16b5bb8810 | |||
56c153af79 | |||
9beca700a5 | |||
8e429ee4c1 | |||
f3095d94c1 | |||
5fd385159c | |||
dcbaa62df8 | |||
e9fb7f4981 | |||
d952718f30 | |||
978ec1b5be | |||
d5a7561bc4 | |||
c17e00204f | |||
936faaba5a | |||
5c1522f24a | |||
b92e64e507 | |||
848120c354 | |||
8a19afd618 | |||
815ef6066d | |||
85f571ee8b | |||
cf1b7ebe1f | |||
4fc5364ab5 | |||
4a91d8a6a5 | |||
b408f29726 | |||
1d08d4a5fa | |||
6d6e8196d2 | |||
75196b8a55 | |||
7ffdd0fab1 | |||
637a7326ec | |||
4afbf816ab | |||
40aec61701 | |||
7f77c66888 | |||
b7a859fa30 | |||
cd393491d5 | |||
f6d0971f15 | |||
ae5468a2d3 | |||
ec6e5a6118 | |||
f1737a6e43 | |||
e3b95fe9ce | |||
2109fd4f4d | |||
dff74352f7 | |||
977ccabf98 | |||
ad6fb664a2 | |||
6bc54b17c7 | |||
bb63f51f80 | |||
b335daf857 | |||
b93c85c390 | |||
cb197627b4 | |||
9b9eddb30c | |||
e564c84529 | |||
5531a576d1 | |||
a5b5f6c811 | |||
9ed15e55af | |||
f8ffd078a8 | |||
2651a48102 | |||
57394dfebf | |||
7422b31b2c | |||
06b9e46cd2 | |||
dc236eba09 | |||
36dbcfa7f6 | |||
6eca621b19 | |||
a046a4c980 | |||
34459f6a88 | |||
e097a0e728 | |||
7b25ce04e8 | |||
3b04f556b3 | |||
bd71f0cf4c | |||
879591eaf5 | |||
f39af9deb7 | |||
ec044f9210 | |||
498d006225 | |||
c1193c23c1 | |||
ba006f105f | |||
fa58b1f81c | |||
e167a81080 | |||
9f05b80d01 | |||
58a360e535 | |||
d243cb7d02 | |||
c563dbba8a | |||
5345fe8490 | |||
2a20333877 | |||
2e455037e1 | |||
23c48f5e3d | |||
e6ddce86b3 | |||
268c61ce80 | |||
d2c32bb570 | |||
3fe8016938 | |||
01dbaea2e4 | |||
427cb05c2a | |||
d36a9f3403 | |||
39c41336db | |||
fa22e19321 | |||
d472f47829 | |||
dcc4ddf11b | |||
e35ab1e8c0 | |||
975e1c8fa7 | |||
19b46a4014 | |||
7cdf23f497 | |||
06da40cdcd | |||
174e7cccea | |||
75212cda3f | |||
a018c5b0f5 | |||
93e2d4096d | |||
c077f3b990 | |||
60e9241857 | |||
ee4b094160 | |||
1dd437ecdc | |||
84eb8356e8 | |||
c833a20a8c | |||
eae6f33e29 | |||
59c1e125d3 | |||
7469f65fa0 | |||
392fa187c2 | |||
dab1b26569 | |||
03ffce0a75 | |||
9d28f4c0c3 | |||
04b02171bb | |||
e91e3525c4 | |||
c333e9451f | |||
49f1931e95 | |||
ebb8c0d0b0 | |||
2d42f32d71 | |||
fd18458588 | |||
5f15667ad1 | |||
bbdbc971c0 | |||
057b637961 | |||
87eef545e1 | |||
38815b3eaf | |||
4c4613b29b | |||
9d8cb4521d | |||
9dd09ccde1 | |||
86ebb9b840 | |||
569da0ce3a | |||
f73b288a28 | |||
e34c266ee1 | |||
80e15c86dc | |||
f35c742b07 | |||
f4f9e6eba6 | |||
90109e04aa | |||
c36de3393d | |||
7aedfc92fa | |||
9b1d513b03 | |||
836023a1bc | |||
93020a940f | |||
c2b78400f2 | |||
1123047404 | |||
01e2fcf225 | |||
85b9620953 | |||
1532b3ed9b | |||
a581eeba54 | |||
9b0088728f | |||
1995adf838 | |||
5a8408cdb9 | |||
a7ec224b6d | |||
89e86b7778 | |||
56658756e2 | |||
f6fb0623be | |||
4a3322b822 | |||
98537a242a | |||
03414993dc | |||
179372936e | |||
5deb584a03 | |||
c280fd5fb8 | |||
5db1f6b2f7 | |||
f92ac6e52e | |||
758fb2a0e7 | |||
5a28b9409a | |||
1f756c0d4f | |||
439a0c80d6 | |||
fd0fb97204 | |||
884bfa4724 | |||
794a7e880a | |||
de0df70933 | |||
0f868ef107 | |||
061b459abe | |||
8cf55166cb | |||
8de90d8236 | |||
7e196cabc1 | |||
16b4d60151 | |||
33edbefa3b | |||
6a808927d2 |
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
gns3server/version.py merge=ours
|
||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -26,6 +26,7 @@ pip-log.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
.coverage
|
||||
.coverage*
|
||||
.tox
|
||||
.cache
|
||||
nosetests.xml
|
||||
|
@ -1,6 +1,5 @@
|
||||
language: python
|
||||
python:
|
||||
- '3.4'
|
||||
- '3.5'
|
||||
sudo: false
|
||||
cache: pip
|
||||
|
169
CHANGELOG
169
CHANGELOG
@ -1,5 +1,142 @@
|
||||
# Change Log
|
||||
|
||||
## 1.5.3 12/01/2016
|
||||
|
||||
* Fix sporadically systemd is unable to start gns3-server
|
||||
|
||||
## 1.5.3 rc1 20/12/2016
|
||||
|
||||
* Support aiohttp 1.2 (but not compatible with previous versions)
|
||||
* Explain that segfault on IOU is an issue with the image
|
||||
* Fix an issue with finding vmrun and vboxmanage
|
||||
* Support named remote servers for VPCS
|
||||
* When checking for a free port check if the host and 0.0.0.0 are available
|
||||
* smm=off is only for 64bits
|
||||
* Fix set hostname on remote server
|
||||
* Fix sending smm option to qemu
|
||||
* Workaround a bug with KVM, Qemu >= 2.4 and Intel CPU
|
||||
* Re-enable sleep at Vbox exit, bug seems to be back
|
||||
* Support large project (> 2GB) during export
|
||||
* Fix Deleting running telnet docker VM shows error in log
|
||||
* Fix when closing a container using VNC, root permission are not reset
|
||||
* Use $PATH also for dynamips and cleanup some $PATH usages
|
||||
* Fix a lock issue with some virtualbox vm
|
||||
* Raise proper error when you try to load an empty qcow2 file
|
||||
* Fix upload form crash
|
||||
* Search bin from the $PATH for sample configuration file
|
||||
* Updated systemd unit file and added sample configuration file
|
||||
|
||||
## 1.5.2 18/08/2016
|
||||
|
||||
* Move utils.vmnet to gns3 namespace
|
||||
* Fix Exporting portable projects with QEMU includes base images even when selecting no.
|
||||
* Catch error when md5sum file is corrupted
|
||||
* requirements.txt : added support for newer aiohttp version
|
||||
* Improve compaction of .gns3project
|
||||
* Fix crash when winpcap is not installed
|
||||
|
||||
## 1.5.1 07/07/2016
|
||||
|
||||
* Increase the number of interface for docker
|
||||
* Add the method in the bad request answer
|
||||
* Fix a rare crash in IOU
|
||||
* Fix a crash when docker is used but not installed
|
||||
* Backport Docker node hot linking
|
||||
* Allows hot-linking for Docker containers. Ref #267.
|
||||
|
||||
## 1.5.0 27/06/2016
|
||||
|
||||
* Fix import of project with no disk
|
||||
* Allow for (a lot) more docker container ports. Fixes #593.
|
||||
* Raise an error if you try to use Docker on non Linux host
|
||||
* Fix a crash in Docker if daemon stop to respond
|
||||
* Fix a crash if Dynamips router has no initial configuration
|
||||
* Kill ghosts process at startup (Dynamips, VPCS, Ubridge)
|
||||
|
||||
## 1.5.0rc2 15/06/2016
|
||||
|
||||
* Fix black screen with Qt app in Docker container
|
||||
* Detect when command in the container exit
|
||||
* Docker when the aux console exit and restart it
|
||||
* Pass by default the environment variable container=docker
|
||||
* Fix busybox binary location
|
||||
* Avoid losing console port for Docker
|
||||
* Workaround a crash in x11vnc
|
||||
* Delete volume when dropping the container
|
||||
* Catch connection reset in ioucon
|
||||
* Delete vlan.dat for L2IOL during config import. Fixes #1285.
|
||||
* Copy original resources from VOLUMES
|
||||
|
||||
## 1.5.0rc1 01/06/2016
|
||||
|
||||
* Save and restore docker permission
|
||||
* Export the list of volumes to a env variable accessible in the container
|
||||
* Fix a crash when docker start command is None
|
||||
* Ubridge 0.9.4 is required
|
||||
* Generate a MAC address using the project + node UUID. Ref #522.
|
||||
* Catch extra args in windows signal handler
|
||||
* Allow to block network traffic originating from the host OS for vmnet interfaces (Windows only).
|
||||
* Fix an import error when you have no GNS3 VM
|
||||
* Warn if you can not export a file due to permission issue
|
||||
* Do not delete adapters when stopping a VMware VM. Ref #1066.
|
||||
* Allocate a new vmnet interface if vmnet 0 1 or 8 is set to a custom adapter. Set adapter type to all adapters regardless if already configured or added by GNS3.
|
||||
* Set default VMware VM adapter type to e1000.
|
||||
|
||||
## 1.5.0b1 23/05/2016
|
||||
|
||||
* Allow an IOS router to stop even the Dynamips hypervisor command fail to be sent. Ref #488.
|
||||
* Extract private-config only when necessary (content is different than the default). Fixes #520.
|
||||
* Fixes disabling the VPCS relay feature. Fixes #521.
|
||||
* Fixes wrong exception in Docker VM implementation.
|
||||
* Force Npcap DLL to be used first for Dynamips and uBridge (instead of the one from Winpcap if installed).
|
||||
* Fixed startup-config is lost if you change any IOS router settings. Fixes #1233.
|
||||
* Fixes check for NPF service and add check for NPCAP service on Windows.
|
||||
* Fix ProcessLookupError X11VNC
|
||||
* Force tag latest for docker image if no tag is specified
|
||||
* Cleanup unbreakable space
|
||||
* Do not raise error if vmrun.exe is named vmrun.EXE
|
||||
* Load docker api only for Linux
|
||||
|
||||
## 1.5.0a2 10/05/2016
|
||||
|
||||
* Fix distribution on PyPi
|
||||
|
||||
## 1.5.0a1 10/05/2016
|
||||
|
||||
* Rebase Qcow2 disks when starting a VM if needed
|
||||
* Docker support
|
||||
* import / export portable projects (.gns3project)
|
||||
|
||||
## 1.4.6 28/04/2016
|
||||
|
||||
* More robust save/restore for VirtualBox linked clone VM hard disks.
|
||||
* Prevent non linked cloned hard disks to be detached when using VirtualBox linked cloned VMs. Fixes #1184.
|
||||
* Stricter checks to match VMware version to the right vmrun (VIX library) version. Also checks the VIX library version when only using the GNS3 VM running in VMware.
|
||||
* Allow only .pcap to be downloaded from remote stream API
|
||||
* Fix incrementation of qemu mac address
|
||||
* Clear warnings about using linked clones with VMware Player.
|
||||
* Alternative method to find the Documents folder on Windows.
|
||||
* Add IOU support and install config in /etc
|
||||
|
||||
## 1.4.5 23/03/2016
|
||||
|
||||
* Stop the VMware VM if there is an error while setting up the network connections or console.
|
||||
* Remote install on 14.04 ubuntu
|
||||
* Include VMware VMs paths found preferences.ini
|
||||
* Allow to stop a VMware VM from GNS3 even if halted within the VM. Fixes #1118.
|
||||
* Keep Dynamips stdout log file in the project directory.
|
||||
* Get MAC addresses for host interfaces to use for filtering frames from vmnet interfaces.
|
||||
* Dynamips uuid hypervisor command is no longer supported.
|
||||
* Restart NPF service after adding vmnet adapters on Windows.
|
||||
* Support /etc/gns3/gns3_server.conf for the config
|
||||
* Improve warning if fusion is not installed or in non standard location
|
||||
|
||||
## 1.4.4 23/02/2016
|
||||
* Check if VMware Fusion is correctly installed when retrieving the VM list.
|
||||
|
||||
## 1.4.3 19/02/2016
|
||||
* Nothing! (changes made in the GUI only).
|
||||
|
||||
## 1.4.2 17/02/2016
|
||||
* Fix missing format in IOU export
|
||||
* Fix number of arguments to the UDP errors on VBOX
|
||||
@ -27,7 +164,7 @@
|
||||
* Fix error when setting Qemu VM boot to 'cd' (HDD or CD/DVD-ROM)
|
||||
* Fixed the VMware default VM location on Windows, so that it doesn't assume the "Documents" folder is within the %USERPROFILE% folder, and also support Windows Server's folder (which is "My Virtual Machines" instead of "Virtual Machines").
|
||||
* Improve dynamips startup_config dump
|
||||
* Dump environnement to server debug log
|
||||
* Dump environment to server debug log
|
||||
* Fix usage of qemu 0.10 on Windows
|
||||
* Show hostname when the hostname is missing in the iourc.txt
|
||||
|
||||
@ -79,7 +216,7 @@
|
||||
* Support VM usage for qemu
|
||||
* Raise an error if psutil version is invalid
|
||||
|
||||
## 1.4.0rc1 12/11/2015
|
||||
## 1.4.0rc1 12/11/2015
|
||||
|
||||
* Raise error if server received windows path
|
||||
* Update sentry key
|
||||
@ -219,7 +356,7 @@
|
||||
* Send an error when vmware executable cannot be found on Linux. Fixes #288.
|
||||
* Support for CPUs setting for Qemu VMs.
|
||||
|
||||
## 1.4.0alpha4 04/08/2015
|
||||
## 1.4.0alpha4 04/08/2015
|
||||
|
||||
* Quote command in qemu debug logs so you can copy/paste them
|
||||
* Support for Qemu disk interfaces, cd/dvd-rom image and boot priority. Fixes #278.
|
||||
@ -227,11 +364,11 @@
|
||||
* Catch GeneratorExit exception when trying to create a Ghost IOS image.
|
||||
* Backport: removes code that deletes IOS router instance files.
|
||||
|
||||
## 1.3.9 03/08/2015
|
||||
## 1.3.9 03/08/2015
|
||||
|
||||
* Backport: removes code that deletes IOS router instance files.
|
||||
|
||||
## 1.4.0alpha3 28/07/2015
|
||||
## 1.4.0alpha3 28/07/2015
|
||||
|
||||
* Raise error if qemu image already exist when creating disk
|
||||
* Prevent user to create a qemu to a different directory on non local server
|
||||
@ -251,7 +388,7 @@
|
||||
* Update documentation
|
||||
* API for listing current projects
|
||||
|
||||
## 1.3.8 27/07/2015
|
||||
## 1.3.8 27/07/2015
|
||||
|
||||
* Catch ProcessLookupError when updating iouyap config. Fixes #255.
|
||||
* Fixes IOS adapters and WICS cannot be removed. Fixes #282.
|
||||
@ -268,7 +405,7 @@
|
||||
* Backport from 1.4: Fixes RuntimeError: Event loop is closed.
|
||||
* Backport from 1.4: Bind host on 0.0.0.0 when checking for a free UDP port.
|
||||
|
||||
## 1.4.0alpha2 22/07/2015
|
||||
## 1.4.0alpha2 22/07/2015
|
||||
|
||||
* Deactivate uBridge process monitoring (process returns 1 on Windows when stopping).
|
||||
* Prevent using different hypervisors that leverage hardware virtualization. - Implemented for Qemu when a VMware or VirtualBox VM with hardware virtualization is already running. - Implemented for VirtualBox only when a Qemu VM with KVM is already running.
|
||||
@ -323,15 +460,15 @@
|
||||
* A notification stream with process monitoring
|
||||
* VMware support
|
||||
|
||||
## 1.3.7 22/06/2015
|
||||
## 1.3.7 22/06/2015
|
||||
|
||||
* Prevent install on Python 2
|
||||
|
||||
## 1.3.6 16/06/2015
|
||||
## 1.3.6 16/06/2015
|
||||
|
||||
* Fix an issue with 1.4dev compatibility
|
||||
|
||||
## 1.3.5 16/06/15
|
||||
## 1.3.5 16/06/15
|
||||
|
||||
* Ignore invalid characters when reading the output of a process
|
||||
* Turn on / off authentication
|
||||
@ -491,7 +628,7 @@
|
||||
* Initialize chassis when creating an IOS router. Fixes #107.
|
||||
* Lock the dynamips reader and writer
|
||||
|
||||
## 1.3.0rc1 19/03/2015
|
||||
## 1.3.0rc1 19/03/2015
|
||||
|
||||
* Save IOS router config when saving the project
|
||||
* Look in legacy IOU images directory
|
||||
@ -500,7 +637,7 @@
|
||||
* Support all QEMU status
|
||||
* Bind tunnel UDP to the correct source index
|
||||
|
||||
## 1.3.0beta2 13/03/2015
|
||||
## 1.3.0beta2 13/03/2015
|
||||
|
||||
* Fixed issue when VBoxManage returns an error.
|
||||
* Server handler to shutdown a local server.
|
||||
@ -510,7 +647,7 @@
|
||||
* Alternative local server shutdown (intended for Windows).
|
||||
* Request user permission to kill the local server if it cannot be stopped.
|
||||
|
||||
## 1.3.0beta1 11/03/2015
|
||||
## 1.3.0beta1 11/03/2015
|
||||
|
||||
* Optional IOU license key check.
|
||||
* Relative path support of IOU, IOS and Qemu images.
|
||||
@ -533,7 +670,7 @@
|
||||
* Fixed Telnet server initialization issue in VirtualBox.
|
||||
* Disconnect network cable if adapter is not attached in VirtualBox vNIC.
|
||||
|
||||
## 1.3.0alpha1 03/03/2015
|
||||
## 1.3.0alpha1 03/03/2015
|
||||
|
||||
* HTTP Rest API instead of WebSocket
|
||||
* API documentation
|
||||
@ -548,7 +685,7 @@
|
||||
|
||||
## 1.2.2 2015/01/16
|
||||
|
||||
### Small improvements / new features
|
||||
### Small improvements / new features
|
||||
|
||||
* Auxiliary console support for IOS routers.
|
||||
* Suspend / resume support for Qemu.
|
||||
@ -580,7 +717,7 @@
|
||||
* VirtualBox linked clones support (experimental, still some problems with temporary projects).
|
||||
|
||||
|
||||
## 1.1 2014/10/23
|
||||
## 1.1 2014/10/23
|
||||
|
||||
* Serial console for local VirtualBox.
|
||||
|
||||
|
@ -18,13 +18,17 @@ it on https://github.com/GNS3/gns3-gui we will take care of the triage.
|
||||
|
||||
For bugs specific to the GNS3 VM, please report on https://github.com/GNS3/gns3-vm
|
||||
|
||||
## Asking for new features
|
||||
## Security issues
|
||||
|
||||
For security issues please keep it private and send an email to developers@gns3.net
|
||||
|
||||
## Asking for new features
|
||||
|
||||
The best is to start a discussion on the community website in order to get feedback
|
||||
from the whole community.
|
||||
|
||||
|
||||
## Contributing code
|
||||
## Contributing code
|
||||
|
||||
We welcome code contribution from everyone including beginners.
|
||||
Don't be afraid to submit a half finished or mediocre contribution and we will help you.
|
||||
@ -45,6 +49,6 @@ The reason we do this is to ensure, to the extent possible, that we don’t “t
|
||||
|
||||
More information there: https://github.com/GNS3/cla
|
||||
|
||||
### Pull requests
|
||||
### Pull requests
|
||||
|
||||
Creating a pull request is the easiest way to contribute code. Do not hesitate to create one early when contributing for new feature in order to get our feedback.
|
||||
|
@ -4,6 +4,7 @@ include INSTALL
|
||||
include LICENSE
|
||||
include MANIFEST.in
|
||||
include tox.ini
|
||||
include requirements.txt
|
||||
recursive-include tests *
|
||||
recursive-exclude docs *
|
||||
recursive-include gns3server *
|
||||
|
@ -205,4 +205,4 @@ If you want test coverage:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
py.test --cov=gns3server
|
||||
py.test --cov-report term-missing --cov=gns3server
|
||||
|
61
conf/gns3_server.conf
Normal file
61
conf/gns3_server.conf
Normal file
@ -0,0 +1,61 @@
|
||||
[Server]
|
||||
; IP where the server listen for connections
|
||||
host = 0.0.0.0
|
||||
; HTTP port for controlling the servers
|
||||
port = 3080
|
||||
|
||||
; Option to enable SSL encryption
|
||||
ssl = False
|
||||
certfile=/home/gns3/.config/GNS3/ssl/server.cert
|
||||
certkey=/home/gns3/.config/GNS3/ssl/server.key
|
||||
|
||||
; Path where devices images are stored
|
||||
images_path = /home/gns3/GNS3/images
|
||||
; Path where user projects are stored
|
||||
projects_path = /home/gns3/GNS3/projects
|
||||
|
||||
; Option to automatically send crash reports to the GNS3 team
|
||||
report_errors = True
|
||||
|
||||
; First console port of the range allocated to devices
|
||||
console_start_port_range = 5000
|
||||
; Last console port of the range allocated to devices
|
||||
console_end_port_range = 10000
|
||||
; First port of the range allocated for inter-device communication. Two ports are allocated per link.
|
||||
udp_start_port_range = 10000
|
||||
; Last port of the range allocated for inter-device communication. Two ports are allocated per link
|
||||
udp_end_port_range = 20000
|
||||
; uBridge executable location, default: search in PATH
|
||||
;ubridge_path = ubridge
|
||||
|
||||
; Option to enable HTTP authentication.
|
||||
auth = False
|
||||
; Username for HTTP authentication.
|
||||
user = gns3
|
||||
; Password for HTTP authentication.
|
||||
password = gns3
|
||||
|
||||
[VPCS]
|
||||
; VPCS executable location, default: search in PATH
|
||||
;vpcs_path = vpcs
|
||||
|
||||
[Dynamips]
|
||||
; Enable auxiliary console ports on IOS routers
|
||||
allocate_aux_console_ports = False
|
||||
mmap_support = True
|
||||
; Dynamips executable path, default: search in PATH
|
||||
;dynamips_path = dynamips
|
||||
sparse_memory_support = True
|
||||
ghost_ios_support = True
|
||||
|
||||
[IOU]
|
||||
; iouyap executable path, default: search in PATH
|
||||
;iouyap_path = iouyap
|
||||
; Path of your .iourc file. If not provided, the file is searched in $HOME/.iourc
|
||||
iourc_path = /home/gns3/.iourc
|
||||
; Validate if the iourc license file is correct. If you turn this off and your licence is invalid IOU will not start and no errors will be shown.
|
||||
license_check = True
|
||||
|
||||
[Qemu]
|
||||
; !! Remember to add the gns3 user to the KVM group, otherwise you will not have read / write permissions to /dev/kvm !!
|
||||
enable_kvm = True
|
8
docs/api/v1/docker.rst
Normal file
8
docs/api/v1/docker.rst
Normal file
@ -0,0 +1,8 @@
|
||||
Docker
|
||||
---------------------
|
||||
|
||||
.. toctree::
|
||||
:glob:
|
||||
:maxdepth: 2
|
||||
|
||||
docker/*
|
13
docs/api/v1/docker/dockerimages.rst
Normal file
13
docs/api/v1/docker/dockerimages.rst
Normal file
@ -0,0 +1,13 @@
|
||||
/v1/docker/images
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. contents::
|
||||
|
||||
GET /v1/docker/images
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Get all available Docker images
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **200**: Success
|
||||
|
49
docs/api/v1/docker/projectsprojectiddockerimages.rst
Normal file
49
docs/api/v1/docker/projectsprojectiddockerimages.rst
Normal file
@ -0,0 +1,49 @@
|
||||
/v1/projects/{project_id}/docker/images
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. contents::
|
||||
|
||||
POST /v1/projects/**{project_id}**/docker/images
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Create a new Docker container
|
||||
|
||||
Parameters
|
||||
**********
|
||||
- **project_id**: UUID for the project
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **400**: Invalid request
|
||||
- **201**: Instance created
|
||||
- **409**: Conflict
|
||||
|
||||
Input
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>console</td> <td> </td> <td>string</td> <td>console name</td> </tr>
|
||||
<tr><td>imagename</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>startcmd</td> <td> </td> <td>string</td> <td>Docker CMD entry</td> </tr>
|
||||
<tr><td>vm_id</td> <td> </td> <td></td> <td>Docker VM instance identifier</td> </tr>
|
||||
</table>
|
||||
|
||||
Output
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>cid</td> <td> </td> <td>string</td> <td>Docker container ID</td> </tr>
|
||||
<tr><td>image</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>project_id</td> <td>✔</td> <td>string</td> <td>Project UUID</td> </tr>
|
||||
<tr><td>vm_id</td> <td>✔</td> <td>string</td> <td>Docker container instance UUID</td> </tr>
|
||||
</table>
|
||||
|
20
docs/api/v1/docker/projectsprojectiddockerimagesid.rst
Normal file
20
docs/api/v1/docker/projectsprojectiddockerimagesid.rst
Normal file
@ -0,0 +1,20 @@
|
||||
/v1/projects/{project_id}/docker/images/{id}
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. contents::
|
||||
|
||||
DELETE /v1/projects/**{project_id}**/docker/images/**{id}**
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Delete a Docker container
|
||||
|
||||
Parameters
|
||||
**********
|
||||
- **id**: ID for the container
|
||||
- **project_id**: UUID for the project
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **400**: Invalid request
|
||||
- **404**: Instance doesn't exist
|
||||
- **204**: Instance deleted
|
||||
|
@ -0,0 +1,40 @@
|
||||
/v1/projects/{project_id}/docker/images/{id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. contents::
|
||||
|
||||
POST /v1/projects/**{project_id}**/docker/images/**{id}**/adapters/**{adapter_number:\d+}**/ports/**{port_number:\d+}**/nio
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Add a NIO to a Docker container
|
||||
|
||||
Parameters
|
||||
**********
|
||||
- **id**: ID of the container
|
||||
- **adapter_number**: Adapter where the nio should be added
|
||||
- **project_id**: UUID for the project
|
||||
- **port_number**: Port on the adapter
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **400**: Invalid request
|
||||
- **201**: NIO created
|
||||
- **404**: Instance doesn't exist
|
||||
|
||||
|
||||
DELETE /v1/projects/**{project_id}**/docker/images/**{id}**/adapters/**{adapter_number:\d+}**/ports/**{port_number:\d+}**/nio
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Remove a NIO from a Docker container
|
||||
|
||||
Parameters
|
||||
**********
|
||||
- **id**: ID of the container
|
||||
- **adapter_number**: Adapter where the nio should be added
|
||||
- **project_id**: UUID for the project
|
||||
- **port_number**: Port on the adapter
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **400**: Invalid request
|
||||
- **404**: Instance doesn't exist
|
||||
- **204**: NIO deleted
|
||||
|
50
docs/api/v1/docker/projectsprojectiddockerimagesidreload.rst
Normal file
50
docs/api/v1/docker/projectsprojectiddockerimagesidreload.rst
Normal file
@ -0,0 +1,50 @@
|
||||
/v1/projects/{project_id}/docker/images/{id}/reload
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. contents::
|
||||
|
||||
POST /v1/projects/**{project_id}**/docker/images/**{id}**/reload
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Restart a Docker container
|
||||
|
||||
Parameters
|
||||
**********
|
||||
- **id**: ID of the container
|
||||
- **project_id**: UUID of the project
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **400**: Invalid request
|
||||
- **404**: Instance doesn't exist
|
||||
- **204**: Instance restarted
|
||||
|
||||
Input
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>console</td> <td> </td> <td>string</td> <td>console name</td> </tr>
|
||||
<tr><td>imagename</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>startcmd</td> <td> </td> <td>string</td> <td>Docker CMD entry</td> </tr>
|
||||
<tr><td>vm_id</td> <td> </td> <td></td> <td>Docker VM instance identifier</td> </tr>
|
||||
</table>
|
||||
|
||||
Output
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>cid</td> <td> </td> <td>string</td> <td>Docker container ID</td> </tr>
|
||||
<tr><td>image</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>project_id</td> <td>✔</td> <td>string</td> <td>Project UUID</td> </tr>
|
||||
<tr><td>vm_id</td> <td>✔</td> <td>string</td> <td>Docker container instance UUID</td> </tr>
|
||||
</table>
|
||||
|
50
docs/api/v1/docker/projectsprojectiddockerimagesidstart.rst
Normal file
50
docs/api/v1/docker/projectsprojectiddockerimagesidstart.rst
Normal file
@ -0,0 +1,50 @@
|
||||
/v1/projects/{project_id}/docker/images/{id}/start
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. contents::
|
||||
|
||||
POST /v1/projects/**{project_id}**/docker/images/**{id}**/start
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Start a Docker container
|
||||
|
||||
Parameters
|
||||
**********
|
||||
- **id**: ID of the container
|
||||
- **project_id**: UUID of the project
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **400**: Invalid request
|
||||
- **404**: Instance doesn't exist
|
||||
- **204**: Instance started
|
||||
|
||||
Input
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>console</td> <td> </td> <td>string</td> <td>console name</td> </tr>
|
||||
<tr><td>imagename</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>startcmd</td> <td> </td> <td>string</td> <td>Docker CMD entry</td> </tr>
|
||||
<tr><td>vm_id</td> <td> </td> <td></td> <td>Docker VM instance identifier</td> </tr>
|
||||
</table>
|
||||
|
||||
Output
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>cid</td> <td> </td> <td>string</td> <td>Docker container ID</td> </tr>
|
||||
<tr><td>image</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>project_id</td> <td>✔</td> <td>string</td> <td>Project UUID</td> </tr>
|
||||
<tr><td>vm_id</td> <td>✔</td> <td>string</td> <td>Docker container instance UUID</td> </tr>
|
||||
</table>
|
||||
|
50
docs/api/v1/docker/projectsprojectiddockerimagesidstop.rst
Normal file
50
docs/api/v1/docker/projectsprojectiddockerimagesidstop.rst
Normal file
@ -0,0 +1,50 @@
|
||||
/v1/projects/{project_id}/docker/images/{id}/stop
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. contents::
|
||||
|
||||
POST /v1/projects/**{project_id}**/docker/images/**{id}**/stop
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Stop a Docker container
|
||||
|
||||
Parameters
|
||||
**********
|
||||
- **id**: ID of the container
|
||||
- **project_id**: UUID of the project
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **400**: Invalid request
|
||||
- **404**: Instance doesn't exist
|
||||
- **204**: Instance stopped
|
||||
|
||||
Input
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>console</td> <td> </td> <td>string</td> <td>console name</td> </tr>
|
||||
<tr><td>imagename</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>startcmd</td> <td> </td> <td>string</td> <td>Docker CMD entry</td> </tr>
|
||||
<tr><td>vm_id</td> <td> </td> <td></td> <td>Docker VM instance identifier</td> </tr>
|
||||
</table>
|
||||
|
||||
Output
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>cid</td> <td> </td> <td>string</td> <td>Docker container ID</td> </tr>
|
||||
<tr><td>image</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>project_id</td> <td>✔</td> <td>string</td> <td>Project UUID</td> </tr>
|
||||
<tr><td>vm_id</td> <td>✔</td> <td>string</td> <td>Docker container instance UUID</td> </tr>
|
||||
</table>
|
||||
|
@ -0,0 +1,50 @@
|
||||
/v1/projects/{project_id}/docker/images/{id}/suspend
|
||||
----------------------------------------------------------------------------------------------------------------------
|
||||
|
||||
.. contents::
|
||||
|
||||
POST /v1/projects/**{project_id}**/docker/images/**{id}**/suspend
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Pause a Docker container
|
||||
|
||||
Parameters
|
||||
**********
|
||||
- **id**: ID of the container
|
||||
- **project_id**: UUID of the project
|
||||
|
||||
Response status codes
|
||||
**********************
|
||||
- **400**: Invalid request
|
||||
- **404**: Instance doesn't exist
|
||||
- **204**: Instance paused
|
||||
|
||||
Input
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>console</td> <td> </td> <td>string</td> <td>console name</td> </tr>
|
||||
<tr><td>imagename</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>startcmd</td> <td> </td> <td>string</td> <td>Docker CMD entry</td> </tr>
|
||||
<tr><td>vm_id</td> <td> </td> <td></td> <td>Docker VM instance identifier</td> </tr>
|
||||
</table>
|
||||
|
||||
Output
|
||||
*******
|
||||
.. raw:: html
|
||||
|
||||
<table>
|
||||
<tr> <th>Name</th> <th>Mandatory</th> <th>Type</th> <th>Description</th> </tr>
|
||||
<tr><td>adapter_type</td> <td> </td> <td>string</td> <td>Docker adapter type</td> </tr>
|
||||
<tr><td>adapters</td> <td> </td> <td>integer</td> <td>number of adapters</td> </tr>
|
||||
<tr><td>cid</td> <td> </td> <td>string</td> <td>Docker container ID</td> </tr>
|
||||
<tr><td>image</td> <td> </td> <td>string</td> <td>Docker image name</td> </tr>
|
||||
<tr><td>name</td> <td> </td> <td>string</td> <td>Docker container name</td> </tr>
|
||||
<tr><td>project_id</td> <td>✔</td> <td>string</td> <td>Project UUID</td> </tr>
|
||||
<tr><td>vm_id</td> <td>✔</td> <td>string</td> <td>Docker container instance UUID</td> </tr>
|
||||
</table>
|
||||
|
@ -29,7 +29,7 @@ You can check the server version with a simple curl command:
|
||||
|
||||
.. code-block:: shell-session
|
||||
|
||||
# curl "http://localhost:8000/v1/version"
|
||||
# curl "http://localhost:3080/v1/version"
|
||||
{
|
||||
"version": "1.3.dev1"
|
||||
}
|
||||
@ -39,7 +39,7 @@ The next step is to create a project.
|
||||
|
||||
.. code-block:: shell-session
|
||||
|
||||
# curl -X POST "http://localhost:8000/v1/projects" -d '{"name": "test"}'
|
||||
# curl -X POST "http://localhost:3080/v1/projects" -d '{"name": "test"}'
|
||||
{
|
||||
"project_id": "42f9feee-3217-4104-981e-85d5f0a806ec",
|
||||
"temporary": false,
|
||||
@ -50,7 +50,7 @@ With this project id we can now create two VPCS VM.
|
||||
|
||||
.. code-block:: shell-session
|
||||
|
||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 1"}'
|
||||
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 1"}'
|
||||
{
|
||||
"console": 2000,
|
||||
"name": "VPCS 1",
|
||||
@ -58,7 +58,7 @@ With this project id we can now create two VPCS VM.
|
||||
"vm_id": "24d2e16b-fbef-4259-ae34-7bc21a41ee28"
|
||||
}%
|
||||
|
||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 2"}'
|
||||
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 2"}'
|
||||
{
|
||||
"console": 2001,
|
||||
"name": "VPCS 2",
|
||||
@ -70,12 +70,12 @@ two UDP ports.
|
||||
|
||||
.. code-block:: shell-session
|
||||
|
||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
|
||||
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
|
||||
{
|
||||
"udp_port": 10000
|
||||
}
|
||||
|
||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
|
||||
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
|
||||
{
|
||||
"udp_port": 10001
|
||||
}
|
||||
@ -86,7 +86,7 @@ communication is made by creating two UDP tunnels.
|
||||
|
||||
.. code-block:: shell-session
|
||||
|
||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/adapters/0/ports/0/nio" -d '{"lport": 10000, "rhost": "127.0.0.1", "rport": 10001, "type": "nio_udp"}'
|
||||
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/adapters/0/ports/0/nio" -d '{"lport": 10000, "rhost": "127.0.0.1", "rport": 10001, "type": "nio_udp"}'
|
||||
{
|
||||
"lport": 10000,
|
||||
"rhost": "127.0.0.1",
|
||||
@ -94,7 +94,7 @@ communication is made by creating two UDP tunnels.
|
||||
"type": "nio_udp"
|
||||
}
|
||||
|
||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/adapters/0/ports/0/nio" -d '{"lport": 10001, "rhost": "127.0.0.1", "rport": 10000, "type": "nio_udp"}'
|
||||
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/adapters/0/ports/0/nio" -d '{"lport": 10001, "rhost": "127.0.0.1", "rport": 10000, "type": "nio_udp"}'
|
||||
{
|
||||
"lport": 10001,
|
||||
"rhost": "127.0.0.1",
|
||||
@ -106,15 +106,15 @@ Now we can start the two VM
|
||||
|
||||
.. code-block:: shell-session
|
||||
|
||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/start" -d "{}"
|
||||
# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/start" -d '{}'
|
||||
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/start" -d "{}"
|
||||
# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/start" -d '{}'
|
||||
|
||||
Everything should be started now. You can connect via telnet to the different VM.
|
||||
The port is the field console in the create VM request.
|
||||
|
||||
.. code-block:: shell-session
|
||||
|
||||
# telnet 127.0.0.1 2000
|
||||
# telnet 127.0.0.1 2000
|
||||
Trying 127.0.0.1...
|
||||
Connected to localhost.
|
||||
Escape character is '^]'.
|
||||
@ -140,7 +140,7 @@ The port is the field console in the create VM request.
|
||||
Good-bye
|
||||
Connection closed by foreign host.
|
||||
|
||||
# telnet 127.0.0.1 2001
|
||||
# telnet 127.0.0.1 2001
|
||||
telnet 127.0.0.1 2001
|
||||
Trying 127.0.0.1...
|
||||
Connected to localhost.
|
||||
@ -190,7 +190,7 @@ complexity for the client due to the fact only some command on some VM can be
|
||||
concurrent.
|
||||
|
||||
|
||||
Authentification
|
||||
Authentication
|
||||
-----------------
|
||||
|
||||
In this version of the API you have no authentification system. If you
|
||||
|
@ -75,7 +75,7 @@ class Config(object):
|
||||
# 2: $HOME/.config/GNS3.conf
|
||||
# 3: /etc/xdg/GNS3/gns3_server.conf
|
||||
# 4: /etc/xdg/GNS3.conf
|
||||
# 5: server.conf in the current working directory
|
||||
# 5: gns3_server.conf in the current working directory
|
||||
|
||||
appname = "GNS3"
|
||||
home = os.path.expanduser("~")
|
||||
@ -84,6 +84,7 @@ class Config(object):
|
||||
self._files = [os.path.join(os.getcwd(), filename),
|
||||
os.path.join(home, ".config", appname, filename),
|
||||
os.path.join(home, ".config", appname + ".conf"),
|
||||
os.path.join("/etc/gns3", filename),
|
||||
os.path.join("/etc/xdg", appname, filename),
|
||||
os.path.join("/etc/xdg", appname + ".conf")]
|
||||
|
||||
|
41
gns3server/controller/__init__.py
Normal file
41
gns3server/controller/__init__.py
Normal file
@ -0,0 +1,41 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
from ..config import Config
|
||||
|
||||
|
||||
class Controller:
|
||||
"""The controller manage multiple gns3 servers"""
|
||||
|
||||
def isEnabled(self):
|
||||
"""
|
||||
:returns: True if current instance is the controller
|
||||
of our GNS3 infrastructure.
|
||||
"""
|
||||
return Config.instance().get_section_config("Server").getboolean("controller")
|
||||
|
||||
@staticmethod
|
||||
def instance():
|
||||
"""
|
||||
Singleton to return only on instance of Controller.
|
||||
:returns: instance of Controller
|
||||
"""
|
||||
|
||||
if not hasattr(Controller, '_instance') or Controller._instance is None:
|
||||
Controller._instance = Controller()
|
||||
return Controller._instance
|
@ -36,7 +36,7 @@ import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Dev build
|
||||
# Dev build
|
||||
if __version_info__[3] != 0:
|
||||
import faulthandler
|
||||
|
||||
@ -52,7 +52,7 @@ class CrashReport:
|
||||
Report crash to a third party service
|
||||
"""
|
||||
|
||||
DSN = "sync+https://119ddececccd43b69951ac87d4859870:2a982a50bbbb49ddb33c87ef3720026e@app.getsentry.com/38482"
|
||||
DSN = "sync+https://700b0c46edb0473baacd2dc318d8de1f:824bd6d75471494ebcb87ce27cfdeade@sentry.io/38482"
|
||||
if hasattr(sys, "frozen"):
|
||||
cacert = get_resource("cacert.pem")
|
||||
if cacert is not None and os.path.isfile(cacert):
|
||||
|
@ -33,6 +33,8 @@ from gns3server.handlers.upload_handler import UploadHandler
|
||||
from gns3server.handlers.index_handler import IndexHandler
|
||||
|
||||
if sys.platform.startswith("linux") or hasattr(sys, "_called_from_test") or os.environ.get("PYTEST_BUILD_DOCUMENTATION") == "1":
|
||||
from gns3server.handlers.api.docker_handler import DockerHandler
|
||||
|
||||
# IOU runs only on Linux but testsuite work on UNIX platform
|
||||
if not sys.platform.startswith("win"):
|
||||
from gns3server.handlers.api.iou_handler import IOUHandler
|
||||
|
341
gns3server/handlers/api/docker_handler.py
Normal file
341
gns3server/handlers/api/docker_handler.py
Normal file
@ -0,0 +1,341 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
from aiohttp.web import HTTPConflict
|
||||
|
||||
from ...web.route import Route
|
||||
from ...modules.docker import Docker
|
||||
|
||||
from ...schemas.docker import (
|
||||
DOCKER_CREATE_SCHEMA,
|
||||
DOCKER_OBJECT_SCHEMA,
|
||||
DOCKER_UPDATE_SCHEMA,
|
||||
DOCKER_LIST_IMAGES_SCHEMA
|
||||
)
|
||||
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||
from ...schemas.nio import NIO_SCHEMA
|
||||
|
||||
|
||||
class DockerHandler:
|
||||
"""API entry points for Docker."""
|
||||
|
||||
@classmethod
|
||||
@Route.get(
|
||||
r"/docker/images",
|
||||
status_codes={
|
||||
200: "Success",
|
||||
},
|
||||
output=DOCKER_LIST_IMAGES_SCHEMA,
|
||||
description="Get all available Docker images")
|
||||
def show(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
images = yield from docker_manager.list_images()
|
||||
response.json(images)
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/docker/vms",
|
||||
parameters={
|
||||
"project_id": "UUID for the project"
|
||||
},
|
||||
status_codes={
|
||||
201: "Instance created",
|
||||
400: "Invalid request",
|
||||
409: "Conflict"
|
||||
},
|
||||
description="Create a new Docker container",
|
||||
input=DOCKER_CREATE_SCHEMA,
|
||||
output=DOCKER_OBJECT_SCHEMA)
|
||||
def create(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
container = yield from docker_manager.create_vm(
|
||||
request.json.pop("name"),
|
||||
request.match_info["project_id"],
|
||||
request.json.get("vm_id"),
|
||||
image=request.json.pop("image"),
|
||||
start_command=request.json.get("start_command"),
|
||||
environment=request.json.get("environment"),
|
||||
adapters=request.json.get("adapters"),
|
||||
console=request.json.get("console"),
|
||||
console_type=request.json.get("console_type"),
|
||||
console_resolution=request.json.get("console_resolution", "1024x768"),
|
||||
console_http_port=request.json.get("console_http_port", 80),
|
||||
console_http_path=request.json.get("console_http_path", "/"),
|
||||
aux=request.json.get("aux")
|
||||
)
|
||||
for name, value in request.json.items():
|
||||
if name != "_vm_id":
|
||||
if hasattr(container, name) and getattr(container, name) != value:
|
||||
setattr(container, name, value)
|
||||
|
||||
response.set_status(201)
|
||||
response.json(container)
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/docker/vms/{id}/start",
|
||||
parameters={
|
||||
"project_id": "UUID of the project",
|
||||
"id": "ID of the container"
|
||||
},
|
||||
status_codes={
|
||||
204: "Instance started",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Start a Docker container",
|
||||
input=DOCKER_CREATE_SCHEMA,
|
||||
output=DOCKER_OBJECT_SCHEMA)
|
||||
def start(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
container = docker_manager.get_vm(
|
||||
request.match_info["id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
yield from container.start()
|
||||
response.set_status(204)
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/docker/vms/{id}/stop",
|
||||
parameters={
|
||||
"project_id": "UUID of the project",
|
||||
"id": "ID of the container"
|
||||
},
|
||||
status_codes={
|
||||
204: "Instance stopped",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Stop a Docker container",
|
||||
input=DOCKER_CREATE_SCHEMA,
|
||||
output=DOCKER_OBJECT_SCHEMA)
|
||||
def stop(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
container = docker_manager.get_vm(
|
||||
request.match_info["id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
yield from container.stop()
|
||||
response.set_status(204)
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/docker/vms/{id}/reload",
|
||||
parameters={
|
||||
"project_id": "UUID of the project",
|
||||
"id": "ID of the container"
|
||||
},
|
||||
status_codes={
|
||||
204: "Instance restarted",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Restart a Docker container",
|
||||
input=DOCKER_CREATE_SCHEMA,
|
||||
output=DOCKER_OBJECT_SCHEMA)
|
||||
def reload(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
container = docker_manager.get_vm(
|
||||
request.match_info["id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
yield from container.restart()
|
||||
response.set_status(204)
|
||||
|
||||
@classmethod
|
||||
@Route.delete(
|
||||
r"/projects/{project_id}/docker/vms/{id}",
|
||||
parameters={
|
||||
"id": "ID for the container",
|
||||
"project_id": "UUID for the project"
|
||||
},
|
||||
status_codes={
|
||||
204: "Instance deleted",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Delete a Docker container")
|
||||
def delete(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
container = docker_manager.get_vm(
|
||||
request.match_info["id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
yield from container.delete()
|
||||
response.set_status(204)
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/docker/vms/{id}/suspend",
|
||||
parameters={
|
||||
"project_id": "UUID of the project",
|
||||
"id": "ID of the container"
|
||||
},
|
||||
status_codes={
|
||||
204: "Instance paused",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Pause a Docker container",
|
||||
input=DOCKER_CREATE_SCHEMA,
|
||||
output=DOCKER_OBJECT_SCHEMA)
|
||||
def suspend(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
container = docker_manager.get_vm(
|
||||
request.match_info["id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
yield from container.pause()
|
||||
response.set_status(204)
|
||||
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"id": "ID of the container",
|
||||
"adapter_number": "Adapter where the nio should be added",
|
||||
"port_number": "Port on the adapter"
|
||||
},
|
||||
status_codes={
|
||||
201: "NIO created",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Add a NIO to a Docker container",
|
||||
input=NIO_SCHEMA,
|
||||
output=NIO_SCHEMA)
|
||||
def create_nio(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
container = docker_manager.get_vm(
|
||||
request.match_info["vm_id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
nio_type = request.json["type"]
|
||||
if nio_type not in ("nio_udp", "nio_tap"):
|
||||
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
|
||||
nio = docker_manager.create_nio(int(request.match_info["adapter_number"]), request.json)
|
||||
yield from container.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
|
||||
response.set_status(201)
|
||||
response.json(nio)
|
||||
|
||||
@classmethod
|
||||
@Route.delete(
|
||||
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"id": "ID of the container",
|
||||
"adapter_number": "Adapter where the nio should be added",
|
||||
"port_number": "Port on the adapter"
|
||||
},
|
||||
status_codes={
|
||||
204: "NIO deleted",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Remove a NIO from a Docker container")
|
||||
def delete_nio(request, response):
|
||||
docker_manager = Docker.instance()
|
||||
container = docker_manager.get_vm(
|
||||
request.match_info["vm_id"],
|
||||
project_id=request.match_info["project_id"])
|
||||
yield from container.adapter_remove_nio_binding(
|
||||
int(request.match_info["adapter_number"]))
|
||||
response.set_status(204)
|
||||
|
||||
@classmethod
|
||||
@Route.put(
|
||||
r"/projects/{project_id}/docker/vms/{vm_id}",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance"
|
||||
},
|
||||
status_codes={
|
||||
200: "Instance updated",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist",
|
||||
409: "Conflict"
|
||||
},
|
||||
description="Update a Docker instance",
|
||||
input=DOCKER_UPDATE_SCHEMA,
|
||||
output=DOCKER_OBJECT_SCHEMA)
|
||||
def update(request, response):
|
||||
|
||||
docker_manager = Docker.instance()
|
||||
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
vm.name = request.json.get("name", vm.name)
|
||||
vm.console = request.json.get("console", vm.console)
|
||||
vm.aux = request.json.get("aux", vm.aux)
|
||||
vm.console_type = request.json.get("console_type", vm.console_type)
|
||||
vm.console_resolution = request.json.get("console_resolution", vm.console_resolution)
|
||||
vm.console_http_port = request.json.get("console_http_port", vm.console_http_port)
|
||||
vm.console_http_path = request.json.get("console_http_path", vm.console_http_path)
|
||||
vm.start_command = request.json.get("start_command", vm.start_command)
|
||||
vm.environment = request.json.get("environment", vm.environment)
|
||||
vm.adapters = request.json.get("adapters", vm.adapters)
|
||||
yield from vm.update()
|
||||
response.json(vm)
|
||||
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/start_capture",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance",
|
||||
"adapter_number": "Adapter to start a packet capture",
|
||||
"port_number": "Port on the adapter"
|
||||
},
|
||||
status_codes={
|
||||
200: "Capture started",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist",
|
||||
409: "VM not started"
|
||||
},
|
||||
description="Start a packet capture on a IOU VM instance",
|
||||
input=VM_CAPTURE_SCHEMA)
|
||||
def start_capture(request, response):
|
||||
|
||||
docker_manager = Docker.instance()
|
||||
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
adapter_number = int(request.match_info["adapter_number"])
|
||||
pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"])
|
||||
|
||||
if not vm.is_running():
|
||||
raise HTTPConflict(text="Cannot capture traffic on a non started VM")
|
||||
yield from vm.start_capture(adapter_number, pcap_file_path)
|
||||
response.json({"pcap_file_path": str(pcap_file_path)})
|
||||
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/stop_capture",
|
||||
parameters={
|
||||
"project_id": "UUID for the project",
|
||||
"vm_id": "UUID for the instance",
|
||||
"adapter_number": "Adapter to stop a packet capture",
|
||||
"port_number": "Port on the adapter (always 0)"
|
||||
},
|
||||
status_codes={
|
||||
204: "Capture stopped",
|
||||
400: "Invalid request",
|
||||
404: "Instance doesn't exist",
|
||||
409: "VM not started"
|
||||
},
|
||||
description="Stop a packet capture on a IOU VM instance")
|
||||
def stop_capture(request, response):
|
||||
|
||||
docker_manager = Docker.instance()
|
||||
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
|
||||
if not vm.is_running():
|
||||
raise HTTPConflict(text="Cannot capture traffic on a non started VM")
|
||||
|
||||
adapter_number = int(request.match_info["adapter_number"])
|
||||
yield from vm.stop_capture(adapter_number)
|
||||
response.set_status(204)
|
@ -20,9 +20,9 @@ import asyncio
|
||||
from ...web.route import Route
|
||||
from ...schemas.dynamips_device import DEVICE_CREATE_SCHEMA
|
||||
from ...schemas.dynamips_device import DEVICE_UPDATE_SCHEMA
|
||||
from ...schemas.dynamips_device import DEVICE_CAPTURE_SCHEMA
|
||||
from ...schemas.dynamips_device import DEVICE_OBJECT_SCHEMA
|
||||
from ...schemas.dynamips_device import DEVICE_NIO_SCHEMA
|
||||
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||
from ...modules.dynamips import Dynamips
|
||||
|
||||
|
||||
@ -198,7 +198,7 @@ class DynamipsDeviceHandler:
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Start a packet capture on a Dynamips device instance",
|
||||
input=DEVICE_CAPTURE_SCHEMA)
|
||||
input=VM_CAPTURE_SCHEMA)
|
||||
def start_capture(request, response):
|
||||
|
||||
dynamips_manager = Dynamips.instance()
|
||||
|
@ -24,9 +24,9 @@ from ...schemas.nio import NIO_SCHEMA
|
||||
from ...schemas.vm import VM_LIST_IMAGES_SCHEMA
|
||||
from ...schemas.dynamips_vm import VM_CREATE_SCHEMA
|
||||
from ...schemas.dynamips_vm import VM_UPDATE_SCHEMA
|
||||
from ...schemas.dynamips_vm import VM_CAPTURE_SCHEMA
|
||||
from ...schemas.dynamips_vm import VM_OBJECT_SCHEMA
|
||||
from ...schemas.dynamips_vm import VM_CONFIGS_SCHEMA
|
||||
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||
from ...modules.dynamips import Dynamips
|
||||
from ...modules.dynamips.dynamips_error import DynamipsError
|
||||
from ...modules.project_manager import ProjectManager
|
||||
|
@ -37,6 +37,9 @@ class FileHandler:
|
||||
def read(request, response):
|
||||
response.enable_chunked_encoding()
|
||||
|
||||
if not request.json.get("location").endswith(".pcap"):
|
||||
raise aiohttp.web.HTTPForbidden(text="Only .pcap file are allowed")
|
||||
|
||||
try:
|
||||
with open(request.json.get("location"), "rb") as f:
|
||||
loop = asyncio.get_event_loop()
|
||||
|
@ -24,9 +24,9 @@ from ...schemas.iou import IOU_CREATE_SCHEMA
|
||||
from ...schemas.iou import IOU_START_SCHEMA
|
||||
from ...schemas.iou import IOU_UPDATE_SCHEMA
|
||||
from ...schemas.iou import IOU_OBJECT_SCHEMA
|
||||
from ...schemas.iou import IOU_CAPTURE_SCHEMA
|
||||
from ...schemas.iou import IOU_CONFIGS_SCHEMA
|
||||
from ...schemas.vm import VM_LIST_IMAGES_SCHEMA
|
||||
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||
from ...modules.iou import IOU
|
||||
|
||||
|
||||
@ -65,10 +65,6 @@ class IOUHandler:
|
||||
if name == "private_config_content" and (vm.private_config_content and len(vm.private_config_content) > 0):
|
||||
continue
|
||||
setattr(vm, name, value)
|
||||
if "startup_config_content" in request.json:
|
||||
vm.startup_config = request.json.get("startup_config_content")
|
||||
if "private_config_content" in request.json:
|
||||
vm.private_config = request.json.get("private_config_content")
|
||||
response.set_status(201)
|
||||
response.json(vm)
|
||||
|
||||
@ -112,14 +108,9 @@ class IOUHandler:
|
||||
|
||||
iou_manager = IOU.instance()
|
||||
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
|
||||
for name, value in request.json.items():
|
||||
if hasattr(vm, name) and getattr(vm, name) != value:
|
||||
setattr(vm, name, value)
|
||||
if "startup_config_content" in request.json:
|
||||
vm.startup_config = request.json.get("startup_config_content")
|
||||
if "private_config_content" in request.json:
|
||||
vm.private_config = request.json.get("private_config_content")
|
||||
response.json(vm)
|
||||
|
||||
@classmethod
|
||||
@ -233,7 +224,7 @@ class IOUHandler:
|
||||
if nio_type not in ("nio_udp", "nio_tap", "nio_generic_ethernet"):
|
||||
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
|
||||
nio = iou_manager.create_nio(vm.iouyap_path, request.json)
|
||||
vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio)
|
||||
yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio)
|
||||
response.set_status(201)
|
||||
response.json(nio)
|
||||
|
||||
@ -256,7 +247,7 @@ class IOUHandler:
|
||||
|
||||
iou_manager = IOU.instance()
|
||||
vm = iou_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]))
|
||||
yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]))
|
||||
response.set_status(204)
|
||||
|
||||
@Route.post(
|
||||
@ -274,7 +265,7 @@ class IOUHandler:
|
||||
409: "VM not started"
|
||||
},
|
||||
description="Start a packet capture on a IOU VM instance",
|
||||
input=IOU_CAPTURE_SCHEMA)
|
||||
input=VM_CAPTURE_SCHEMA)
|
||||
def start_capture(request, response):
|
||||
|
||||
iou_manager = IOU.instance()
|
||||
|
@ -20,6 +20,7 @@ import asyncio
|
||||
import json
|
||||
import os
|
||||
import psutil
|
||||
import tempfile
|
||||
|
||||
from ...web.route import Route
|
||||
from ...schemas.project import PROJECT_OBJECT_SCHEMA, PROJECT_CREATE_SCHEMA, PROJECT_UPDATE_SCHEMA, PROJECT_FILE_LIST_SCHEMA, PROJECT_LIST_SCHEMA
|
||||
@ -56,6 +57,7 @@ class ProjectHandler:
|
||||
description="Create a new project on the server",
|
||||
status_codes={
|
||||
201: "Project created",
|
||||
403: "You are not allowed to modify this property",
|
||||
409: "Project already created"
|
||||
},
|
||||
output=PROJECT_OBJECT_SCHEMA,
|
||||
@ -234,7 +236,7 @@ class ProjectHandler:
|
||||
:returns: hash
|
||||
"""
|
||||
stats = {}
|
||||
# Non blocking call in order to get cpu usage. First call will return 0
|
||||
# Non blocking call in order to get cpu usage. First call will return 0
|
||||
stats["cpu_usage_percent"] = psutil.cpu_percent(interval=None)
|
||||
stats["memory_usage_percent"] = psutil.virtual_memory().percent
|
||||
return {"action": "ping", "event": stats}
|
||||
@ -278,7 +280,7 @@ class ProjectHandler:
|
||||
path = request.match_info["path"]
|
||||
path = os.path.normpath(path)
|
||||
|
||||
# Raise error if user try to escape
|
||||
# Raise error if user try to escape
|
||||
if path[0] == ".":
|
||||
raise aiohttp.web.HTTPForbidden
|
||||
path = os.path.join(project.path, path)
|
||||
@ -301,4 +303,112 @@ class ProjectHandler:
|
||||
except FileNotFoundError:
|
||||
raise aiohttp.web.HTTPNotFound()
|
||||
except PermissionError:
|
||||
raise aiohttp.web.HTTPForbidden()
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/files/{path:.+}",
|
||||
description="Get a file of a project",
|
||||
parameters={
|
||||
"project_id": "The UUID of the project",
|
||||
},
|
||||
raw=True,
|
||||
status_codes={
|
||||
200: "Return the file",
|
||||
403: "Permission denied",
|
||||
404: "The path doesn't exist"
|
||||
})
|
||||
def write_file(request, response):
|
||||
|
||||
pm = ProjectManager.instance()
|
||||
project = pm.get_project(request.match_info["project_id"])
|
||||
path = request.match_info["path"]
|
||||
path = os.path.normpath(path)
|
||||
|
||||
# Raise error if user try to escape
|
||||
if path[0] == ".":
|
||||
raise aiohttp.web.HTTPForbidden
|
||||
path = os.path.join(project.path, path)
|
||||
|
||||
response.set_status(200)
|
||||
|
||||
try:
|
||||
with open(path, 'wb+') as f:
|
||||
while True:
|
||||
packet = yield from request.content.read(512)
|
||||
if not packet:
|
||||
break
|
||||
f.write(packet)
|
||||
|
||||
except FileNotFoundError:
|
||||
raise aiohttp.web.HTTPNotFound()
|
||||
except PermissionError:
|
||||
raise aiohttp.web.HTTPForbidden()
|
||||
|
||||
@classmethod
|
||||
@Route.get(
|
||||
r"/projects/{project_id}/export",
|
||||
description="Export a project as a portable archive",
|
||||
parameters={
|
||||
"project_id": "The UUID of the project",
|
||||
},
|
||||
raw=True,
|
||||
status_codes={
|
||||
200: "Return the file",
|
||||
404: "The project doesn't exist"
|
||||
})
|
||||
def export_project(request, response):
|
||||
|
||||
pm = ProjectManager.instance()
|
||||
project = pm.get_project(request.match_info["project_id"])
|
||||
response.content_type = 'application/gns3project'
|
||||
response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name)
|
||||
response.enable_chunked_encoding()
|
||||
# Very important: do not send a content length otherwise QT close the connection but curl can consume the Feed
|
||||
response.content_length = None
|
||||
response.start(request)
|
||||
|
||||
include_images = bool(int(request.json.get("include_images", "0")))
|
||||
for data in project.export(include_images=include_images):
|
||||
response.write(data)
|
||||
yield from response.drain()
|
||||
|
||||
yield from response.write_eof()
|
||||
|
||||
@classmethod
|
||||
@Route.post(
|
||||
r"/projects/{project_id}/import",
|
||||
description="Import a project from a portable archive",
|
||||
parameters={
|
||||
"project_id": "The UUID of the project",
|
||||
},
|
||||
raw=True,
|
||||
output=PROJECT_OBJECT_SCHEMA,
|
||||
status_codes={
|
||||
200: "Project imported",
|
||||
403: "You are not allowed to modify this property"
|
||||
})
|
||||
def import_project(request, response):
|
||||
|
||||
pm = ProjectManager.instance()
|
||||
project_id = request.match_info["project_id"]
|
||||
project = pm.create_project(project_id=project_id)
|
||||
|
||||
# We write the content to a temporary location
|
||||
# and after extract all. It could be more optimal to stream
|
||||
# this but it's not implemented in Python.
|
||||
#
|
||||
# Spooled mean the file is temporary keep in ram until max_size
|
||||
try:
|
||||
with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
|
||||
while True:
|
||||
packet = yield from request.content.read(512)
|
||||
if not packet:
|
||||
break
|
||||
temp.write(packet)
|
||||
project.import_zip(temp, gns3vm=bool(int(request.GET.get("gns3vm", "1"))))
|
||||
except OSError as e:
|
||||
raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
|
||||
|
||||
response.json(project)
|
||||
response.set_status(201)
|
||||
|
@ -22,8 +22,8 @@ from ...web.route import Route
|
||||
from ...schemas.nio import NIO_SCHEMA
|
||||
from ...schemas.virtualbox import VBOX_CREATE_SCHEMA
|
||||
from ...schemas.virtualbox import VBOX_UPDATE_SCHEMA
|
||||
from ...schemas.virtualbox import VBOX_CAPTURE_SCHEMA
|
||||
from ...schemas.virtualbox import VBOX_OBJECT_SCHEMA
|
||||
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||
from ...modules.virtualbox import VirtualBox
|
||||
from ...modules.project_manager import ProjectManager
|
||||
|
||||
@ -342,7 +342,7 @@ class VirtualBoxHandler:
|
||||
404: "Instance doesn't exist"
|
||||
},
|
||||
description="Start a packet capture on a VirtualBox VM instance",
|
||||
input=VBOX_CAPTURE_SCHEMA)
|
||||
input=VM_CAPTURE_SCHEMA)
|
||||
def start_capture(request, response):
|
||||
|
||||
vbox_manager = VirtualBox.instance()
|
||||
|
@ -22,7 +22,7 @@ from ...web.route import Route
|
||||
from ...schemas.vmware import VMWARE_CREATE_SCHEMA
|
||||
from ...schemas.vmware import VMWARE_UPDATE_SCHEMA
|
||||
from ...schemas.vmware import VMWARE_OBJECT_SCHEMA
|
||||
from ...schemas.vmware import VMWARE_CAPTURE_SCHEMA
|
||||
from ...schemas.vm import VM_CAPTURE_SCHEMA
|
||||
from ...schemas.nio import NIO_SCHEMA
|
||||
from ...modules.vmware import VMware
|
||||
from ...modules.project_manager import ProjectManager
|
||||
@ -271,7 +271,7 @@ class VMwareHandler:
|
||||
vmware_manager = VMware.instance()
|
||||
vm = vmware_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
|
||||
nio_type = request.json["type"]
|
||||
if nio_type not in ("nio_udp", "nio_vmnet", "nio_nat"):
|
||||
if nio_type not in ("nio_udp", "nio_vmnet", "nio_nat", "nio_tap"):
|
||||
raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
|
||||
nio = vmware_manager.create_nio(None, request.json)
|
||||
yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
|
||||
@ -314,7 +314,7 @@ class VMwareHandler:
|
||||
404: "Instance doesn't exist",
|
||||
},
|
||||
description="Start a packet capture on a VMware VM instance",
|
||||
input=VMWARE_CAPTURE_SCHEMA)
|
||||
input=VM_CAPTURE_SCHEMA)
|
||||
def start_capture(request, response):
|
||||
|
||||
vmware_manager = VMware.instance()
|
||||
|
@ -54,7 +54,8 @@ class UploadHandler:
|
||||
@Route.post(
|
||||
r"/upload",
|
||||
description="Manage upload of GNS3 images",
|
||||
api_version=None
|
||||
api_version=None,
|
||||
raw=True
|
||||
)
|
||||
def upload(request, response):
|
||||
data = yield from request.post()
|
||||
|
@ -27,6 +27,9 @@ MODULES = [VPCS, VirtualBox, Dynamips, Qemu, VMware]
|
||||
|
||||
if sys.platform.startswith("linux") or hasattr(sys, "_called_from_test") or os.environ.get("PYTEST_BUILD_DOCUMENTATION") == "1":
|
||||
|
||||
from .docker import Docker
|
||||
MODULES.append(Docker)
|
||||
|
||||
# IOU runs only on Linux but testsuite work on UNIX platform
|
||||
if not sys.platform.startswith("win"):
|
||||
from .iou import IOU
|
||||
|
@ -27,6 +27,7 @@ class EthernetAdapter(Adapter):
|
||||
def __init__(self, interfaces=1):
|
||||
|
||||
super().__init__(interfaces)
|
||||
self.host_ifc = None
|
||||
|
||||
def __str__(self):
|
||||
|
||||
|
@ -423,7 +423,7 @@ class BaseManager:
|
||||
|
||||
return force_unix_path(path)
|
||||
else:
|
||||
# For non local server we disallow using absolute path outside image directory
|
||||
# For non local server we disallow using absolute path outside image directory
|
||||
if Config.instance().get_section_config("Server").get("local", False) is False:
|
||||
img_directory = self.config.get_section_config("Server").get("images_path", os.path.expanduser("~/GNS3/images"))
|
||||
img_directory = force_unix_path(img_directory)
|
||||
@ -486,14 +486,17 @@ class BaseManager:
|
||||
log.info("Writting image file %s", path)
|
||||
try:
|
||||
remove_checksum(path)
|
||||
# We store the file under his final name only when the upload is finished
|
||||
tmp_path = path + ".tmp"
|
||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||
with open(path, 'wb+') as f:
|
||||
with open(tmp_path, 'wb+') as f:
|
||||
while True:
|
||||
packet = yield from stream.read(512)
|
||||
if not packet:
|
||||
break
|
||||
f.write(packet)
|
||||
os.chmod(path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
|
||||
os.chmod(tmp_path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
|
||||
shutil.move(tmp_path, path)
|
||||
md5sum(path)
|
||||
except OSError as e:
|
||||
raise aiohttp.web.HTTPConflict(text="Could not write image: {} because {}".format(filename, e))
|
||||
|
@ -16,6 +16,7 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import stat
|
||||
import logging
|
||||
import aiohttp
|
||||
import shutil
|
||||
@ -24,7 +25,7 @@ import tempfile
|
||||
import psutil
|
||||
import platform
|
||||
|
||||
from pkg_resources import parse_version
|
||||
from gns3server.utils import parse_version
|
||||
from ..utils.asyncio import wait_run_in_executor
|
||||
from ..ubridge.hypervisor import Hypervisor
|
||||
from .vm_error import VMError
|
||||
@ -43,9 +44,11 @@ class BaseVM:
|
||||
:param project: Project instance
|
||||
:param manager: parent VM Manager
|
||||
:param console: TCP console port
|
||||
:param aux: TCP aux console port
|
||||
:param allocate_aux: Boolean if true will allocate an aux console port
|
||||
"""
|
||||
|
||||
def __init__(self, name, vm_id, project, manager, console=None, console_type="telnet"):
|
||||
def __init__(self, name, vm_id, project, manager, console=None, console_type="telnet", aux=None, allocate_aux=False):
|
||||
|
||||
self._name = name
|
||||
self._usage = ""
|
||||
@ -53,25 +56,36 @@ class BaseVM:
|
||||
self._project = project
|
||||
self._manager = manager
|
||||
self._console = console
|
||||
self._aux = aux
|
||||
self._console_type = console_type
|
||||
self._temporary_directory = None
|
||||
self._hw_virtualization = False
|
||||
self._ubridge_hypervisor = None
|
||||
self._closed = False
|
||||
self._vm_status = "stopped"
|
||||
self._command_line = ""
|
||||
self._allocate_aux = allocate_aux
|
||||
|
||||
if self._console is not None:
|
||||
if console_type == "vnc":
|
||||
self._console = self._manager.port_manager.reserve_tcp_port(self._console, self._project, port_range_start=5900, port_range_end=6000)
|
||||
else:
|
||||
self._console = self._manager.port_manager.reserve_tcp_port(self._console, self._project)
|
||||
else:
|
||||
|
||||
# We need to allocate aux before giving a random console port
|
||||
if self._aux is not None:
|
||||
self._aux = self._manager.port_manager.reserve_tcp_port(self._aux, self._project)
|
||||
|
||||
if self._console is None:
|
||||
if console_type == "vnc":
|
||||
# VNC is a special case and the range must be 5900-6000
|
||||
self._console = self._manager.port_manager.get_free_tcp_port(self._project, port_range_start=5900, port_range_end=6000)
|
||||
else:
|
||||
self._console = self._manager.port_manager.get_free_tcp_port(self._project)
|
||||
|
||||
if self._aux is None and allocate_aux:
|
||||
self._aux = self._manager.port_manager.get_free_tcp_port(self._project)
|
||||
|
||||
log.debug("{module}: {name} [{id}] initialized. Console port {console}".format(module=self.manager.module_name,
|
||||
name=self.name,
|
||||
id=self.id,
|
||||
@ -211,11 +225,13 @@ class BaseVM:
|
||||
"""
|
||||
Delete the VM (including all its files).
|
||||
"""
|
||||
def set_rw(operation, name, exc):
|
||||
os.chmod(name, stat.S_IWRITE)
|
||||
|
||||
directory = self.project.vm_working_directory(self)
|
||||
if os.path.exists(directory):
|
||||
try:
|
||||
yield from wait_run_in_executor(shutil.rmtree, directory)
|
||||
yield from wait_run_in_executor(shutil.rmtree, directory, onerror=set_rw)
|
||||
except OSError as e:
|
||||
raise aiohttp.web.HTTPInternalServerError(text="Could not delete the VM working directory: {}".format(e))
|
||||
|
||||
@ -233,12 +249,75 @@ class BaseVM:
|
||||
|
||||
raise NotImplementedError
|
||||
|
||||
@asyncio.coroutine
|
||||
def close(self):
|
||||
"""
|
||||
Close the VM process.
|
||||
"""
|
||||
|
||||
raise NotImplementedError
|
||||
if self._closed:
|
||||
return False
|
||||
|
||||
log.info("{module}: '{name}' [{id}]: is closing".format(
|
||||
module=self.manager.module_name,
|
||||
name=self.name,
|
||||
id=self.id))
|
||||
|
||||
if self._console:
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = None
|
||||
|
||||
if self._aux:
|
||||
self._manager.port_manager.release_tcp_port(self._aux, self._project)
|
||||
self._aux = None
|
||||
|
||||
self._closed = True
|
||||
return True
|
||||
|
||||
@property
|
||||
def allocate_aux(self):
|
||||
"""
|
||||
:returns: Boolean allocate or not an aux console
|
||||
"""
|
||||
return self._allocate_aux
|
||||
|
||||
@allocate_aux.setter
|
||||
def allocate_aux(self, allocate_aux):
|
||||
"""
|
||||
:returns: Boolean allocate or not an aux console
|
||||
"""
|
||||
self._allocate_aux = allocate_aux
|
||||
|
||||
@property
|
||||
def aux(self):
|
||||
"""
|
||||
Returns the aux console port of this VM.
|
||||
|
||||
:returns: aux console port
|
||||
"""
|
||||
|
||||
return self._aux
|
||||
|
||||
@aux.setter
|
||||
def aux(self, aux):
|
||||
"""
|
||||
Changes the aux port
|
||||
|
||||
:params aux: Console port (integer) or None to free the port
|
||||
"""
|
||||
|
||||
if aux == self._aux:
|
||||
return
|
||||
|
||||
if self._aux:
|
||||
self._manager.port_manager.release_tcp_port(self._aux, self._project)
|
||||
self._aux = None
|
||||
if aux is not None:
|
||||
self._aux = self._manager.port_manager.reserve_tcp_port(aux, self._project)
|
||||
log.info("{module}: '{name}' [{id}]: aux port set to {port}".format(module=self.manager.module_name,
|
||||
name=self.name,
|
||||
id=self.id,
|
||||
port=aux))
|
||||
|
||||
@property
|
||||
def console(self):
|
||||
@ -255,22 +334,28 @@ class BaseVM:
|
||||
"""
|
||||
Changes the console port
|
||||
|
||||
:params console: Console port (integer)
|
||||
:params console: Console port (integer) or None to free the port
|
||||
"""
|
||||
|
||||
if console == self._console:
|
||||
return
|
||||
|
||||
if self._console_type == "vnc" and console < 5900:
|
||||
raise VMError("VNC console require a port superior or equal to 5900")
|
||||
if self._console_type == "vnc" and console is not None and console < 5900:
|
||||
raise VMError("VNC console require a port superior or equal to 5900 currently it's {}".format(console))
|
||||
|
||||
if self._console:
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = self._manager.port_manager.reserve_tcp_port(console, self._project)
|
||||
log.info("{module}: '{name}' [{id}]: console port set to {port}".format(module=self.manager.module_name,
|
||||
name=self.name,
|
||||
id=self.id,
|
||||
port=console))
|
||||
self._console = None
|
||||
if console is not None:
|
||||
if self.console_type == "vnc":
|
||||
self._console = self._manager.port_manager.reserve_tcp_port(console, self._project, port_range_start=5900, port_range_end=6000)
|
||||
else:
|
||||
self._console = self._manager.port_manager.reserve_tcp_port(console, self._project)
|
||||
|
||||
log.info("{module}: '{name}' [{id}]: console port set to {port}".format(module=self.manager.module_name,
|
||||
name=self.name,
|
||||
id=self.id,
|
||||
port=console))
|
||||
|
||||
@property
|
||||
def console_type(self):
|
||||
@ -318,8 +403,10 @@ class BaseVM:
|
||||
"""
|
||||
|
||||
path = self._manager.config.get_section_config("Server").get("ubridge_path", "ubridge")
|
||||
if path == "ubridge":
|
||||
path = shutil.which("ubridge")
|
||||
path = shutil.which(path)
|
||||
|
||||
if path is None or len(path) == 0:
|
||||
raise VMError("uBridge is not installed or uBridge path is invalid")
|
||||
return path
|
||||
|
||||
@asyncio.coroutine
|
||||
@ -339,8 +426,6 @@ class BaseVM:
|
||||
yield from self._ubridge_hypervisor.start()
|
||||
log.info("Hypervisor {}:{} has successfully started".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
|
||||
yield from self._ubridge_hypervisor.connect()
|
||||
if parse_version(self._ubridge_hypervisor.version) < parse_version('0.9.2'):
|
||||
raise VMError("uBridge version must be >= 0.9.2, detected version is {}".format(self._ubridge_hypervisor.version))
|
||||
|
||||
@property
|
||||
def hw_virtualization(self):
|
||||
|
163
gns3server/modules/docker/__init__.py
Normal file
163
gns3server/modules/docker/__init__.py
Normal file
@ -0,0 +1,163 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Docker server module.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import aiohttp
|
||||
import urllib
|
||||
import json
|
||||
import sys
|
||||
from gns3server.utils import parse_version
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
from ..base_manager import BaseManager
|
||||
from .docker_vm import DockerVM
|
||||
from .docker_error import *
|
||||
|
||||
DOCKER_MINIMUM_API_VERSION = "1.21"
|
||||
|
||||
|
||||
class Docker(BaseManager):
|
||||
|
||||
_VM_CLASS = DockerVM
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self._server_url = '/var/run/docker.sock'
|
||||
self._connected = False
|
||||
# Allow locking during ubridge operations
|
||||
self.ubridge_lock = asyncio.Lock()
|
||||
|
||||
@asyncio.coroutine
|
||||
def connector(self):
|
||||
if not self._connected or self._connector.closed:
|
||||
if not sys.platform.startswith("linux"):
|
||||
raise DockerError("Docker is supported only on Linux")
|
||||
|
||||
try:
|
||||
self._connector = aiohttp.connector.UnixConnector(self._server_url, conn_timeout=2)
|
||||
self._connected = True
|
||||
version = yield from self.query("GET", "version")
|
||||
except (aiohttp.errors.ClientOSError, FileNotFoundError):
|
||||
self._connected = False
|
||||
raise DockerError("Can't connect to docker daemon")
|
||||
|
||||
if parse_version(version["ApiVersion"]) < parse_version(DOCKER_MINIMUM_API_VERSION):
|
||||
raise DockerError("Docker API version is {}. GNS3 requires a minimum API version of {}".format(version["ApiVersion"], DOCKER_MINIMUM_API_VERSION))
|
||||
return self._connector
|
||||
|
||||
@asyncio.coroutine
|
||||
def unload(self):
|
||||
yield from super().unload()
|
||||
if self._connected:
|
||||
self._connector.close()
|
||||
|
||||
@asyncio.coroutine
|
||||
def query(self, method, path, data={}, params={}):
|
||||
"""
|
||||
Make a query to the docker daemon and decode the request
|
||||
|
||||
:param method: HTTP method
|
||||
:param path: Endpoint in API
|
||||
:param data: Dictionnary with the body. Will be transformed to a JSON
|
||||
:param params: Parameters added as a query arg
|
||||
"""
|
||||
|
||||
response = yield from self.http_query(method, path, data=data, params=params)
|
||||
body = yield from response.read()
|
||||
if len(body):
|
||||
if response.headers['CONTENT-TYPE'] == 'application/json':
|
||||
body = json.loads(body.decode("utf-8"))
|
||||
else:
|
||||
body = body.decode("utf-8")
|
||||
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
|
||||
return body
|
||||
|
||||
@asyncio.coroutine
|
||||
def http_query(self, method, path, data={}, params={}):
|
||||
"""
|
||||
Make a query to the docker daemon
|
||||
|
||||
:param method: HTTP method
|
||||
:param path: Endpoint in API
|
||||
:param data: Dictionnary with the body. Will be transformed to a JSON
|
||||
:param params: Parameters added as a query arg
|
||||
:returns: HTTP response
|
||||
"""
|
||||
data = json.dumps(data)
|
||||
url = "http://docker/" + path
|
||||
try:
|
||||
response = yield from aiohttp.request(
|
||||
method,
|
||||
url,
|
||||
connector=(yield from self.connector()),
|
||||
params=params,
|
||||
data=data,
|
||||
headers={"content-type": "application/json", },
|
||||
)
|
||||
except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
|
||||
raise DockerError("Docker has returned an error: {}".format(str(e)))
|
||||
if response.status >= 300:
|
||||
body = yield from response.read()
|
||||
try:
|
||||
body = json.loads(body.decode("utf-8"))["message"]
|
||||
except ValueError:
|
||||
pass
|
||||
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
|
||||
if response.status == 304:
|
||||
raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
|
||||
elif response.status == 404:
|
||||
raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
|
||||
else:
|
||||
raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
|
||||
return response
|
||||
|
||||
@asyncio.coroutine
|
||||
def websocket_query(self, path, params={}):
|
||||
"""
|
||||
Open a websocket connection
|
||||
|
||||
:param path: Endpoint in API
|
||||
:param params: Parameters added as a query arg
|
||||
:returns: Websocket
|
||||
"""
|
||||
|
||||
url = "http://docker/" + path
|
||||
connection = yield from aiohttp.ws_connect(url,
|
||||
connector=(yield from self.connector()),
|
||||
origin="http://docker",
|
||||
autoping=True)
|
||||
return connection
|
||||
|
||||
@asyncio.coroutine
|
||||
def list_images(self):
|
||||
"""Gets Docker image list.
|
||||
|
||||
:returns: list of dicts
|
||||
:rtype: list
|
||||
"""
|
||||
images = []
|
||||
for image in (yield from self.query("GET", "images/json", params={"all": 0})):
|
||||
for tag in image['RepoTags']:
|
||||
if tag != "<none>:<none>":
|
||||
images.append({'image': tag})
|
||||
return sorted(images, key=lambda i: i['image'])
|
@ -15,14 +15,20 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import aiohttp.web
|
||||
import logging
|
||||
"""
|
||||
Custom exceptions for the Docker module.
|
||||
"""
|
||||
|
||||
from ..vm_error import VMError
|
||||
|
||||
|
||||
class RequestHandler(aiohttp.web.RequestHandler):
|
||||
class DockerError(VMError):
|
||||
pass
|
||||
|
||||
def log_access(self, message, environ, response, time):
|
||||
|
||||
# In debug mode we don't use the standard request log but a more complete in response.py
|
||||
if self.logger.getEffectiveLevel() != logging.DEBUG:
|
||||
super().log_access(message, environ, response, time)
|
||||
class DockerHttp304Error(DockerError):
|
||||
pass
|
||||
|
||||
|
||||
class DockerHttp404Error(DockerError):
|
||||
pass
|
915
gns3server/modules/docker/docker_vm.py
Normal file
915
gns3server/modules/docker/docker_vm.py
Normal file
@ -0,0 +1,915 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Docker container instance.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import shutil
|
||||
import psutil
|
||||
import shlex
|
||||
import aiohttp
|
||||
import json
|
||||
import os
|
||||
|
||||
from .docker_error import *
|
||||
from ..base_vm import BaseVM
|
||||
from ..adapters.ethernet_adapter import EthernetAdapter
|
||||
from ..nios.nio_udp import NIOUDP
|
||||
from ..nios.nio_tap import NIOTAP
|
||||
from ...utils.asyncio.telnet_server import AsyncioTelnetServer
|
||||
from ...utils.asyncio.raw_command_server import AsyncioRawCommandServer
|
||||
from ...utils.asyncio import wait_for_file_creation
|
||||
from ...utils.get_resource import get_resource
|
||||
from ...ubridge.ubridge_error import UbridgeError, UbridgeNamespaceError
|
||||
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DockerVM(BaseVM):
|
||||
"""Docker container implementation.
|
||||
|
||||
:param name: Docker container name
|
||||
:param vm_id: Docker VM identifier
|
||||
:param project: Project instance
|
||||
:param manager: Manager instance
|
||||
:param image: Docker image
|
||||
:param console: TCP console port
|
||||
:param console_type: Console type
|
||||
:param aux: TCP aux console port
|
||||
:param console_resolution: Resolution of the VNC display
|
||||
:param console_http_port: Port to redirect HTTP queries
|
||||
:param console_http_path: Url part with the path of the web interface
|
||||
"""
|
||||
|
||||
def __init__(self, name, vm_id, project, manager, image,
|
||||
console=None, aux=None, start_command=None,
|
||||
adapters=None, environment=None, console_type="telnet",
|
||||
console_resolution="1024x768", console_http_port=80, console_http_path="/"):
|
||||
super().__init__(name, vm_id, project, manager, console=console, aux=aux, allocate_aux=True, console_type=console_type)
|
||||
|
||||
# If no version is specified force latest
|
||||
if ":" not in image:
|
||||
image = "{}:latest".format(image)
|
||||
self._image = image
|
||||
self._start_command = start_command
|
||||
self._environment = environment
|
||||
self._cid = None
|
||||
self._ethernet_adapters = []
|
||||
self._ubridge_hypervisor = None
|
||||
self._temporary_directory = None
|
||||
self._telnet_servers = []
|
||||
self._x11vnc_process = None
|
||||
self._console_resolution = console_resolution
|
||||
self._console_http_path = console_http_path
|
||||
self._console_http_port = console_http_port
|
||||
self._console_websocket = None
|
||||
self._volumes = []
|
||||
|
||||
if adapters is None:
|
||||
self.adapters = 1
|
||||
else:
|
||||
self.adapters = adapters
|
||||
|
||||
log.debug(
|
||||
"{module}: {name} [{image}] initialized.".format(
|
||||
module=self.manager.module_name,
|
||||
name=self.name,
|
||||
image=self._image))
|
||||
|
||||
def __json__(self):
|
||||
return {
|
||||
"name": self._name,
|
||||
"vm_id": self._id,
|
||||
"container_id": self._cid,
|
||||
"project_id": self._project.id,
|
||||
"image": self._image,
|
||||
"adapters": self.adapters,
|
||||
"console": self.console,
|
||||
"console_type": self.console_type,
|
||||
"console_resolution": self.console_resolution,
|
||||
"console_http_port": self.console_http_port,
|
||||
"console_http_path": self.console_http_path,
|
||||
"aux": self.aux,
|
||||
"start_command": self.start_command,
|
||||
"environment": self.environment,
|
||||
"vm_directory": self.working_dir
|
||||
}
|
||||
|
||||
def _get_free_display_port(self):
|
||||
"""
|
||||
Search a free display port
|
||||
"""
|
||||
display = 100
|
||||
if not os.path.exists("/tmp/.X11-unix/"):
|
||||
return display
|
||||
while True:
|
||||
if not os.path.exists("/tmp/.X11-unix/X{}".format(display)):
|
||||
return display
|
||||
display += 1
|
||||
|
||||
@property
|
||||
def start_command(self):
|
||||
return self._start_command
|
||||
|
||||
@start_command.setter
|
||||
def start_command(self, command):
|
||||
if command:
|
||||
command = command.strip()
|
||||
if command is None or len(command) == 0:
|
||||
self._start_command = None
|
||||
else:
|
||||
self._start_command = command
|
||||
|
||||
@property
|
||||
def console_resolution(self):
|
||||
return self._console_resolution
|
||||
|
||||
@console_resolution.setter
|
||||
def console_resolution(self, resolution):
|
||||
self._console_resolution = resolution
|
||||
|
||||
@property
|
||||
def console_http_path(self):
|
||||
return self._console_http_path
|
||||
|
||||
@console_http_path.setter
|
||||
def console_http_path(self, path):
|
||||
self._console_http_path = path
|
||||
|
||||
@property
|
||||
def console_http_port(self):
|
||||
return self._console_http_port
|
||||
|
||||
@console_http_port.setter
|
||||
def console_http_port(self, port):
|
||||
self._console_http_port = port
|
||||
|
||||
@property
|
||||
def environment(self):
|
||||
return self._environment
|
||||
|
||||
@environment.setter
|
||||
def environment(self, command):
|
||||
self._environment = command
|
||||
|
||||
@asyncio.coroutine
|
||||
def _get_container_state(self):
|
||||
"""Returns the container state (e.g. running, paused etc.)
|
||||
|
||||
:returns: state
|
||||
:rtype: str
|
||||
"""
|
||||
result = yield from self.manager.query("GET", "containers/{}/json".format(self._cid))
|
||||
|
||||
if result["State"]["Paused"]:
|
||||
return "paused"
|
||||
if result["State"]["Running"]:
|
||||
return "running"
|
||||
return "exited"
|
||||
|
||||
@asyncio.coroutine
|
||||
def _get_image_informations(self):
|
||||
"""
|
||||
:returns: Dictionnary informations about the container image
|
||||
"""
|
||||
result = yield from self.manager.query("GET", "images/{}/json".format(self._image))
|
||||
return result
|
||||
|
||||
def _mount_binds(self, image_infos):
|
||||
"""
|
||||
:returns: Return the path that we need to map to local folders
|
||||
"""
|
||||
binds = []
|
||||
|
||||
binds.append("{}:/gns3:ro".format(get_resource("modules/docker/resources")))
|
||||
|
||||
# We mount our own etc/network
|
||||
network_config = self._create_network_config()
|
||||
binds.append("{}:/gns3volumes/etc/network:rw".format(network_config))
|
||||
|
||||
self._volumes = ["/etc/network"]
|
||||
|
||||
volumes = image_infos.get("ContainerConfig", {}).get("Volumes")
|
||||
if volumes is None:
|
||||
return binds
|
||||
for volume in volumes.keys():
|
||||
source = os.path.join(self.working_dir, os.path.relpath(volume, "/"))
|
||||
os.makedirs(source, exist_ok=True)
|
||||
binds.append("{}:/gns3volumes{}".format(source, volume))
|
||||
self._volumes.append(volume)
|
||||
|
||||
return binds
|
||||
|
||||
def _create_network_config(self):
    """
    If network config is empty we create a sample config

    :returns: path of the host-side ``etc/network`` directory mounted into the container
    """
    path = os.path.join(self.working_dir, "etc", "network")
    os.makedirs(path, exist_ok=True)
    # Debian-style ifupdown hook directories expected by the guest tooling
    os.makedirs(os.path.join(path, "if-up.d"), exist_ok=True)
    os.makedirs(os.path.join(path, "if-down.d"), exist_ok=True)
    os.makedirs(os.path.join(path, "if-pre-up.d"), exist_ok=True)
    os.makedirs(os.path.join(path, "if-post-down.d"), exist_ok=True)

    # Only seed a sample "interfaces" file on first start; never overwrite
    # a configuration the user may have edited.
    if not os.path.exists(os.path.join(path, "interfaces")):
        with open(os.path.join(path, "interfaces"), "w+") as f:
            f.write("""#
# This is a sample network config uncomment lines to configure the network
#

""")
            # One commented static + DHCP stanza per adapter so the user
            # just has to uncomment the variant they want.
            for adapter in range(0, self.adapters):
                f.write("""
# Static config for eth{adapter}
#auto eth{adapter}
#iface eth{adapter} inet static
#\taddress 192.168.{adapter}.2
#\tnetmask 255.255.255.0
#\tgateway 192.168.{adapter}.1
#\tup echo nameserver 192.168.{adapter}.1 > /etc/resolv.conf

# DHCP config for eth{adapter}
# auto eth{adapter}
# iface eth{adapter} inet dhcp""".format(adapter=adapter))
    return path
|
||||
|
||||
@asyncio.coroutine
def create(self):
    """Creates the Docker container."""

    try:
        image_infos = yield from self._get_image_informations()
    except DockerHttp404Error:
        # Image is not available locally: pull it, then retry the lookup
        log.info("Image %s is missing pulling it from docker hub", self._image)
        yield from self.pull_image(self._image)
        image_infos = yield from self._get_image_informations()

    params = {
        "Hostname": self._name,
        "Name": self._name,
        "Image": self._image,
        # Networking is handled by uBridge veth pairs, not by Docker itself
        "NetworkDisabled": True,
        "Tty": True,
        "OpenStdin": True,
        "StdinOnce": False,
        "HostConfig": {
            "CapAdd": ["ALL"],
            "Privileged": True,
            "Binds": self._mount_binds(image_infos)
        },
        "Volumes": {},
        "Env": ["container=docker"],  # Systemd compliant: https://github.com/GNS3/gns3-server/issues/573
        "Cmd": [],
        "Entrypoint": image_infos.get("Config", {"Entrypoint": []})["Entrypoint"]
    }

    if params["Entrypoint"] is None:
        params["Entrypoint"] = []
    if self._start_command:
        params["Cmd"] = shlex.split(self._start_command)
    if len(params["Cmd"]) == 0:
        # No user-supplied command: fall back to the command baked into the image
        params["Cmd"] = image_infos.get("Config", {"Cmd": []})["Cmd"]
    if params["Cmd"] is None:
        params["Cmd"] = []
    if len(params["Cmd"]) == 0 and len(params["Entrypoint"]) == 0:
        # Neither the user nor the image defines a command: default to a shell
        params["Cmd"] = ["/bin/sh"]
    # Our init script wraps the real entrypoint to restore volumes/permissions
    params["Entrypoint"].insert(0, "/gns3/init.sh")  # FIXME /gns3/init.sh is not found?

    # Give the information to the container on how many interface should be inside
    params["Env"].append("GNS3_MAX_ETHERNET=eth{}".format(self.adapters - 1))
    # Give the information to the container the list of volume path mounted
    params["Env"].append("GNS3_VOLUMES={}".format(":".join(self._volumes)))

    if self._environment:
        params["Env"] += [e.strip() for e in self._environment.split("\n")]

    if self._console_type == "vnc":
        yield from self._start_vnc()
        params["Env"].append("QT_GRAPHICSSYSTEM=native")  # To fix a Qt issue: https://github.com/GNS3/gns3-server/issues/556
        params["Env"].append("DISPLAY=:{}".format(self._display))
        # Expose the host X11 socket so the container can render to Xvfb
        params["HostConfig"]["Binds"].append("/tmp/.X11-unix/:/tmp/.X11-unix/")

    result = yield from self.manager.query("POST", "containers/create", data=params)
    self._cid = result['Id']
    log.info("Docker container '{name}' [{id}] created".format(
        name=self._name, id=self._id))
    return True
|
||||
|
||||
@asyncio.coroutine
def update(self):
    """
    Destroy and recreate the container with the new settings
    """
    # We need to save the console ports and run state and restore them,
    # because reset()/create() would otherwise lose them.
    console = self.console
    aux = self.aux
    state = yield from self._get_container_state()

    yield from self.reset()
    yield from self.create()
    self.console = console
    self.aux = aux
    # Only restart if the container was running before the update
    if state == "running":
        yield from self.start()
|
||||
|
||||
@asyncio.coroutine
def start(self):
    """Starts this Docker container."""

    state = yield from self._get_container_state()
    if state == "paused":
        # A paused container only needs to be unpaused
        yield from self.unpause()
    else:
        # Drop any console servers left over from a previous run
        yield from self._clean_servers()

        result = yield from self.manager.query("POST", "containers/{}/start".format(self._cid))

        # The container PID serves as its network namespace identifier
        namespace = yield from self._get_namespace()

        yield from self._start_ubridge()

        # Wire every adapter to the container's namespace through uBridge
        for adapter_number in range(0, self.adapters):
            nio = self._ethernet_adapters[adapter_number].get_nio(0)
            with (yield from self.manager.ubridge_lock):
                try:
                    yield from self._add_ubridge_connection(nio, adapter_number, namespace)
                except UbridgeNamespaceError:
                    yield from self.stop()

                    # The container can crash soon after the start this mean we can not move the interface to the container namespace
                    logdata = yield from self._get_log()
                    for line in logdata.split('\n'):
                        log.error(line)
                    raise DockerError(logdata)

        # Expose the console with the transport matching the configured type
        if self.console_type == "telnet":
            yield from self._start_console()
        elif self.console_type == "http" or self.console_type == "https":
            yield from self._start_http()

        if self.allocate_aux:
            yield from self._start_aux()

    self.status = "started"
    log.info("Docker container '{name}' [{image}] started listen for {console_type} on {console}".format(name=self._name, image=self._image, console=self.console, console_type=self.console_type))
|
||||
|
||||
@asyncio.coroutine
def _start_aux(self):
    """
    Start an auxiliary console exposed as a telnet server.
    """

    # We can not use the API because docker doesn't expose a websocket api for exec
    # https://github.com/GNS3/gns3-gui/issues/1039
    # "script" keeps a pty alive and respawns a shell forever so the aux
    # console survives the user exiting the shell.
    process = yield from asyncio.subprocess.create_subprocess_exec(
        "docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do /gns3/bin/busybox sh; done", "/dev/null",
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
        stdin=asyncio.subprocess.PIPE)
    server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=True, echo=True)
    self._telnet_servers.append((yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux)))
    log.debug("Docker container '%s' started listen for auxilary telnet on %d", self.name, self.aux)
|
||||
|
||||
@asyncio.coroutine
def _fix_permissions(self):
    """
    Because docker run as root we need to fix permission and ownership to allow user to interact
    with it from their filesystem and do operation like file delete
    """
    for volume in self._volumes:
        log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(
            name=self._name, image=self._image, path=volume))
        # Inside the container: record every file's original mode/owner into
        # .gns3_perms (restored by init.sh on next start), then chmod/chown
        # the tree to the host user so it is accessible from the host side.
        process = yield from asyncio.subprocess.create_subprocess_exec(
            "docker",
            "exec",
            self._cid,
            "/gns3/bin/busybox",
            "sh",
            "-c",
            "(/gns3/bin/busybox find \"{path}\" -depth -print0 | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n' > \"{path}/.gns3_perms\") && /gns3/bin/busybox chmod -R u+rX \"{path}\" && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\"".format(uid=os.getuid(), gid=os.getgid(), path=volume))
        yield from process.wait()
|
||||
|
||||
@asyncio.coroutine
def _start_vnc(self):
    """
    Start a VNC server for this container (Xvfb virtual display + x11vnc).
    """

    self._display = self._get_free_display_port()
    if shutil.which("Xvfb") is None or shutil.which("x11vnc") is None:
        raise DockerError("Please install Xvfb and x11vnc before using the VNC support")
    self._xvfb_process = yield from asyncio.create_subprocess_exec("Xvfb", "-nolisten", "tcp", ":{}".format(self._display), "-screen", "0", self._console_resolution + "x16")
    # We pass a port for TCPV6 due to a crash in X11VNC if not here: https://github.com/GNS3/gns3-server/issues/569
    self._x11vnc_process = yield from asyncio.create_subprocess_exec("x11vnc", "-forever", "-nopw", "-shared", "-geometry", self._console_resolution, "-display", "WAIT:{}".format(self._display), "-rfbport", str(self.console), "-rfbportv6", str(self.console), "-noncache", "-listen", self._manager.port_manager.console_host)

    # Wait for Xvfb to actually create its socket before the container starts
    x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display))
    yield from wait_for_file_creation(x11_socket)
|
||||
|
||||
@asyncio.coroutine
def _start_http(self):
    """
    Start an HTTP tunnel to container localhost
    """
    log.debug("Forward HTTP for %s to %d", self.name, self._console_http_port)
    command = ["docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "nc", "127.0.0.1", str(self._console_http_port)]
    # Rewrite the internal port in the server answer, otherwise links
    # emitted by the contained web app would point to the wrong port.
    internal_port = '{}'.format(self._console_http_port).encode()
    external_port = '{}'.format(self.console).encode()
    server = AsyncioRawCommandServer(command, replaces=[(internal_port, external_port)])
    self._telnet_servers.append((yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console)))
|
||||
|
||||
@asyncio.coroutine
def _start_console(self):
    """
    Start streaming the console via telnet
    """

    class InputStream:
        # Minimal writer facade: buffers bytes coming from the telnet client
        # and flushes them to the container's attach websocket on drain().

        def __init__(self):
            self._data = b""

        def write(self, data):
            self._data += data

        @asyncio.coroutine
        def drain(self):
            if not self.ws.closed:
                self.ws.send_bytes(self._data)
                self._data = b""

    output_stream = asyncio.StreamReader()
    input_stream = InputStream()

    telnet = AsyncioTelnetServer(reader=output_stream, writer=input_stream, echo=True)
    self._telnet_servers.append((yield from asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console)))

    self._console_websocket = yield from self.manager.websocket_query("containers/{}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1".format(self._cid))
    # InputStream.drain() relies on this attribute being set after attach
    input_stream.ws = self._console_websocket

    output_stream.feed_data(self.name.encode() + b" console is now available... Press RETURN to get started.\r\n")

    # NOTE(review): asyncio.async is deprecated (SyntaxError on Python 3.7+);
    # asyncio.ensure_future is the modern spelling — confirm target Python version.
    asyncio.async(self._read_console_output(self._console_websocket, output_stream))
|
||||
|
||||
@asyncio.coroutine
def _read_console_output(self, ws, out):
    """
    Read websocket and forward it to the telnet

    :param ws: Websocket connection
    :param out: Output stream
    """

    while True:
        msg = yield from ws.receive()
        if msg.tp == aiohttp.MsgType.text:
            out.feed_data(msg.data.encode())
        else:
            # Any non-text frame (close/error/EOF) ends the console session
            out.feed_eof()
            ws.close()
            break
    # The websocket dying means the container console is gone: stop the VM
    yield from self.stop()
|
||||
|
||||
@asyncio.coroutine
def is_running(self):
    """
    Checks if the container is running.

    :returns: True or False
    :rtype: bool
    """
    state = yield from self._get_container_state()
    return state == "running"
|
||||
|
||||
@asyncio.coroutine
def restart(self):
    """Restart this Docker container."""
    yield from self.manager.query("POST", "containers/{}/restart".format(self._cid))
    log.info("Docker container '{name}' [{image}] restarted".format(name=self._name, image=self._image))
|
||||
|
||||
@asyncio.coroutine
def _clean_servers(self):
    """
    Close and forget every console server attached to this container.
    """
    if self._telnet_servers:
        for telnet_server in self._telnet_servers:
            telnet_server.close()
            yield from telnet_server.wait_closed()
        self._telnet_servers = []
|
||||
|
||||
@asyncio.coroutine
def stop(self):
    """Stops this Docker container."""

    try:
        yield from self._clean_servers()

        if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
            yield from self._ubridge_hypervisor.stop()

        try:
            state = yield from self._get_container_state()
        except DockerHttp404Error:
            # The container no longer exists on the Docker side
            state = "stopped"

        # A paused container cannot be stopped directly: unpause first
        if state == "paused":
            yield from self.unpause()

        if state != "stopped":
            # Record file permissions so init.sh can restore them on next start
            yield from self._fix_permissions()
            # t=5 number of seconds to wait before killing the container
            try:
                yield from self.manager.query("POST", "containers/{}/stop".format(self._cid), params={"t": 5})
                log.info("Docker container '{name}' [{image}] stopped".format(
                    name=self._name, image=self._image))
            except DockerHttp304Error:
                # Container is already stopped
                pass
    # Ignore runtime error because when closing the server
    except RuntimeError as e:
        log.debug("Docker runtime error when closing: {}".format(str(e)))
        return
    self.status = "stopped"
|
||||
|
||||
@asyncio.coroutine
def pause(self):
    """Pauses this Docker container."""
    yield from self.manager.query("POST", "containers/{}/pause".format(self._cid))
    log.info("Docker container '{name}' [{image}] paused".format(name=self._name, image=self._image))
    self.status = "paused"
|
||||
|
||||
@asyncio.coroutine
def unpause(self):
    """Unpauses this Docker container."""
    yield from self.manager.query("POST", "containers/{}/unpause".format(self._cid))
    log.info("Docker container '{name}' [{image}] unpaused".format(name=self._name, image=self._image))
    self.status = "started"
|
||||
|
||||
@asyncio.coroutine
def close(self):
    """Closes this Docker container."""

    closed = yield from super().close()
    if not closed:
        # The base class declined the close (e.g. already closed)
        return False
    yield from self.reset()
|
||||
|
||||
@asyncio.coroutine
def reset(self):
    # Tear down the container and release every resource attached to it
    # (VNC helper processes, the Docker container itself, UDP ports).
    try:
        state = yield from self._get_container_state()
        if state == "paused" or state == "running":
            yield from self.stop()
        if self.console_type == "vnc":
            # Stop the x11vnc/Xvfb helpers started by _start_vnc(); the
            # processes may already be gone, hence ProcessLookupError.
            if self._x11vnc_process:
                try:
                    self._x11vnc_process.terminate()
                    yield from self._x11vnc_process.wait()
                except ProcessLookupError:
                    pass
            try:
                self._xvfb_process.terminate()
                yield from self._xvfb_process.wait()
            except ProcessLookupError:
                pass
        # v – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false.
        # force - 1/True/true or 0/False/false, Kill then remove the container. Default false.
        yield from self.manager.query("DELETE", "containers/{}".format(self._cid), params={"force": 1, "v": 1})
        log.info("Docker container '{name}' [{image}] removed".format(
            name=self._name, image=self._image))

        # Give the UDP ports used by the NIOs back to the port manager
        for adapter in self._ethernet_adapters:
            if adapter is not None:
                for nio in adapter.ports.values():
                    if nio and isinstance(nio, NIOUDP):
                        self.manager.port_manager.release_udp_port(nio.lport, self._project)
    # Ignore runtime error because when closing the server
    except (DockerHttp404Error, RuntimeError) as e:
        log.debug("Docker error when closing: {}".format(str(e)))
        return
|
||||
|
||||
@asyncio.coroutine
def _add_ubridge_connection(self, nio, adapter_number, namespace):
    """
    Creates a connection in uBridge.

    :param nio: NIO instance or None if it's a dummy interface (if an interface is missing in ubridge you can't see it via ifconfig in the container)
    :param adapter_number: adapter number
    :param namespace: Container namespace (pid)
    :raises DockerError: if the adapter doesn't exist or no veth name is free
    :raises UbridgeNamespaceError: if the interface can't be moved into the container
    """

    try:
        adapter = self._ethernet_adapters[adapter_number]
    except IndexError:
        raise DockerError(
            "Adapter {adapter_number} doesn't exist on Docker container '{name}'".format(name=self.name, adapter_number=adapter_number))

    # Pick the first veth pair name not already present on the host
    # (at most 4096 candidates are probed)
    for index in range(4096):
        if "veth-gns3-e{}".format(index) not in psutil.net_if_addrs():
            adapter.ifc = "eth{}".format(str(index))
            adapter.host_ifc = "veth-gns3-e{}".format(str(index))
            adapter.guest_ifc = "veth-gns3-i{}".format(str(index))
            break
    if not hasattr(adapter, "ifc"):
        raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(name=self.name,
                                                                                                                                                               adapter_number=adapter_number))

    # Create the veth pair on the host side
    yield from self._ubridge_hypervisor.send('docker create_veth {hostif} {guestif}'.format(guestif=adapter.guest_ifc,
                                                                                            hostif=adapter.host_ifc))

    log.debug("Move container %s adapter %s to namespace %s", self.name, adapter.guest_ifc, namespace)
    try:
        # Move the guest end of the pair into the container's network namespace
        yield from self._ubridge_hypervisor.send('docker move_to_ns {ifc} {ns} eth{adapter}'.format(ifc=adapter.guest_ifc,
                                                                                                    ns=namespace,
                                                                                                    adapter=adapter_number))
    except UbridgeError as e:
        raise UbridgeNamespaceError(e)

    # With no NIO the veth pair only exists so the interface is visible
    # inside the container; no bridge is created.
    if nio:
        yield from self._ubridge_hypervisor.send('bridge create bridge{}'.format(adapter_number))
        yield from self._ubridge_hypervisor.send('bridge add_nio_linux_raw bridge{adapter} {ifc}'.format(ifc=adapter.host_ifc, adapter=adapter_number))
        if isinstance(nio, NIOUDP):
            yield from self._ubridge_hypervisor.send('bridge add_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number,
                                                                                                                        lport=nio.lport,
                                                                                                                        rhost=nio.rhost,
                                                                                                                        rport=nio.rport))
        elif isinstance(nio, NIOTAP):
            yield from self._ubridge_hypervisor.send('bridge add_nio_tap bridge{adapter} {tap}'.format(adapter=adapter_number, tap=nio.tap_device))

        # Resume a capture that was configured before the container started
        if nio.capturing:
            yield from self._ubridge_hypervisor.send('bridge start_capture bridge{adapter} "{pcap_file}"'.format(adapter=adapter_number,
                                                                                                                 pcap_file=nio.pcap_output_file))

        yield from self._ubridge_hypervisor.send('bridge start bridge{adapter}'.format(adapter=adapter_number))
|
||||
|
||||
@asyncio.coroutine
def _delete_ubridge_connection(self, adapter_number):
    """
    Deletes a connection in uBridge.

    Decorated with ``@asyncio.coroutine`` for consistency with every other
    coroutine in this class (the body already uses ``yield from``, so it was
    a generator in any case and all callers already ``yield from`` it).

    :param adapter_number: adapter number
    """

    if not self._ubridge_hypervisor:
        # uBridge was never started: nothing to tear down
        return

    adapter = self._ethernet_adapters[adapter_number]
    try:
        yield from self._ubridge_hypervisor.send("bridge delete bridge{name}".format(
            name=adapter_number))
    except UbridgeError as e:
        # Best-effort cleanup: the bridge may already be gone
        log.debug(str(e))
    try:
        yield from self._ubridge_hypervisor.send('docker delete_veth {hostif}'.format(hostif=adapter.host_ifc))
    except UbridgeError as e:
        log.debug(str(e))
|
||||
|
||||
@asyncio.coroutine
def _get_namespace(self):
    """
    Return the container's PID, used by uBridge as its network namespace id.

    :rtype: int
    """
    details = yield from self.manager.query("GET", "containers/{}/json".format(self._cid))
    return int(details['State']['Pid'])
|
||||
|
||||
@asyncio.coroutine
def adapter_add_nio_binding(self, adapter_number, nio):
    """Adds an adapter NIO binding.

    :param adapter_number: adapter number
    :param nio: NIO instance to add to the slot/port
    :raises DockerError: if the adapter doesn't exist
    """
    try:
        adapter = self._ethernet_adapters[adapter_number]
    except IndexError:
        raise DockerError(
            "Adapter {adapter_number} doesn't exist on Docker container '{name}'".format(
                name=self.name, adapter_number=adapter_number))

    if self.status == "started" and self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
        # the container is running, let's add the UDP tunnel to connect to another node
        yield from self._ubridge_hypervisor.send('bridge create bridge{}'.format(adapter_number))
        yield from self._ubridge_hypervisor.send('bridge add_nio_linux_raw bridge{adapter} {ifc}'.format(ifc=adapter.host_ifc, adapter=adapter_number))

        yield from self._ubridge_hypervisor.send('bridge add_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number,
                                                                                                                    lport=nio.lport,
                                                                                                                    rhost=nio.rhost,
                                                                                                                    rport=nio.rport))

        yield from self._ubridge_hypervisor.send('bridge start bridge{adapter}'.format(adapter=adapter_number))

    # Always record the binding on the adapter, even if the container is down;
    # start() will wire it through uBridge later.
    adapter.add_nio(0, nio)
    log.info(
        "Docker container '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(
            name=self.name,
            id=self._id,
            nio=nio,
            adapter_number=adapter_number))
|
||||
|
||||
@asyncio.coroutine
def adapter_remove_nio_binding(self, adapter_number):
    """
    Removes an adapter NIO binding.

    :param adapter_number: adapter number
    :raises DockerError: if the adapter doesn't exist
    """
    try:
        adapter = self._ethernet_adapters[adapter_number]
    except IndexError:
        raise DockerError(
            "Adapter {adapter_number} doesn't exist on Docker VM '{name}'".format(
                name=self.name, adapter_number=adapter_number))

    adapter.remove_nio(0)
    if self.status == "started" and self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
        # the container is running, just delete the UDP tunnel so we can reconnect it later if needed
        yield from self._ubridge_hypervisor.send("bridge delete bridge{name}".format(name=adapter_number))
    else:
        # the container is not running, let's completely delete the connection
        yield from self._delete_ubridge_connection(adapter_number)

    # NOTE(review): {nio} is filled with adapter.host_ifc (the host veth name),
    # not the removed NIO object — the log label is slightly misleading.
    log.info(
        "Docker VM '{name}' [{id}]: {nio} removed from adapter {adapter_number}".format(
            name=self.name, id=self.id, nio=adapter.host_ifc,
            adapter_number=adapter_number))
|
||||
|
||||
@property
def adapters(self):
    """
    Number of Ethernet adapters attached to this Docker VM.

    :returns: number of adapters
    :rtype: int
    """
    return len(self._ethernet_adapters)
|
||||
|
||||
@adapters.setter
def adapters(self, adapters):
    """
    Sets the number of Ethernet adapters for this Docker container.

    :param adapters: number of adapters
    """

    # Nothing to do when the count is already right
    if len(self._ethernet_adapters) == adapters:
        return

    # Rebuild the adapter list from scratch with fresh adapters
    self._ethernet_adapters.clear()
    self._ethernet_adapters.extend(EthernetAdapter() for _ in range(adapters))

    log.info(
        'Docker container "{name}" [{id}]: number of Ethernet adapters changed to {adapters}'.format(
            name=self._name,
            id=self._id,
            adapters=adapters))
|
||||
|
||||
@asyncio.coroutine
def pull_image(self, image):
    """
    Pull image from docker repository

    :param image: image name (optionally with a tag)
    """
    log.info("Pull %s from docker hub", image)
    response = yield from self.manager.http_query("POST", "images/create", params={"fromImage": image})
    # The pull api will stream status via an HTTP JSON stream
    content = ""
    while True:
        chunk = yield from response.content.read(1024)
        if not chunk:
            break
        content += chunk.decode("utf-8")

        # Decode as many complete JSON objects as the buffer currently holds
        # and forward their progress messages to the project log.
        try:
            while True:
                content = content.lstrip(" \r\n\t")
                answer, index = json.JSONDecoder().raw_decode(content)
                if "progress" in answer:
                    self.project.emit("log.info", {"message": "Pulling image {}:{}: {}".format(self._image, answer["id"], answer["progress"])})
                # Keep the undecoded tail for the next iteration
                content = content[index:]
        except ValueError:  # Partial JSON
            pass
    self.project.emit("log.info", {"message": "Success pulling image {}".format(self._image)})
|
||||
|
||||
@asyncio.coroutine
def _start_ubridge_capture(self, adapter_number, output_file):
    """
    Start a packet capture in uBridge.

    :param adapter_number: adapter number
    :param output_file: PCAP destination file for the capture
    :raises DockerError: if uBridge is not running
    """

    bridge_name = "bridge{}".format(adapter_number)
    if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running():
        raise DockerError("Cannot start the packet capture: uBridge is not running")
    yield from self._ubridge_hypervisor.send('bridge start_capture {name} "{output_file}"'.format(name=bridge_name, output_file=output_file))
|
||||
|
||||
@asyncio.coroutine
def _stop_ubridge_capture(self, adapter_number):
    """
    Stop a packet capture in uBridge.

    :param adapter_number: adapter number
    :raises DockerError: if uBridge is not running
    """

    bridge_name = "bridge{}".format(adapter_number)
    if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running():
        raise DockerError("Cannot stop the packet capture: uBridge is not running")
    yield from self._ubridge_hypervisor.send("bridge stop_capture {name}".format(name=bridge_name))
|
||||
|
||||
@asyncio.coroutine
def start_capture(self, adapter_number, output_file):
    """
    Starts a packet capture.

    :param adapter_number: adapter number
    :param output_file: PCAP destination file for the capture
    :raises DockerError: if the adapter doesn't exist, is not connected,
        or a capture is already active on it
    """

    try:
        adapter = self._ethernet_adapters[adapter_number]
    except IndexError:
        # BUGFIX: _ethernet_adapters is a list, so an out-of-range adapter
        # raises IndexError; the previous "except KeyError" could never match
        # (the rest of this class already catches IndexError for the same lookup).
        raise DockerError("Adapter {adapter_number} doesn't exist on Docker VM '{name}'".format(name=self.name,
                                                                                                adapter_number=adapter_number))

    nio = adapter.get_nio(0)

    if not nio:
        raise DockerError("Adapter {} is not connected".format(adapter_number))

    if nio.capturing:
        raise DockerError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))

    nio.startPacketCapture(output_file)

    # Only attach the capture to uBridge when the container is actually live;
    # otherwise start() will attach it when the container comes up.
    if self.status == "started" and self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
        yield from self._start_ubridge_capture(adapter_number, output_file)

    log.info("Docker VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name,
                                                                                                     id=self.id,
                                                                                                     adapter_number=adapter_number))
|
||||
|
||||
@asyncio.coroutine
def stop_capture(self, adapter_number):
    """
    Stops a packet capture.

    Decorated with ``@asyncio.coroutine`` for consistency with start_capture
    (the body already uses ``yield from``, so it was a generator in any case
    and all callers already ``yield from`` it).

    :param adapter_number: adapter number
    :raises DockerError: if the adapter doesn't exist or is not connected
    """

    try:
        adapter = self._ethernet_adapters[adapter_number]
    except IndexError:
        # BUGFIX: _ethernet_adapters is a list, so an out-of-range adapter
        # raises IndexError; the previous "except KeyError" could never match.
        raise DockerError("Adapter {adapter_number} doesn't exist on Docker VM '{name}'".format(name=self.name,
                                                                                                adapter_number=adapter_number))

    nio = adapter.get_nio(0)

    if not nio:
        raise DockerError("Adapter {} is not connected".format(adapter_number))

    nio.stopPacketCapture()

    # Only detach from uBridge when the container is live
    if self.status == "started" and self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
        yield from self._stop_ubridge_capture(adapter_number)

    log.info("Docker VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name,
                                                                                                     id=self.id,
                                                                                                     adapter_number=adapter_number))
|
||||
|
||||
@asyncio.coroutine
def _get_log(self):
    """
    Fetch the combined stdout/stderr log of the container.

    :returns: log content
    :rtype: str
    """

    log_url = "containers/{}/logs".format(self._cid)
    return (yield from self.manager.query("GET", log_url, params={"stderr": 1, "stdout": 1}))
|
||||
|
||||
@asyncio.coroutine
def delete(self):
    """
    Delete the VM (including all its files).
    """
    # Close first so the Docker container and its resources are released,
    # then let the base class remove the files on disk.
    yield from self.close()
    yield from super().delete()
|
BIN
gns3server/modules/docker/resources/bin/busybox
Executable file
BIN
gns3server/modules/docker/resources/bin/busybox
Executable file
Binary file not shown.
138
gns3server/modules/docker/resources/etc/udhcpc/default.script
Executable file
138
gns3server/modules/docker/resources/etc/udhcpc/default.script
Executable file
@ -0,0 +1,138 @@
|
||||
#!/tmp/gns3/bin/sh

# script for udhcpc
# Copyright (c) 2008 Natanael Copa <natanael.copa@gmail.com>

UDHCPC="/gns3/etc/udhcpc"
UDHCPC_CONF="$UDHCPC/udhcpc.conf"

RESOLV_CONF="/etc/resolv.conf"
[ -f $UDHCPC_CONF ] && . $UDHCPC_CONF

# Export the variables udhcpc passes in so the hook scripts can see them
export broadcast
export dns
export domain
export interface
export ip
export mask
export metric
export router
export subnet

#export PATH=/usr/bin:/bin:/usr/sbin:/sbin

# Run every regular file found in the given hook directory
run_scripts() {
	local dir=$1
	if [ -d $dir ]; then
		for i in $dir/*; do
			[ -f $i ] && $i
		done
	fi
}

deconfig() {
	ip addr flush dev $interface
}

is_wifi() {
	test -e /sys/class/net/$interface/phy80211
}

if_index() {
	if [ -e /sys/class/net/$interface/ifindex ]; then
		cat /sys/class/net/$interface/ifindex
	else
		ip link show dev $interface | head -n1 | cut -d: -f1
	fi
}

# Route metric: wifi gets a higher base so wired links are preferred
calc_metric() {
	local base=
	if is_wifi; then
		base=300
	else
		base=200
	fi
	echo $(( $base + $(if_index) ))
}

routes() {
	[ -z "$router" ] && return
	local gw= num=
	while ip route del default via dev $interface 2>/dev/null; do
		:
	done
	num=0
	for gw in $router; do
		ip route add 0.0.0.0/0 via $gw dev $interface \
			metric $(( $num + ${IF_METRIC:-$(calc_metric)} ))
		num=$(( $num + 1 ))
	done
}

resolvconf() {
	local i
	[ -n "$IF_PEER_DNS" ] && [ "$IF_PEER_DNS" != "yes" ] && return
	if [ "$RESOLV_CONF" = "no" ] || [ "$RESOLV_CONF" = "NO" ] \
		|| [ -z "$RESOLV_CONF" ]; then
		return
	fi
	echo -n > "$RESOLV_CONF"
	[ -n "$domain" ] && echo "search $domain" >> "$RESOLV_CONF"
	for i in $dns; do
		echo "nameserver $i" >> "$RESOLV_CONF"
	done
}

bound() {
	ip addr add $ip/$mask ${broadcast:+broadcast $broadcast} dev $interface
	ip link set dev $interface up
	routes
	resolvconf
}

renew() {
	if ! ip addr show dev $interface | grep $ip/$mask; then
		ip addr flush dev $interface
		ip addr add $ip/$mask ${broadcast:+broadcast $broadcast} dev $interface
	fi

	local i
	for i in $router; do
		if ! ip route show | grep ^default | grep $i; then
			routes
			break
		fi
	done

	# BUGFIX: the original greps had no file operand, so they read stdin
	# (blocking or always failing) instead of checking the resolver file.
	if ! grep "^search $domain" "$RESOLV_CONF" 2>/dev/null; then
		resolvconf
		return
	fi
	for i in $dns; do
		if ! grep "^nameserver $i" "$RESOLV_CONF" 2>/dev/null; then
			resolvconf
			return
		fi
	done
}

case "$1" in
	deconfig|renew|bound)
		run_scripts $UDHCPC/pre-$1
		$1
		run_scripts $UDHCPC/post-$1
		;;
	leasefail)
		echo "udhcpc failed to get a DHCP lease" >&2
		;;
	nak)
		echo "udhcpc received DHCP NAK" >&2
		;;
	*)
		echo "Error: this script should be called from udhcpc" >&2
		exit 1
		;;
esac
exit 0
|
||||
|
81
gns3server/modules/docker/resources/init.sh
Executable file
81
gns3server/modules/docker/resources/init.sh
Executable file
@ -0,0 +1,81 @@
|
||||
#!/gns3/bin/busybox sh
|
||||
#
|
||||
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#
|
||||
# This script is injected into the container and launch before
|
||||
# the start command of the container
|
||||
#
|
||||
OLD_PATH="$PATH"
|
||||
PATH=/gns3/bin:/tmp/gns3/bin
|
||||
|
||||
# bootstrap busybox commands
|
||||
if [ ! -d /tmp/gns3/bin ]; then
|
||||
busybox mkdir -p /tmp/gns3/bin
|
||||
/gns3/bin/busybox --install -s /tmp/gns3/bin
|
||||
fi
|
||||
|
||||
# Restore file permission and mount volumes
|
||||
echo "$GNS3_VOLUMES" | tr ":" "\n" | while read i
|
||||
do
|
||||
# Copy original files if destination is empty (first start)
|
||||
[ "$(ls -A "/gns3volumes$i")" ] || cp -a "$i/." "/gns3volumes$i"
|
||||
|
||||
mount --bind "/gns3volumes$i" "$i"
|
||||
if [ -f "$i/.gns3_perms" ]
|
||||
then
|
||||
while IFS=: read PERMS OWNER GROUP FILE
|
||||
do
|
||||
chmod "$PERMS" "$FILE"
|
||||
chown "${OWNER}:${GROUP}" "$FILE"
|
||||
done < "$i/.gns3_perms"
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
# /etc/hosts
|
||||
[ -s /etc/hosts ] || cat > /etc/hosts << __EOF__
|
||||
127.0.1.1 $HOSTNAME
|
||||
127.0.0.1 localhost
|
||||
::1 localhost ip6-localhost ip6-loopback
|
||||
fe00::0 ip6-localnet
|
||||
ff00::0 ip6-mcastprefix
|
||||
ff02::1 ip6-allnodes
|
||||
ff02::2 ip6-allrouters
|
||||
__EOF__
|
||||
|
||||
# configure loopback interface
|
||||
ip link set dev lo up
|
||||
|
||||
# Wait for all eth available
|
||||
while true
|
||||
do
|
||||
grep $GNS3_MAX_ETHERNET /proc/net/dev > /dev/null && break
|
||||
sleep 0.5
|
||||
done
|
||||
|
||||
# activate eth interfaces
|
||||
sed -n 's/^ *\(eth[0-9]*\):.*/\1/p' < /proc/net/dev | while read dev; do
|
||||
ip link set dev $dev up
|
||||
done
|
||||
|
||||
# configure network interfaces
|
||||
ifup -a -f
|
||||
|
||||
# continue normal docker startup
|
||||
PATH="$OLD_PATH"
|
||||
exec "$@"
|
||||
|
@ -32,9 +32,9 @@ import glob
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
from gns3server.utils.interfaces import get_windows_interfaces, is_interface_up
|
||||
from gns3server.utils.interfaces import interfaces, is_interface_up
|
||||
from gns3server.utils.asyncio import wait_run_in_executor
|
||||
from pkg_resources import parse_version
|
||||
from gns3server.utils import parse_version
|
||||
from uuid import UUID, uuid4
|
||||
from ..base_manager import BaseManager
|
||||
from ..project_manager import ProjectManager
|
||||
@ -219,7 +219,7 @@ class Dynamips(BaseManager):
|
||||
continue
|
||||
|
||||
# Release the dynamips ids if we want to reload the same project
|
||||
# later
|
||||
# later
|
||||
if project.id in self._dynamips_ids:
|
||||
del self._dynamips_ids[project.id]
|
||||
|
||||
@ -336,16 +336,16 @@ class Dynamips(BaseManager):
|
||||
def find_dynamips(self):
|
||||
|
||||
# look for Dynamips
|
||||
dynamips_path = self.config.get_section_config("Dynamips").get("dynamips_path")
|
||||
if not dynamips_path:
|
||||
dynamips_path = shutil.which("dynamips")
|
||||
dynamips_path = self.config.get_section_config("Dynamips").get("dynamips_path", "dynamips")
|
||||
if not os.path.isabs(dynamips_path):
|
||||
dynamips_path = shutil.which(dynamips_path)
|
||||
|
||||
if not dynamips_path:
|
||||
raise DynamipsError("Could not find Dynamips")
|
||||
if not os.path.isfile(dynamips_path):
|
||||
raise DynamipsError("Dynamips {} is not accessible".format(dynamips_path))
|
||||
if not os.access(dynamips_path, os.X_OK):
|
||||
raise DynamipsError("Dynamips is not executable")
|
||||
raise DynamipsError("Dynamips {} is not executable".format(dynamips_path))
|
||||
|
||||
self._dynamips_path = dynamips_path
|
||||
return dynamips_path
|
||||
@ -439,9 +439,9 @@ class Dynamips(BaseManager):
|
||||
ethernet_device = nio_settings["ethernet_device"]
|
||||
if sys.platform.startswith("win"):
|
||||
# replace the interface name by the GUID on Windows
|
||||
interfaces = get_windows_interfaces()
|
||||
windows_interfaces = interfaces()
|
||||
npf_interface = None
|
||||
for interface in interfaces:
|
||||
for interface in windows_interfaces:
|
||||
if interface["name"] == ethernet_device:
|
||||
npf_interface = interface["id"]
|
||||
if not npf_interface:
|
||||
@ -603,8 +603,8 @@ class Dynamips(BaseManager):
|
||||
elif startup_config_content:
|
||||
startup_config_path = self._create_config(vm, default_startup_config_path, startup_config_content)
|
||||
yield from vm.set_configs(startup_config_path)
|
||||
# An empty startup config crash dynamips
|
||||
else:
|
||||
elif os.path.isfile(default_startup_config_path) and os.path.getsize(default_startup_config_path) == 0:
|
||||
# An empty startup-config may crash Dynamips
|
||||
startup_config_path = self._create_config(vm, default_startup_config_path, "!\n")
|
||||
yield from vm.set_configs(startup_config_path)
|
||||
|
||||
|
@ -55,7 +55,6 @@ class DynamipsHypervisor:
|
||||
self._working_dir = working_dir
|
||||
self._version = "N/A"
|
||||
self._timeout = timeout
|
||||
self._uuid = None
|
||||
self._reader = None
|
||||
self._writer = None
|
||||
self._io_lock = asyncio.Lock()
|
||||
@ -99,8 +98,6 @@ class DynamipsHypervisor:
|
||||
except IndexError:
|
||||
self._version = "Unknown"
|
||||
|
||||
self._uuid = yield from self.send("hypervisor uuid")
|
||||
|
||||
# this forces to send the working dir to Dynamips
|
||||
yield from self.set_working_dir(self._working_dir)
|
||||
|
||||
@ -174,16 +171,6 @@ class DynamipsHypervisor:
|
||||
|
||||
return self._working_dir
|
||||
|
||||
@property
|
||||
def uuid(self):
|
||||
"""
|
||||
Returns this hypervisor UUID.
|
||||
|
||||
:Returns: uuid string
|
||||
"""
|
||||
|
||||
return self._uuid
|
||||
|
||||
@property
|
||||
def devices(self):
|
||||
"""
|
||||
|
@ -19,9 +19,9 @@
|
||||
Represents a Dynamips hypervisor and starts/stops the associated Dynamips process.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
import asyncio
|
||||
|
||||
from gns3server.utils.asyncio import wait_for_process_termination
|
||||
@ -118,16 +118,22 @@ class Hypervisor(DynamipsHypervisor):
|
||||
"""
|
||||
|
||||
self._command = self._build_command()
|
||||
env = os.environ.copy()
|
||||
if sys.platform.startswith("win"):
|
||||
# add the Npcap directory to $PATH to force Dynamips to use npcap DLL instead of Winpcap (if installed)
|
||||
system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap")
|
||||
if os.path.isdir(system_root):
|
||||
env["PATH"] = system_root + ';' + env["PATH"]
|
||||
try:
|
||||
log.info("Starting Dynamips: {}".format(self._command))
|
||||
|
||||
with tempfile.NamedTemporaryFile(delete=False) as fd:
|
||||
self._stdout_file = fd.name
|
||||
log.info("Dynamips process logging to {}".format(fd.name))
|
||||
self._stdout_file = os.path.join(self.working_dir, "dynamips_i{}_stdout.txt".format(self._id))
|
||||
log.info("Dynamips process logging to {}".format(self._stdout_file))
|
||||
with open(self._stdout_file, "w", encoding="utf-8") as fd:
|
||||
self._process = yield from asyncio.create_subprocess_exec(*self._command,
|
||||
stdout=fd,
|
||||
stderr=subprocess.STDOUT,
|
||||
cwd=self._working_dir)
|
||||
cwd=self._working_dir,
|
||||
env=env)
|
||||
log.info("Dynamips process started PID={}".format(self._process.pid))
|
||||
self._started = True
|
||||
except (OSError, subprocess.SubprocessError) as e:
|
||||
|
@ -21,7 +21,7 @@ http://github.com/GNS3/dynamips/blob/master/README.hypervisor#L558
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from pkg_resources import parse_version
|
||||
from gns3server.utils import parse_version
|
||||
|
||||
from .device import Device
|
||||
from ..nios.nio_udp import NIOUDP
|
||||
|
@ -61,12 +61,12 @@ class Router(BaseVM):
|
||||
|
||||
def __init__(self, name, vm_id, project, manager, dynamips_id=None, console=None, aux=None, platform="c7200", hypervisor=None, ghost_flag=False):
|
||||
|
||||
super().__init__(name, vm_id, project, manager, console=console)
|
||||
allocate_aux = manager.config.get_section_config("Dynamips").getboolean("allocate_aux_console_ports", False)
|
||||
|
||||
super().__init__(name, vm_id, project, manager, console=console, aux=aux, allocate_aux=aux)
|
||||
|
||||
self._hypervisor = hypervisor
|
||||
self._dynamips_id = dynamips_id
|
||||
self._closed = False
|
||||
self._name = name
|
||||
self._platform = platform
|
||||
self._image = ""
|
||||
self._startup_config = ""
|
||||
@ -88,7 +88,6 @@ class Router(BaseVM):
|
||||
self._disk0 = 0 # Megabytes
|
||||
self._disk1 = 0 # Megabytes
|
||||
self._auto_delete_disks = False
|
||||
self._aux = aux
|
||||
self._mac_addr = ""
|
||||
self._system_id = "FTX0945W0MY" # processor board ID in IOS
|
||||
self._slots = []
|
||||
@ -100,19 +99,12 @@ class Router(BaseVM):
|
||||
else:
|
||||
self._dynamips_id = dynamips_id
|
||||
manager.take_dynamips_id(project.id, dynamips_id)
|
||||
|
||||
if self._aux is not None:
|
||||
self._aux = self._manager.port_manager.reserve_tcp_port(self._aux, self._project)
|
||||
else:
|
||||
allocate_aux = self.manager.config.get_section_config("Dynamips").getboolean("allocate_aux_console_ports", False)
|
||||
if allocate_aux:
|
||||
self._aux = self._manager.port_manager.get_free_tcp_port(self._project)
|
||||
else:
|
||||
log.info("Creating a new ghost IOS instance")
|
||||
if self._console:
|
||||
# Ghost VMs do not need a console port.
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = None
|
||||
self.console = None
|
||||
|
||||
self._dynamips_id = 0
|
||||
self._name = "Ghost"
|
||||
|
||||
@ -140,8 +132,8 @@ class Router(BaseVM):
|
||||
"disk0": self._disk0,
|
||||
"disk1": self._disk1,
|
||||
"auto_delete_disks": self._auto_delete_disks,
|
||||
"console": self._console,
|
||||
"aux": self._aux,
|
||||
"console": self.console,
|
||||
"aux": self.aux,
|
||||
"mac_addr": self._mac_addr,
|
||||
"system_id": self._system_id}
|
||||
|
||||
@ -195,8 +187,8 @@ class Router(BaseVM):
|
||||
|
||||
yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console))
|
||||
|
||||
if self._aux is not None:
|
||||
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=self._aux))
|
||||
if self.aux is not None:
|
||||
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=self.aux))
|
||||
|
||||
# get the default base MAC address
|
||||
mac_addr = yield from self._hypervisor.send('{platform} get_mac_addr "{name}"'.format(platform=self._platform,
|
||||
@ -278,7 +270,10 @@ class Router(BaseVM):
|
||||
|
||||
status = yield from self.get_status()
|
||||
if status != "inactive":
|
||||
yield from self._hypervisor.send('vm stop "{name}"'.format(name=self._name))
|
||||
try:
|
||||
yield from self._hypervisor.send('vm stop "{name}"'.format(name=self._name))
|
||||
except DynamipsError as e:
|
||||
log.warn("Could not stop {}: {}".format(self._name, e))
|
||||
self.status = "stopped"
|
||||
log.info('Router "{name}" [{id}] has been stopped'.format(name=self._name, id=self._id))
|
||||
yield from self.save_configs()
|
||||
@ -328,19 +323,8 @@ class Router(BaseVM):
|
||||
@asyncio.coroutine
|
||||
def close(self):
|
||||
|
||||
if self._closed:
|
||||
# router is already closed
|
||||
return
|
||||
|
||||
log.debug('Router "{name}" [{id}] is closing'.format(name=self._name, id=self._id))
|
||||
|
||||
if self._console:
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = None
|
||||
|
||||
if self._aux:
|
||||
self._manager.port_manager.release_tcp_port(self._aux, self._project)
|
||||
self._aux = None
|
||||
if not (yield from super().close()):
|
||||
return False
|
||||
|
||||
for adapter in self._slots:
|
||||
if adapter is not None:
|
||||
@ -354,8 +338,8 @@ class Router(BaseVM):
|
||||
try:
|
||||
yield from self.stop()
|
||||
yield from self._hypervisor.send('vm delete "{}"'.format(self._name))
|
||||
except DynamipsError:
|
||||
pass
|
||||
except DynamipsError as e:
|
||||
log.warn("Could not stop and delete {}: {}".format(self._name, e))
|
||||
yield from self.hypervisor.stop()
|
||||
|
||||
if self._auto_delete_disks:
|
||||
@ -375,7 +359,6 @@ class Router(BaseVM):
|
||||
except OSError as e:
|
||||
log.warn("Could not delete file {}: {}".format(file, e))
|
||||
continue
|
||||
self._closed = True
|
||||
|
||||
@property
|
||||
def platform(self):
|
||||
@ -913,25 +896,8 @@ class Router(BaseVM):
|
||||
:param console: console port (integer)
|
||||
"""
|
||||
|
||||
yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=console))
|
||||
|
||||
log.info('Router "{name}" [{id}]: console port updated from {old_console} to {new_console}'.format(name=self._name,
|
||||
id=self._id,
|
||||
old_console=self._console,
|
||||
new_console=console))
|
||||
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = self._manager.port_manager.reserve_tcp_port(console, self._project)
|
||||
|
||||
@property
|
||||
def aux(self):
|
||||
"""
|
||||
Returns the TCP auxiliary port.
|
||||
|
||||
:returns: console auxiliary port (integer)
|
||||
"""
|
||||
|
||||
return self._aux
|
||||
self.console = console
|
||||
yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self.console))
|
||||
|
||||
@asyncio.coroutine
|
||||
def set_aux(self, aux):
|
||||
@ -941,16 +907,9 @@ class Router(BaseVM):
|
||||
:param aux: console auxiliary port (integer)
|
||||
"""
|
||||
|
||||
self.aux = aux
|
||||
yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux))
|
||||
|
||||
log.info('Router "{name}" [{id}]: aux port updated from {old_aux} to {new_aux}'.format(name=self._name,
|
||||
id=self._id,
|
||||
old_aux=self._aux,
|
||||
new_aux=aux))
|
||||
|
||||
self._manager.port_manager.release_tcp_port(self._aux, self._project)
|
||||
self._aux = self._manager.port_manager.reserve_tcp_port(aux, self._project)
|
||||
|
||||
@asyncio.coroutine
|
||||
def get_cpu_usage(self, cpu_id=0):
|
||||
"""
|
||||
@ -1560,12 +1519,18 @@ class Router(BaseVM):
|
||||
"""
|
||||
|
||||
if self.startup_config or self.private_config:
|
||||
|
||||
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
|
||||
try:
|
||||
config_path = os.path.join(module_workdir, "configs")
|
||||
os.makedirs(config_path, exist_ok=True)
|
||||
except OSError as e:
|
||||
raise DynamipsError("Could could not create configuration directory {}: {}".format(config_path, e))
|
||||
|
||||
startup_config_base64, private_config_base64 = yield from self.extract_config()
|
||||
if startup_config_base64:
|
||||
if not self.startup_config:
|
||||
self._startup_config = os.path.join("configs", "i{}_startup-config.cfg".format(self._dynamips_id))
|
||||
|
||||
try:
|
||||
config = base64.b64decode(startup_config_base64).decode("utf-8", errors="replace")
|
||||
config = "!\n" + config.replace("\r", "")
|
||||
@ -1576,13 +1541,11 @@ class Router(BaseVM):
|
||||
except (binascii.Error, OSError) as e:
|
||||
raise DynamipsError("Could not save the startup configuration {}: {}".format(config_path, e))
|
||||
|
||||
if private_config_base64:
|
||||
if private_config_base64 and base64.b64decode(private_config_base64) != b'\nkerberos password \nend\n':
|
||||
if not self.private_config:
|
||||
self._private_config = os.path.join("configs", "i{}_private-config.cfg".format(self._dynamips_id))
|
||||
|
||||
try:
|
||||
config = base64.b64decode(private_config_base64).decode("utf-8", errors="replace")
|
||||
config = "!\n" + config.replace("\r", "")
|
||||
config_path = os.path.join(module_workdir, self.private_config)
|
||||
with open(config_path, "wb") as f:
|
||||
log.info("saving private-config to {}".format(self.private_config))
|
||||
|
@ -97,11 +97,8 @@ class IOUVM(BaseVM):
|
||||
Closes this IOU VM.
|
||||
"""
|
||||
|
||||
log.debug('IOU "{name}" [{id}] is closing'.format(name=self._name, id=self._id))
|
||||
|
||||
if self._console:
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = None
|
||||
if not (yield from super().close()):
|
||||
return False
|
||||
|
||||
adapters = self._ethernet_adapters + self._serial_adapters
|
||||
for adapter in adapters:
|
||||
@ -111,7 +108,6 @@ class IOUVM(BaseVM):
|
||||
self.manager.port_manager.release_udp_port(nio.lport, self._project)
|
||||
|
||||
yield from self.stop()
|
||||
self.save_configs()
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
@ -134,7 +130,7 @@ class IOUVM(BaseVM):
|
||||
self._path = self.manager.get_abs_image_path(path)
|
||||
|
||||
# In 1.2 users uploaded images to the images roots
|
||||
# after the migration their images are inside images/IOU
|
||||
# after the migration their images are inside images/IOU
|
||||
# but old topologies use old path
|
||||
if "IOU" not in self._path:
|
||||
location, filename = os.path.split(self._path)
|
||||
@ -234,9 +230,11 @@ class IOUVM(BaseVM):
|
||||
:returns: path to IOUYAP
|
||||
"""
|
||||
|
||||
path = self._manager.config.get_section_config("IOU").get("iouyap_path", "iouyap")
|
||||
if path == "iouyap":
|
||||
path = shutil.which("iouyap")
|
||||
search_path = self._manager.config.get_section_config("IOU").get("iouyap_path", "iouyap")
|
||||
path = shutil.which(search_path)
|
||||
# shutil.which return None if the path doesn't exists
|
||||
if not path:
|
||||
return search_path
|
||||
return path
|
||||
|
||||
@property
|
||||
@ -538,14 +536,19 @@ class IOUVM(BaseVM):
|
||||
:param returncode: Process returncode
|
||||
"""
|
||||
|
||||
log.info("{} process has stopped, return code: {}".format(process_name, returncode))
|
||||
self._terminate_process_iou()
|
||||
self._terminate_process_iouyap()
|
||||
self._ioucon_thread_stop_event.set()
|
||||
|
||||
if returncode != 0:
|
||||
self.project.emit("log.error", {"message": "{} process has stopped, return code: {}\n{}".format(process_name,
|
||||
returncode,
|
||||
self.read_iou_stdout())})
|
||||
log.info("{} process has stopped, return code: {}".format(process_name, returncode))
|
||||
else:
|
||||
if returncode == 11:
|
||||
message = "{} process has stopped, return code: {}. This could be an issue with the image using a different image can fix the issue.\n{}".format(process_name, returncode, self.read_iou_stdout())
|
||||
else:
|
||||
message = "{} process has stopped, return code: {}\n{}".format(process_name, returncode, self.read_iou_stdout())
|
||||
log.warn(message)
|
||||
self.project.emit("log.error", {"message": message})
|
||||
|
||||
def _rename_nvram_file(self):
|
||||
"""
|
||||
@ -667,7 +670,10 @@ class IOUVM(BaseVM):
|
||||
except asyncio.TimeoutError:
|
||||
if self._iou_process.returncode is None:
|
||||
log.warn("IOU process {} is still running... killing it".format(self._iou_process.pid))
|
||||
self._iou_process.kill()
|
||||
try:
|
||||
self._iou_process.kill()
|
||||
except ProcessLookupError:
|
||||
pass
|
||||
self._iou_process = None
|
||||
|
||||
if self.is_iouyap_running():
|
||||
@ -684,6 +690,7 @@ class IOUVM(BaseVM):
|
||||
self._iouyap_process = None
|
||||
|
||||
self._started = False
|
||||
self.save_configs()
|
||||
|
||||
def _terminate_process_iouyap(self):
|
||||
"""
|
||||
@ -913,6 +920,7 @@ class IOUVM(BaseVM):
|
||||
|
||||
self._adapters = self._ethernet_adapters + self._serial_adapters
|
||||
|
||||
@asyncio.coroutine
|
||||
def adapter_add_nio_binding(self, adapter_number, port_number, nio):
|
||||
"""
|
||||
Adds a adapter NIO binding.
|
||||
@ -945,6 +953,7 @@ class IOUVM(BaseVM):
|
||||
except ProcessLookupError:
|
||||
log.error("Could not update iouyap configuration: process (PID={}) not found".format(self._iouyap_process.pid))
|
||||
|
||||
@asyncio.coroutine
|
||||
def adapter_remove_nio_binding(self, adapter_number, port_number):
|
||||
"""
|
||||
Removes an adapter NIO binding.
|
||||
@ -1065,6 +1074,14 @@ class IOUVM(BaseVM):
|
||||
else:
|
||||
startup_config = startup_config.replace("%h", self._name)
|
||||
f.write(startup_config)
|
||||
|
||||
vlan_file = os.path.join(self.working_dir, "vlan.dat-{:05d}".format(self.application_id))
|
||||
if os.path.exists(vlan_file):
|
||||
try:
|
||||
os.remove(vlan_file)
|
||||
except OSError as e:
|
||||
log.error("Could not delete VLAN file '{}': {}".format(vlan_file, e))
|
||||
|
||||
except OSError as e:
|
||||
raise IOUError("Can't write startup-config file '{}': {}".format(startup_config_path, e))
|
||||
|
||||
@ -1098,7 +1115,7 @@ class IOUVM(BaseVM):
|
||||
if private_config is None:
|
||||
private_config = ''
|
||||
|
||||
# We disallow erasing the startup config file
|
||||
# We disallow erasing the private config file
|
||||
if len(private_config) == 0 and os.path.exists(private_config_path):
|
||||
return
|
||||
|
||||
@ -1205,18 +1222,16 @@ class IOUVM(BaseVM):
|
||||
config_path = os.path.join(self.working_dir, "startup-config.cfg")
|
||||
try:
|
||||
config = startup_config_content.decode("utf-8", errors="replace")
|
||||
config = "!\n" + config.replace("\r", "")
|
||||
with open(config_path, "wb") as f:
|
||||
log.info("saving startup-config to {}".format(config_path))
|
||||
f.write(config.encode("utf-8"))
|
||||
except (binascii.Error, OSError) as e:
|
||||
raise IOUError("Could not save the startup configuration {}: {}".format(config_path, e))
|
||||
|
||||
if private_config_content:
|
||||
if private_config_content and private_config_content != b'\nend\n':
|
||||
config_path = os.path.join(self.working_dir, "private-config.cfg")
|
||||
try:
|
||||
config = private_config_content.decode("utf-8", errors="replace")
|
||||
config = "!\n" + config.replace("\r", "")
|
||||
with open(config_path, "wb") as f:
|
||||
log.info("saving private-config to {}".format(config_path))
|
||||
f.write(config.encode("utf-8"))
|
||||
|
@ -550,6 +550,8 @@ def send_recv_loop(epoll, console, router, esc_char, stop_event):
|
||||
esc_state = True
|
||||
else:
|
||||
router.write(buf)
|
||||
except ConnectionError as e:
|
||||
pass
|
||||
finally:
|
||||
router.unregister(epoll)
|
||||
console.unregister(epoll)
|
||||
|
@ -24,6 +24,10 @@ import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# This ports are disallowed by Chrome and Firefox to avoid trouble with skip them
|
||||
BANNED_PORTS = set((1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87, 95, 101, 102, 103, 104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139, 143, 179, 389, 465, 512, 513, 514, 515, 526, 530, 531, 532, 540, 556, 563, 587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667, 6668, 6669))
|
||||
|
||||
|
||||
class PortManager:
|
||||
|
||||
"""
|
||||
@ -42,8 +46,8 @@ class PortManager:
|
||||
server_config = Config.instance().get_section_config("Server")
|
||||
remote_console_connections = server_config.getboolean("allow_remote_console")
|
||||
|
||||
console_start_port_range = server_config.getint("console_start_port_range", 2001)
|
||||
console_end_port_range = server_config.getint("console_end_port_range", 7000)
|
||||
console_start_port_range = server_config.getint("console_start_port_range", 5000)
|
||||
console_end_port_range = server_config.getint("console_end_port_range", 10000)
|
||||
self._console_port_range = (console_start_port_range, console_end_port_range)
|
||||
log.debug("Console port range is {}-{}".format(console_start_port_range, console_end_port_range))
|
||||
|
||||
@ -102,7 +106,7 @@ class PortManager:
|
||||
return self._udp_host
|
||||
|
||||
@udp_host.setter
|
||||
def host(self, new_host):
|
||||
def udp_host(self, new_host):
|
||||
|
||||
self._udp_host = new_host
|
||||
|
||||
@ -144,11 +148,13 @@ class PortManager:
|
||||
|
||||
last_exception = None
|
||||
for port in range(start_port, end_port + 1):
|
||||
if port in ignore_ports:
|
||||
if port in ignore_ports or port in BANNED_PORTS:
|
||||
continue
|
||||
|
||||
try:
|
||||
PortManager._check_port(host, port, socket_type)
|
||||
if host != "0.0.0.0":
|
||||
PortManager._check_port("0.0.0.0", port, socket_type)
|
||||
return port
|
||||
except OSError as e:
|
||||
last_exception = e
|
||||
@ -225,15 +231,15 @@ class PortManager:
|
||||
old_port = port
|
||||
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
||||
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
|
||||
log.warning(msg)
|
||||
project.emit("log.warning", {"message": msg})
|
||||
log.debug(msg)
|
||||
#project.emit("log.warning", {"message": msg})
|
||||
return port
|
||||
if port < self._console_port_range[0] or port > self._console_port_range[1]:
|
||||
if port < port_range_start or port > port_range_end:
|
||||
old_port = port
|
||||
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
||||
msg = "TCP port {} is outside the range {}-{} on host {}. Port has been replaced by {}".format(old_port, port_range_start, port_range_end, self._console_host, port)
|
||||
log.warning(msg)
|
||||
project.emit("log.warning", {"message": msg})
|
||||
log.debug(msg)
|
||||
#project.emit("log.warning", {"message": msg})
|
||||
return port
|
||||
try:
|
||||
PortManager._check_port(self._console_host, port, "TCP")
|
||||
@ -241,8 +247,8 @@ class PortManager:
|
||||
old_port = port
|
||||
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
|
||||
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
|
||||
log.warning(msg)
|
||||
project.emit("log.warning", {"message": msg})
|
||||
log.debug(msg)
|
||||
#project.emit("log.warning", {"message": msg})
|
||||
return port
|
||||
|
||||
self._used_tcp_ports.add(port)
|
||||
|
@ -15,17 +15,21 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import aiohttp
|
||||
import os
|
||||
import aiohttp
|
||||
import shutil
|
||||
import asyncio
|
||||
import hashlib
|
||||
import zipstream
|
||||
import zipfile
|
||||
import json
|
||||
|
||||
from uuid import UUID, uuid4
|
||||
from .port_manager import PortManager
|
||||
from ..config import Config
|
||||
from ..utils.asyncio import wait_run_in_executor
|
||||
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -169,6 +173,8 @@ class Project:
|
||||
@name.setter
|
||||
def name(self, name):
|
||||
|
||||
if "/" in name or "\\" in name:
|
||||
raise aiohttp.web.HTTPForbidden(text="Name can not contain path separator")
|
||||
self._name = name
|
||||
|
||||
@property
|
||||
@ -428,7 +434,10 @@ class Project:
|
||||
path = os.path.join(directory, project)
|
||||
if os.path.exists(os.path.join(path, ".gns3_temporary")):
|
||||
log.warning("Purge old temporary project {}".format(project))
|
||||
shutil.rmtree(path)
|
||||
try:
|
||||
shutil.rmtree(path)
|
||||
except OSError as e:
|
||||
log.error("Error when cleaning {}: {}".format(path, str(e)))
|
||||
|
||||
def modules(self):
|
||||
"""
|
||||
@ -507,3 +516,183 @@ class Project:
|
||||
break
|
||||
m.update(buf)
|
||||
return m.hexdigest()
|
||||
|
||||
def export(self, include_images=False):
|
||||
"""
|
||||
Export the project as zip. It's a ZipStream object.
|
||||
The file will be read chunk by chunk when you iterate on
|
||||
the zip.
|
||||
|
||||
It will ignore some files like snapshots and
|
||||
|
||||
:returns: ZipStream object
|
||||
"""
|
||||
|
||||
z = zipstream.ZipFile(allowZip64=True)
|
||||
# topdown allo to modify the list of directory in order to ignore
|
||||
# directory
|
||||
for root, dirs, files in os.walk(self._path, topdown=True):
|
||||
# Remove snapshots and capture
|
||||
if os.path.split(root)[-1:][0] == "project-files":
|
||||
dirs[:] = [d for d in dirs if d not in ("snapshots", "captures")]
|
||||
|
||||
# Ignore log files and OS noise
|
||||
files = [f for f in files if not f.endswith('_log.txt') and not f.endswith('.log') and f != '.DS_Store']
|
||||
|
||||
for file in files:
|
||||
path = os.path.join(root, file)
|
||||
# Try open the file
|
||||
try:
|
||||
open(path).close()
|
||||
except OSError as e:
|
||||
msg = "Could not export file {}: {}".format(path, e)
|
||||
log.warn(msg)
|
||||
self.emit("log.warning", {"message": msg})
|
||||
continue
|
||||
# We rename the .gns3 project.gns3 to avoid the task to the client to guess the file name
|
||||
if file.endswith(".gns3"):
|
||||
self._export_project_file(path, z, include_images)
|
||||
else:
|
||||
# We merge the data from all server in the same project-files directory
|
||||
vm_directory = os.path.join(self._path, "servers", "vm")
|
||||
if os.path.commonprefix([root, vm_directory]) == vm_directory:
|
||||
z.write(path, os.path.relpath(path, vm_directory), compress_type=zipfile.ZIP_DEFLATED)
|
||||
else:
|
||||
z.write(path, os.path.relpath(path, self._path), compress_type=zipfile.ZIP_DEFLATED)
|
||||
return z
|
||||
|
||||
def _export_images(self, image, type, z):
|
||||
"""
|
||||
Take a project file (.gns3) and export images to the zip
|
||||
|
||||
:param image: Image path
|
||||
:param type: Type of image
|
||||
:param z: Zipfile instance for the export
|
||||
"""
|
||||
from . import MODULES
|
||||
|
||||
for module in MODULES:
|
||||
try:
|
||||
img_directory = module.instance().get_images_directory()
|
||||
except NotImplementedError:
|
||||
# Some modules don't have images
|
||||
continue
|
||||
|
||||
directory = os.path.split(img_directory)[-1:][0]
|
||||
|
||||
if os.path.exists(image):
|
||||
path = image
|
||||
else:
|
||||
path = os.path.join(img_directory, image)
|
||||
|
||||
if os.path.exists(path):
|
||||
arcname = os.path.join("images", directory, os.path.basename(image))
|
||||
z.write(path, arcname)
|
||||
break
|
||||
|
||||
def _export_project_file(self, path, z, include_images):
|
||||
"""
|
||||
Take a project file (.gns3) and patch it for the export
|
||||
|
||||
:param path: Path of the .gns3
|
||||
"""
|
||||
|
||||
with open(path) as f:
|
||||
topology = json.load(f)
|
||||
if "topology" in topology and "nodes" in topology["topology"]:
|
||||
for node in topology["topology"]["nodes"]:
|
||||
if "properties" in node and node["type"] != "DockerVM":
|
||||
for prop, value in node["properties"].items():
|
||||
if prop.endswith("image"):
|
||||
node["properties"][prop] = os.path.basename(value)
|
||||
if include_images is True:
|
||||
self._export_images(value, node["type"], z)
|
||||
z.writestr("project.gns3", json.dumps(topology).encode())
|
||||
|
||||
def import_zip(self, stream, gns3vm=True):
|
||||
"""
|
||||
Import a project contain in a zip file
|
||||
|
||||
:param stream: A io.BytesIO of the zipfile
|
||||
:param gns3vm: True move docker, iou and qemu to the GNS3 VM
|
||||
"""
|
||||
|
||||
with zipfile.ZipFile(stream) as myzip:
|
||||
myzip.extractall(self.path)
|
||||
|
||||
project_file = os.path.join(self.path, "project.gns3")
|
||||
if os.path.exists(project_file):
|
||||
with open(project_file) as f:
|
||||
topology = json.load(f)
|
||||
topology["project_id"] = self.id
|
||||
topology["name"] = self.name
|
||||
topology.setdefault("topology", {})
|
||||
topology["topology"].setdefault("nodes", [])
|
||||
topology["topology"]["servers"] = [
|
||||
{
|
||||
"id": 1,
|
||||
"local": True,
|
||||
"vm": False
|
||||
}
|
||||
]
|
||||
|
||||
# By default all node run on local server
|
||||
for node in topology["topology"]["nodes"]:
|
||||
node["server_id"] = 1
|
||||
|
||||
if gns3vm:
|
||||
# Move to servers/vm directory the data that should be import on remote server
|
||||
modules_to_vm = {
|
||||
"qemu": "QemuVM",
|
||||
"iou": "IOUDevice",
|
||||
"docker": "DockerVM"
|
||||
}
|
||||
vm_directory = os.path.join(self.path, "servers", "vm", "project-files")
|
||||
vm_server_use = False
|
||||
|
||||
for module, device_type in modules_to_vm.items():
|
||||
module_directory = os.path.join(self.path, "project-files", module)
|
||||
if os.path.exists(module_directory):
|
||||
os.makedirs(vm_directory, exist_ok=True)
|
||||
shutil.move(module_directory, os.path.join(vm_directory, module))
|
||||
|
||||
# Patch node to use the GNS3 VM
|
||||
for node in topology["topology"]["nodes"]:
|
||||
if node["type"] == device_type:
|
||||
node["server_id"] = 2
|
||||
vm_server_use = True
|
||||
|
||||
# We use the GNS3 VM. We need to add the server to the list
|
||||
if vm_server_use:
|
||||
topology["topology"]["servers"].append({
|
||||
"id": 2,
|
||||
"vm": True,
|
||||
"local": False
|
||||
})
|
||||
|
||||
# Write the modified topology
|
||||
with open(project_file, "w") as f:
|
||||
json.dump(topology, f, indent=4)
|
||||
|
||||
# Rename to a human distinctive name
|
||||
shutil.move(project_file, os.path.join(self.path, self.name + ".gns3"))
|
||||
if os.path.exists(os.path.join(self.path, "images")):
|
||||
self._import_images()
|
||||
|
||||
def _import_images(self):
|
||||
"""
|
||||
Copy images to the images directory or delete them if they
|
||||
already exists.
|
||||
"""
|
||||
image_dir = self._config().get("images_path")
|
||||
|
||||
root = os.path.join(self.path, "images")
|
||||
for (dirpath, dirnames, filenames) in os.walk(root):
|
||||
for filename in filenames:
|
||||
path = os.path.join(dirpath, filename)
|
||||
dst = os.path.join(image_dir, os.path.relpath(path, root))
|
||||
os.makedirs(os.path.dirname(dst), exist_ok=True)
|
||||
shutil.move(path, dst)
|
||||
|
||||
# Cleanup the project
|
||||
shutil.rmtree(root)
|
||||
|
@ -79,8 +79,6 @@ class ProjectManager:
|
||||
|
||||
if project_id is not None and project_id in self._projects:
|
||||
return self._projects[project_id]
|
||||
# FIXME: should we have an error?
|
||||
#raise aiohttp.web.HTTPConflict(text="Project ID {} is already in use on this server".format(project_id))
|
||||
project = Project(name=name, project_id=project_id, path=path, temporary=temporary)
|
||||
self._projects[project.id] = project
|
||||
return project
|
||||
|
103
gns3server/modules/qemu/qcow2.py
Normal file
103
gns3server/modules/qemu/qcow2.py
Normal file
@ -0,0 +1,103 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import struct
|
||||
|
||||
|
||||
class Qcow2Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Qcow2:
|
||||
"""
|
||||
Allow to parse a Qcow2 file
|
||||
"""
|
||||
|
||||
def __init__(self, path):
|
||||
|
||||
self.path = path
|
||||
self._reload()
|
||||
|
||||
def _reload(self):
|
||||
# Each QCOW2 file begins with a header, in big endian format, as follows:
|
||||
#
|
||||
# typedef struct QCowHeader {
|
||||
# uint32_t magic;
|
||||
# uint32_t version;
|
||||
#
|
||||
# uint64_t backing_file_offset;
|
||||
# uint32_t backing_file_size;
|
||||
#
|
||||
# uint32_t cluster_bits;
|
||||
# uint64_t size; /* in bytes */
|
||||
# uint32_t crypt_method;
|
||||
#
|
||||
# uint32_t l1_size;
|
||||
# uint64_t l1_table_offset;
|
||||
#
|
||||
# uint64_t refcount_table_offset;
|
||||
# uint32_t refcount_table_clusters;
|
||||
#
|
||||
# uint32_t nb_snapshots;
|
||||
# uint64_t snapshots_offset;
|
||||
# } QCowHeader;
|
||||
struct_format = ">IIQi"
|
||||
|
||||
with open(self.path, 'rb') as f:
|
||||
content = f.read(struct.calcsize(struct_format))
|
||||
|
||||
try:
|
||||
self.magic, self.version, self.backing_file_offset, self.backing_file_size = struct.unpack_from(struct_format, content)
|
||||
except struct.error:
|
||||
raise Qcow2Error("Invalid file header for {}".format(self.path))
|
||||
|
||||
if self.magic != 1363560955: # The first 4 bytes contain the characters 'Q', 'F', 'I' followed by 0xfb.
|
||||
raise Qcow2Error("Invalid magic for {}".format(self.path))
|
||||
|
||||
@property
|
||||
def backing_file(self):
|
||||
"""
|
||||
When using linked clone this will return the path to the base image
|
||||
|
||||
:returns: None if it's not a linked clone, the path otherwise
|
||||
"""
|
||||
with open(self.path, 'rb') as f:
|
||||
f.seek(self.backing_file_offset)
|
||||
content = f.read(self.backing_file_size)
|
||||
path = content.decode()
|
||||
if len(path) == 0:
|
||||
return None
|
||||
return path
|
||||
|
||||
@asyncio.coroutine
|
||||
def rebase(self, qemu_img, base_image):
|
||||
"""
|
||||
Rebase a linked clone in order to use the correct disk
|
||||
|
||||
:param qemu_img: Path to the qemu-img binary
|
||||
:param base_image: Path to the base image
|
||||
"""
|
||||
if not os.path.exists(base_image):
|
||||
raise FileNotFoundError(base_image)
|
||||
command = [qemu_img, "rebase", "-u", "-b", base_image, self.path]
|
||||
process = yield from asyncio.create_subprocess_exec(*command)
|
||||
retcode = yield from process.wait()
|
||||
if retcode != 0:
|
||||
raise Qcow2Error("Could not rebase the image")
|
||||
self._reload()
|
@ -30,7 +30,7 @@ import asyncio
|
||||
import socket
|
||||
import gns3server
|
||||
|
||||
from pkg_resources import parse_version
|
||||
from gns3server.utils import parse_version
|
||||
from .qemu_error import QemuError
|
||||
from ..adapters.ethernet_adapter import EthernetAdapter
|
||||
from ..nios.nio_udp import NIOUDP
|
||||
@ -40,6 +40,9 @@ from ..base_vm import BaseVM
|
||||
from ...schemas.qemu import QEMU_OBJECT_SCHEMA, QEMU_PLATFORMS
|
||||
from ...utils.asyncio import monitor_process
|
||||
from ...utils.images import md5sum
|
||||
from .qcow2 import Qcow2, Qcow2Error
|
||||
from ...utils import macaddress_to_int, int_to_macaddress
|
||||
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
@ -79,8 +82,8 @@ class QemuVM(BaseVM):
|
||||
try:
|
||||
self.qemu_path = qemu_path
|
||||
except QemuError as e:
|
||||
# If the binary is not found for topologies 1.4 and later
|
||||
# search via the platform otherwise use the binary name
|
||||
# If the binary is not found for topologies 1.4 and later
|
||||
# search via the platform otherwise use the binary name
|
||||
if platform:
|
||||
self.platform = platform
|
||||
else:
|
||||
@ -490,7 +493,8 @@ class QemuVM(BaseVM):
|
||||
"""
|
||||
|
||||
if not mac_address:
|
||||
self._mac_address = "00:00:ab:%s:%s:00" % (self.id[-4:-2], self.id[-2:])
|
||||
# use the node UUID to generate a random MAC address
|
||||
self._mac_address = "00:%s:%s:%s:%s:00" % (self.project.id[-4:-2], self.project.id[-2:], self.id[-4:-2], self.id[-2:])
|
||||
else:
|
||||
self._mac_address = mac_address
|
||||
|
||||
@ -988,22 +992,18 @@ class QemuVM(BaseVM):
|
||||
Closes this QEMU VM.
|
||||
"""
|
||||
|
||||
log.debug('QEMU VM "{name}" [{id}] is closing'.format(name=self._name, id=self._id))
|
||||
if not (yield from super().close()):
|
||||
return False
|
||||
|
||||
self.acpi_shutdown = False
|
||||
yield from self.stop()
|
||||
|
||||
if self._console:
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = None
|
||||
|
||||
for adapter in self._ethernet_adapters:
|
||||
if adapter is not None:
|
||||
for nio in adapter.ports.values():
|
||||
if nio and isinstance(nio, NIOUDP):
|
||||
self.manager.port_manager.release_udp_port(nio.lport, self._project)
|
||||
|
||||
yield from self.stop()
|
||||
|
||||
@asyncio.coroutine
|
||||
def _get_vm_status(self):
|
||||
"""
|
||||
@ -1237,90 +1237,47 @@ class QemuVM(BaseVM):
|
||||
options = []
|
||||
qemu_img_path = self._get_qemu_img()
|
||||
|
||||
if self._hda_disk_image:
|
||||
if not os.path.isfile(self._hda_disk_image) or not os.path.exists(self._hda_disk_image):
|
||||
if os.path.islink(self._hda_disk_image):
|
||||
raise QemuError("hda disk image '{}' linked to '{}' is not accessible".format(self._hda_disk_image, os.path.realpath(self._hda_disk_image)))
|
||||
drives = ["a", "b", "c", "d"]
|
||||
|
||||
for disk_index, drive in enumerate(drives):
|
||||
disk_image = getattr(self, "_hd{}_disk_image".format(drive))
|
||||
interface = getattr(self, "hd{}_disk_interface".format(drive))
|
||||
|
||||
if not disk_image:
|
||||
continue
|
||||
|
||||
disk_name = "hd" + drive
|
||||
|
||||
if not os.path.isfile(disk_image) or not os.path.exists(disk_image):
|
||||
if os.path.islink(disk_image):
|
||||
raise QemuError("{} disk image '{}' linked to '{}' is not accessible".format(disk_name, disk_image, os.path.realpath(disk_image)))
|
||||
else:
|
||||
raise QemuError("hda disk image '{}' is not accessible".format(self._hda_disk_image))
|
||||
raise QemuError("{} disk image '{}' is not accessible".format(disk_name, disk_image))
|
||||
if self._linked_clone:
|
||||
hda_disk = os.path.join(self.working_dir, "hda_disk.qcow2")
|
||||
if not os.path.exists(hda_disk):
|
||||
disk = os.path.join(self.working_dir, "{}_disk.qcow2".format(disk_name))
|
||||
if not os.path.exists(disk):
|
||||
# create the disk
|
||||
try:
|
||||
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
||||
"backing_file={}".format(self._hda_disk_image),
|
||||
"-f", "qcow2", hda_disk)
|
||||
"backing_file={}".format(disk_image),
|
||||
"-f", "qcow2", disk)
|
||||
retcode = yield from process.wait()
|
||||
if retcode is not None and retcode != 0:
|
||||
raise QemuError("Could not create {} disk image".format(disk_name))
|
||||
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
||||
except (OSError, subprocess.SubprocessError) as e:
|
||||
raise QemuError("Could not create hda disk image {}".format(e))
|
||||
else:
|
||||
hda_disk = self._hda_disk_image
|
||||
options.extend(["-drive", 'file={},if={},index=0,media=disk'.format(hda_disk, self.hda_disk_interface)])
|
||||
|
||||
if self._hdb_disk_image:
|
||||
if not os.path.isfile(self._hdb_disk_image) or not os.path.exists(self._hdb_disk_image):
|
||||
if os.path.islink(self._hdb_disk_image):
|
||||
raise QemuError("hdb disk image '{}' linked to '{}' is not accessible".format(self._hdb_disk_image, os.path.realpath(self._hdb_disk_image)))
|
||||
raise QemuError("Could not create {} disk image {}".format(disk_name, e))
|
||||
else:
|
||||
raise QemuError("hdb disk image '{}' is not accessible".format(self._hdb_disk_image))
|
||||
if self._linked_clone:
|
||||
hdb_disk = os.path.join(self.working_dir, "hdb_disk.qcow2")
|
||||
if not os.path.exists(hdb_disk):
|
||||
# The disk exists we check if the clone work
|
||||
try:
|
||||
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
||||
"backing_file={}".format(self._hdb_disk_image),
|
||||
"-f", "qcow2", hdb_disk)
|
||||
retcode = yield from process.wait()
|
||||
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
||||
except (OSError, subprocess.SubprocessError) as e:
|
||||
raise QemuError("Could not create hdb disk image {}".format(e))
|
||||
else:
|
||||
hdb_disk = self._hdb_disk_image
|
||||
options.extend(["-drive", 'file={},if={},index=1,media=disk'.format(hdb_disk, self.hdb_disk_interface)])
|
||||
qcow2 = Qcow2(disk)
|
||||
yield from qcow2.rebase(qemu_img_path, disk_image)
|
||||
except (Qcow2Error, OSError) as e:
|
||||
raise QemuError("Could not use qcow2 disk image {} for {} {}".format(disk_image, disk_name, e))
|
||||
|
||||
if self._hdc_disk_image:
|
||||
if not os.path.isfile(self._hdc_disk_image) or not os.path.exists(self._hdc_disk_image):
|
||||
if os.path.islink(self._hdc_disk_image):
|
||||
raise QemuError("hdc disk image '{}' linked to '{}' is not accessible".format(self._hdc_disk_image, os.path.realpath(self._hdc_disk_image)))
|
||||
else:
|
||||
raise QemuError("hdc disk image '{}' is not accessible".format(self._hdc_disk_image))
|
||||
if self._linked_clone:
|
||||
hdc_disk = os.path.join(self.working_dir, "hdc_disk.qcow2")
|
||||
if not os.path.exists(hdc_disk):
|
||||
try:
|
||||
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
||||
"backing_file={}".format(self._hdc_disk_image),
|
||||
"-f", "qcow2", hdc_disk)
|
||||
retcode = yield from process.wait()
|
||||
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
||||
except (OSError, subprocess.SubprocessError) as e:
|
||||
raise QemuError("Could not create hdc disk image {}".format(e))
|
||||
else:
|
||||
hdc_disk = self._hdc_disk_image
|
||||
options.extend(["-drive", 'file={},if={},index=2,media=disk'.format(hdc_disk, self.hdc_disk_interface)])
|
||||
|
||||
if self._hdd_disk_image:
|
||||
if not os.path.isfile(self._hdd_disk_image) or not os.path.exists(self._hdd_disk_image):
|
||||
if os.path.islink(self._hdd_disk_image):
|
||||
raise QemuError("hdd disk image '{}' linked to '{}' is not accessible".format(self._hdd_disk_image, os.path.realpath(self._hdd_disk_image)))
|
||||
else:
|
||||
raise QemuError("hdd disk image '{}' is not accessible".format(self._hdd_disk_image))
|
||||
if self._linked_clone:
|
||||
hdd_disk = os.path.join(self.working_dir, "hdd_disk.qcow2")
|
||||
if not os.path.exists(hdd_disk):
|
||||
try:
|
||||
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
|
||||
"backing_file={}".format(self._hdd_disk_image),
|
||||
"-f", "qcow2", hdd_disk)
|
||||
retcode = yield from process.wait()
|
||||
log.info("{} returned with {}".format(qemu_img_path, retcode))
|
||||
except (OSError, subprocess.SubprocessError) as e:
|
||||
raise QemuError("Could not create hdd disk image {}".format(e))
|
||||
else:
|
||||
hdd_disk = self._hdd_disk_image
|
||||
options.extend(["-drive", 'file={},if={},index=3,media=disk'.format(hdd_disk, self.hdd_disk_interface)])
|
||||
disk = disk_image
|
||||
options.extend(["-drive", 'file={},if={},index={},media=disk'.format(disk, interface, disk_index)])
|
||||
|
||||
return options
|
||||
|
||||
@ -1374,7 +1331,7 @@ class QemuVM(BaseVM):
|
||||
patched_qemu = True
|
||||
|
||||
for adapter_number, adapter in enumerate(self._ethernet_adapters):
|
||||
mac = "%s%02x" % (self._mac_address[:-2], (int(self._mac_address[-2:]) + adapter_number) % 255)
|
||||
mac = int_to_macaddress(macaddress_to_int(self._mac_address) + adapter_number)
|
||||
nio = adapter.get_nio(0)
|
||||
if self._legacy_networking:
|
||||
# legacy QEMU networking syntax (-net)
|
||||
@ -1467,6 +1424,11 @@ class QemuVM(BaseVM):
|
||||
command.extend(["-smp", "cpus={}".format(self._cpus)])
|
||||
if self._run_with_kvm(self.qemu_path, self._options):
|
||||
command.extend(["-enable-kvm"])
|
||||
version = yield from self.manager.get_qemu_version(self.qemu_path)
|
||||
# Issue on some combo Intel CPU + KVM + Qemu 2.4.0
|
||||
# https://github.com/GNS3/gns3-server/issues/685
|
||||
if version and parse_version(version) >= parse_version("2.4.0") and self.platform == "x86_64":
|
||||
command.extend(["-machine", "smm=off"])
|
||||
command.extend(["-boot", "order={}".format(self._boot_priority)])
|
||||
cdrom_option = self._cdrom_option()
|
||||
command.extend(cdrom_option)
|
||||
|
@ -66,7 +66,10 @@ class VirtualBox(BaseManager):
|
||||
elif sys.platform.startswith("darwin"):
|
||||
vboxmanage_path = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage"
|
||||
else:
|
||||
vboxmanage_path = shutil.which("vboxmanage")
|
||||
vboxmanage_path = "vboxmanage"
|
||||
|
||||
if not os.path.isabs(vboxmanage_path):
|
||||
vboxmanage_path = shutil.which(vboxmanage_path)
|
||||
|
||||
if not vboxmanage_path:
|
||||
raise VirtualBoxError("Could not find VBoxManage")
|
||||
@ -83,8 +86,8 @@ class VirtualBox(BaseManager):
|
||||
@asyncio.coroutine
|
||||
def execute(self, subcommand, args, timeout=60):
|
||||
|
||||
# We use a lock prevent parallel execution due to strange errors
|
||||
# reported by a user and reproduced by us.
|
||||
# We use a lock prevent parallel execution due to strange errors
|
||||
# reported by a user and reproduced by us.
|
||||
# https://github.com/GNS3/gns3-gui/issues/261
|
||||
with (yield from self._execute_lock):
|
||||
vboxmanage_path = self.vboxmanage_path
|
||||
|
@ -28,9 +28,9 @@ import json
|
||||
import socket
|
||||
import asyncio
|
||||
|
||||
from pkg_resources import parse_version
|
||||
from gns3server.utils import parse_version
|
||||
from gns3server.utils.telnet_server import TelnetServer
|
||||
from gns3server.utils.asyncio import wait_for_file_creation, wait_for_named_pipe_creation
|
||||
from gns3server.utils.asyncio import wait_for_file_creation, wait_for_named_pipe_creation, locked_coroutine
|
||||
from .virtualbox_error import VirtualBoxError
|
||||
from ..nios.nio_udp import NIOUDP
|
||||
from ..nios.nio_nat import NIONAT
|
||||
@ -60,7 +60,6 @@ class VirtualBoxVM(BaseVM):
|
||||
self._system_properties = {}
|
||||
self._telnet_server_thread = None
|
||||
self._serial_pipe = None
|
||||
self._closed = False
|
||||
|
||||
# VirtualBox settings
|
||||
self._adapters = adapters
|
||||
@ -159,7 +158,7 @@ class VirtualBoxVM(BaseVM):
|
||||
if self.id and os.path.isdir(os.path.join(self.working_dir, self._vmname)):
|
||||
vbox_file = os.path.join(self.working_dir, self._vmname, self._vmname + ".vbox")
|
||||
yield from self.manager.execute("registervm", [vbox_file])
|
||||
yield from self._reattach_hdds()
|
||||
yield from self._reattach_linked_hdds()
|
||||
else:
|
||||
yield from self._create_linked_clone()
|
||||
|
||||
@ -231,7 +230,7 @@ class VirtualBoxVM(BaseVM):
|
||||
if (yield from self.check_hw_virtualization()):
|
||||
self._hw_virtualization = True
|
||||
|
||||
@asyncio.coroutine
|
||||
@locked_coroutine
|
||||
def stop(self):
|
||||
"""
|
||||
Stops this VirtualBox VM.
|
||||
@ -251,7 +250,7 @@ class VirtualBoxVM(BaseVM):
|
||||
log.debug("Stop result: {}".format(result))
|
||||
|
||||
log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
|
||||
# yield from asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM
|
||||
yield from asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM
|
||||
try:
|
||||
# deactivate the first serial port
|
||||
yield from self._modify_vm("--uart1 off")
|
||||
@ -314,7 +313,10 @@ class VirtualBoxVM(BaseVM):
|
||||
return hdds
|
||||
|
||||
@asyncio.coroutine
|
||||
def _reattach_hdds(self):
|
||||
def _reattach_linked_hdds(self):
|
||||
"""
|
||||
Reattach linked cloned hard disks.
|
||||
"""
|
||||
|
||||
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
|
||||
try:
|
||||
@ -333,10 +335,67 @@ class VirtualBoxVM(BaseVM):
|
||||
device=hdd_info["device"],
|
||||
medium=hdd_file))
|
||||
|
||||
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium "{}"'.format(hdd_info["controller"],
|
||||
hdd_info["port"],
|
||||
hdd_info["device"],
|
||||
hdd_file))
|
||||
try:
|
||||
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium "{}"'.format(hdd_info["controller"],
|
||||
hdd_info["port"],
|
||||
hdd_info["device"],
|
||||
hdd_file))
|
||||
|
||||
except VirtualBoxError as e:
|
||||
log.warn("VirtualBox VM '{name}' [{id}] error reattaching HDD {controller} {port} {device} {medium}: {error}".format(name=self.name,
|
||||
id=self.id,
|
||||
controller=hdd_info["controller"],
|
||||
port=hdd_info["port"],
|
||||
device=hdd_info["device"],
|
||||
medium=hdd_file,
|
||||
error=e))
|
||||
continue
|
||||
|
||||
@asyncio.coroutine
|
||||
def save_linked_hdds_info(self):
|
||||
"""
|
||||
Save linked cloned hard disks information.
|
||||
|
||||
:returns: disk table information
|
||||
"""
|
||||
|
||||
hdd_table = []
|
||||
if self._linked_clone:
|
||||
if os.path.exists(self.working_dir):
|
||||
hdd_files = yield from self._get_all_hdd_files()
|
||||
vm_info = yield from self._get_vm_info()
|
||||
for entry, value in vm_info.items():
|
||||
match = re.search("^([\s\w]+)\-(\d)\-(\d)$", entry) # match Controller-PortNumber-DeviceNumber entry
|
||||
if match:
|
||||
controller = match.group(1)
|
||||
port = match.group(2)
|
||||
device = match.group(3)
|
||||
if value in hdd_files and os.path.exists(os.path.join(self.working_dir, self._vmname, "Snapshots", os.path.basename(value))):
|
||||
log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
|
||||
id=self.id,
|
||||
controller=controller,
|
||||
port=port,
|
||||
device=device))
|
||||
hdd_table.append(
|
||||
{
|
||||
"hdd": os.path.basename(value),
|
||||
"controller": controller,
|
||||
"port": port,
|
||||
"device": device,
|
||||
}
|
||||
)
|
||||
|
||||
if hdd_table:
|
||||
try:
|
||||
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
|
||||
with open(hdd_info_file, "w", encoding="utf-8") as f:
|
||||
json.dump(hdd_table, f, indent=4)
|
||||
except OSError as e:
|
||||
log.warning("VirtualBox VM '{name}' [{id}] could not write HHD info file: {error}".format(name=self.name,
|
||||
id=self.id,
|
||||
error=e.strerror))
|
||||
|
||||
return hdd_table
|
||||
|
||||
@asyncio.coroutine
|
||||
def close(self):
|
||||
@ -348,6 +407,9 @@ class VirtualBoxVM(BaseVM):
|
||||
# VM is already closed
|
||||
return
|
||||
|
||||
if not (yield from super().close()):
|
||||
return False
|
||||
|
||||
log.debug("VirtualBox VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
|
||||
if self._console:
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
@ -363,47 +425,29 @@ class VirtualBoxVM(BaseVM):
|
||||
yield from self.stop()
|
||||
|
||||
if self._linked_clone:
|
||||
hdd_table = []
|
||||
if os.path.exists(self.working_dir):
|
||||
hdd_files = yield from self._get_all_hdd_files()
|
||||
vm_info = yield from self._get_vm_info()
|
||||
for entry, value in vm_info.items():
|
||||
match = re.search("^([\s\w]+)\-(\d)\-(\d)$", entry)
|
||||
if match:
|
||||
controller = match.group(1)
|
||||
port = match.group(2)
|
||||
device = match.group(3)
|
||||
if value in hdd_files:
|
||||
log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
|
||||
id=self.id,
|
||||
controller=controller,
|
||||
port=port,
|
||||
device=device))
|
||||
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(controller,
|
||||
port,
|
||||
device))
|
||||
hdd_table.append(
|
||||
{
|
||||
"hdd": os.path.basename(value),
|
||||
"controller": controller,
|
||||
"port": port,
|
||||
"device": device,
|
||||
}
|
||||
)
|
||||
hdd_table = yield from self.save_linked_hdds_info()
|
||||
for hdd in hdd_table.copy():
|
||||
log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name,
|
||||
id=self.id,
|
||||
controller=hdd["controller"],
|
||||
port=hdd["port"],
|
||||
device=hdd["device"]))
|
||||
try:
|
||||
yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(hdd["controller"],
|
||||
hdd["port"],
|
||||
hdd["device"]))
|
||||
except VirtualBoxError as e:
|
||||
log.warn("VirtualBox VM '{name}' [{id}] error detaching HDD {controller} {port} {device}: {error}".format(name=self.name,
|
||||
id=self.id,
|
||||
controller=hdd["controller"],
|
||||
port=hdd["port"],
|
||||
device=hdd["device"],
|
||||
error=e))
|
||||
continue
|
||||
|
||||
log.info("VirtualBox VM '{name}' [{id}] unregistering".format(name=self.name, id=self.id))
|
||||
yield from self.manager.execute("unregistervm", [self._name])
|
||||
|
||||
if hdd_table:
|
||||
try:
|
||||
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
|
||||
with open(hdd_info_file, "w", encoding="utf-8") as f:
|
||||
json.dump(hdd_table, f, indent=4)
|
||||
except OSError as e:
|
||||
log.warning("VirtualBox VM '{name}' [{id}] could not write HHD info file: {error}".format(name=self.name,
|
||||
id=self.id,
|
||||
error=e.strerror))
|
||||
|
||||
log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
|
||||
self._closed = True
|
||||
|
||||
|
@ -31,14 +31,14 @@ import codecs
|
||||
from collections import OrderedDict
|
||||
from gns3server.utils.interfaces import interfaces
|
||||
from gns3server.utils.asyncio import subprocess_check_output
|
||||
from pkg_resources import parse_version
|
||||
from gns3server.utils import parse_version
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
from ..base_manager import BaseManager
|
||||
from .vmware_vm import VMwareVM
|
||||
from .vmware_error import VMwareError
|
||||
from .nio_vmnet import NIOVMNET
|
||||
from gns3server.modules.base_manager import BaseManager
|
||||
from gns3server.modules.vmware.vmware_vm import VMwareVM
|
||||
from gns3server.modules.vmware.vmware_error import VMwareError
|
||||
from gns3server.modules.vmware.nio_vmnet import NIOVMNET
|
||||
|
||||
|
||||
class VMware(BaseManager):
|
||||
@ -105,15 +105,18 @@ class VMware(BaseManager):
|
||||
elif sys.platform.startswith("darwin"):
|
||||
vmrun_path = "/Applications/VMware Fusion.app/Contents/Library/vmrun"
|
||||
else:
|
||||
vmrun_path = shutil.which("vmrun")
|
||||
vmrun_path = "vmrun"
|
||||
|
||||
if not os.path.isabs(vmrun_path):
|
||||
vmrun_path = shutil.which(vmrun_path)
|
||||
|
||||
if not vmrun_path:
|
||||
raise VMwareError("Could not find vmrun")
|
||||
raise VMwareError("Could not find VMware vmrun, please make sure it is installed")
|
||||
if not os.path.isfile(vmrun_path):
|
||||
raise VMwareError("vmrun {} is not accessible".format(vmrun_path))
|
||||
if not os.access(vmrun_path, os.X_OK):
|
||||
raise VMwareError("vmrun is not executable")
|
||||
if os.path.basename(vmrun_path) not in ["vmrun", "vmrun.exe"]:
|
||||
if os.path.basename(vmrun_path).lower() not in ["vmrun", "vmrun.exe"]:
|
||||
raise VMwareError("Invalid vmrun executable name {}".format(os.path.basename(vmrun_path)))
|
||||
|
||||
self._vmrun_path = vmrun_path
|
||||
@ -137,6 +140,50 @@ class VMware(BaseManager):
|
||||
version = match.group(1)
|
||||
return version
|
||||
|
||||
@asyncio.coroutine
|
||||
def _check_vmware_player_requirements(self, player_version):
|
||||
"""
|
||||
Check minimum requirements to use VMware Player.
|
||||
|
||||
VIX 1.13 was the release for Player 6.
|
||||
VIX 1.14 was the release for Player 7.
|
||||
VIX 1.15 was the release for Workstation Player 12.
|
||||
|
||||
:param player_version: VMware Player major version.
|
||||
"""
|
||||
|
||||
player_version = int(player_version)
|
||||
if player_version < 6:
|
||||
raise VMwareError("Using VMware Player requires version 6 or above")
|
||||
elif player_version == 6:
|
||||
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
|
||||
elif player_version == 7:
|
||||
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
|
||||
elif player_version >= 12:
|
||||
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
|
||||
|
||||
@asyncio.coroutine
|
||||
def _check_vmware_workstation_requirements(self, ws_version):
|
||||
"""
|
||||
Check minimum requirements to use VMware Workstation.
|
||||
|
||||
VIX 1.13 was the release for Workstation 10.
|
||||
VIX 1.14 was the release for Workstation 11.
|
||||
VIX 1.15 was the release for Workstation Pro 12.
|
||||
|
||||
:param ws_version: VMware Workstation major version.
|
||||
"""
|
||||
|
||||
ws_version = int(ws_version)
|
||||
if ws_version < 10:
|
||||
raise VMwareError("Using VMware Workstation requires version 10 or above")
|
||||
elif ws_version == 10:
|
||||
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
|
||||
elif ws_version == 11:
|
||||
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
|
||||
elif ws_version >= 12:
|
||||
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
|
||||
|
||||
@asyncio.coroutine
|
||||
def check_vmware_version(self):
|
||||
"""
|
||||
@ -150,18 +197,17 @@ class VMware(BaseManager):
|
||||
player_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Player")
|
||||
if player_version:
|
||||
log.debug("VMware Player version {} detected".format(player_version))
|
||||
if int(player_version) < 6:
|
||||
raise VMwareError("Using VMware Player requires version 6 or above")
|
||||
yield from self._check_vmware_player_requirements(player_version)
|
||||
else:
|
||||
log.warning("Could not find VMware version")
|
||||
else:
|
||||
log.debug("VMware Workstation version {} detected".format(ws_version))
|
||||
if int(ws_version) < 10:
|
||||
raise VMwareError("Using VMware Workstation requires version 10 or above")
|
||||
return
|
||||
yield from self._check_vmware_workstation_requirements(ws_version)
|
||||
else:
|
||||
if sys.platform.startswith("darwin"):
|
||||
return # FIXME: no version checking on Mac OS X
|
||||
if not os.path.isdir("/Applications/VMware Fusion.app"):
|
||||
raise VMwareError("VMware Fusion is not installed in the standard location /Applications/VMware Fusion.app")
|
||||
return # FIXME: no version checking on Mac OS X but we support all versions of fusion
|
||||
|
||||
vmware_path = VMware._get_linux_vmware_binary()
|
||||
if vmware_path is None:
|
||||
@ -172,16 +218,16 @@ class VMware(BaseManager):
|
||||
match = re.search("VMware Workstation ([0-9]+)\.", output)
|
||||
version = None
|
||||
if match:
|
||||
# VMware Workstation has been detected
|
||||
version = match.group(1)
|
||||
log.debug("VMware Workstation version {} detected".format(version))
|
||||
if int(version) < 10:
|
||||
raise VMwareError("Using VMware Workstation requires version 10 or above")
|
||||
yield from self._check_vmware_workstation_requirements(version)
|
||||
match = re.search("VMware Player ([0-9]+)\.", output)
|
||||
if match:
|
||||
# VMware Player has been detected
|
||||
version = match.group(1)
|
||||
log.debug("VMware Player version {} detected".format(version))
|
||||
if int(version) < 6:
|
||||
raise VMwareError("Using VMware Player requires version 6 or above")
|
||||
yield from self._check_vmware_player_requirements(version)
|
||||
if version is None:
|
||||
log.warning("Could not find VMware version. Output of VMware: {}".format(output))
|
||||
raise VMwareError("Could not find VMware version. Output of VMware: {}".format(output))
|
||||
@ -350,7 +396,17 @@ class VMware(BaseManager):
|
||||
return stdout_data.decode("utf-8", errors="ignore").splitlines()
|
||||
|
||||
@asyncio.coroutine
|
||||
def check_vmrun_version(self):
|
||||
def check_vmrun_version(self, minimum_required_version="1.13.0"):
|
||||
"""
|
||||
Checks the vmrun version.
|
||||
|
||||
VMware VIX library version must be at least >= 1.13 by default
|
||||
VIX 1.13 was the release for VMware Fusion 6, Workstation 10, and Player 6.
|
||||
VIX 1.14 was the release for VMware Fusion 7, Workstation 11 and Player 7.
|
||||
VIX 1.15 was the release for VMware Fusion 8, Workstation Pro 12 and Workstation Player 12.
|
||||
|
||||
:param required_version: required vmrun version number
|
||||
"""
|
||||
|
||||
with (yield from self._execute_lock):
|
||||
vmrun_path = self.vmrun_path
|
||||
@ -363,10 +419,9 @@ class VMware(BaseManager):
|
||||
version = None
|
||||
if match:
|
||||
version = match.group(1)
|
||||
log.debug("VMware vmrun version {} detected".format(version))
|
||||
if parse_version(version) < parse_version("1.13"):
|
||||
# VMware VIX library version must be at least >= 1.13
|
||||
raise VMwareError("VMware vmrun executable version must be >= version 1.13")
|
||||
log.debug("VMware vmrun version {} detected, minimum required: {}".format(version, minimum_required_version))
|
||||
if parse_version(version) < parse_version(minimum_required_version):
|
||||
raise VMwareError("VMware vmrun executable version must be >= version {}".format(minimum_required_version))
|
||||
if version is None:
|
||||
log.warning("Could not find VMware vmrun version. Output: {}".format(output))
|
||||
raise VMwareError("Could not find VMware vmrun version. Output: {}".format(output))
|
||||
@ -592,8 +647,11 @@ class VMware(BaseManager):
|
||||
"""
|
||||
|
||||
if sys.platform.startswith("win"):
|
||||
from win32com.shell import shell, shellcon
|
||||
documents_folder = shell.SHGetSpecialFolderPath(None, shellcon.CSIDL_PERSONAL)
|
||||
import ctypes
|
||||
import ctypes.wintypes
|
||||
path = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
|
||||
ctypes.windll.shell32.SHGetFolderPathW(None, 5, None, 0, path)
|
||||
documents_folder = path.value
|
||||
windows_type = sys.getwindowsversion().product_type
|
||||
if windows_type == 2 or windows_type == 3:
|
||||
return '{}\My Virtual Machines'.format(documents_folder)
|
||||
@ -614,26 +672,39 @@ class VMware(BaseManager):
|
||||
yield from self.check_vmware_version()
|
||||
|
||||
inventory_path = self.get_vmware_inventory_path()
|
||||
if os.path.exists(inventory_path):
|
||||
# FIXME: inventory may exist if VMware workstation has not been fully uninstalled, therefore VMware player VMs are not searched
|
||||
if os.path.exists(inventory_path) and self.host_type != "player":
|
||||
# inventory may exist for VMware player if VMware workstation has been previously installed
|
||||
return self._get_vms_from_inventory(inventory_path)
|
||||
else:
|
||||
# VMware player has no inventory file, let's search the default location for VMs.
|
||||
# VMware player has no inventory file, let's search the default location for VMs
|
||||
vmware_preferences_path = self.get_vmware_preferences_path()
|
||||
default_vm_path = self.get_vmware_default_vm_path()
|
||||
|
||||
pairs = {}
|
||||
if os.path.exists(vmware_preferences_path):
|
||||
# the default vm path may be present in VMware preferences file.
|
||||
try:
|
||||
pairs = self.parse_vmware_file(vmware_preferences_path)
|
||||
if "prefvmx.defaultvmpath" in pairs:
|
||||
default_vm_path = pairs["prefvmx.defaultvmpath"]
|
||||
except OSError as e:
|
||||
log.warning('Could not read VMware preferences file "{}": {}'.format(vmware_preferences_path, e))
|
||||
|
||||
if "prefvmx.defaultvmpath" in pairs:
|
||||
default_vm_path = pairs["prefvmx.defaultvmpath"]
|
||||
if not os.path.isdir(default_vm_path):
|
||||
raise VMwareError('Could not find the default VM directory: "{}"'.format(default_vm_path))
|
||||
return self._get_vms_from_directory(default_vm_path)
|
||||
vms = self._get_vms_from_directory(default_vm_path)
|
||||
|
||||
# looks for VMX paths in the preferences file in case not all VMs are in the default directory
|
||||
for key, value in pairs.items():
|
||||
m = re.match(r'pref.mruVM(\d+)\.filename', key)
|
||||
if m:
|
||||
display_name = "pref.mruVM{}.displayName".format(m.group(1))
|
||||
if display_name in pairs:
|
||||
found = False
|
||||
for vm in vms:
|
||||
if vm["vmname"] == display_name:
|
||||
found = True
|
||||
if found is False:
|
||||
vms.append({"vmname": pairs[display_name], "vmx_path": value})
|
||||
return vms
|
||||
|
||||
@staticmethod
|
||||
def _get_linux_vmware_binary():
|
||||
@ -644,3 +715,10 @@ class VMware(BaseManager):
|
||||
if path is None:
|
||||
path = shutil.which("vmplayer")
|
||||
return path
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
loop = asyncio.get_event_loop()
|
||||
vmware = VMware.instance()
|
||||
print("=> Check version")
|
||||
loop.run_until_complete(asyncio.async(vmware.check_vmware_version()))
|
||||
|
@ -26,12 +26,13 @@ import asyncio
|
||||
import tempfile
|
||||
|
||||
from gns3server.utils.telnet_server import TelnetServer
|
||||
from gns3server.utils.interfaces import get_windows_interfaces
|
||||
from gns3server.utils.interfaces import interfaces
|
||||
from gns3server.utils.asyncio import wait_for_file_creation, wait_for_named_pipe_creation
|
||||
from collections import OrderedDict
|
||||
from .vmware_error import VMwareError
|
||||
from ..nios.nio_udp import NIOUDP
|
||||
from ..nios.nio_nat import NIONAT
|
||||
from ..nios.nio_tap import NIOTAP
|
||||
from .nio_vmnet import NIOVMNET
|
||||
from ..adapters.ethernet_adapter import EthernetAdapter
|
||||
from ..base_vm import BaseVM
|
||||
@ -144,6 +145,8 @@ class VMwareVM(BaseVM):
|
||||
|
||||
yield from self.manager.check_vmrun_version()
|
||||
if self._linked_clone and not os.path.exists(os.path.join(self.working_dir, os.path.basename(self._vmx_path))):
|
||||
if self.manager.host_type == "player":
|
||||
raise VMwareError("Linked clones are not supported by VMware Player")
|
||||
# create the base snapshot for linked clones
|
||||
base_snapshot_name = "GNS3 Linked Base for clones"
|
||||
vmsd_path = os.path.splitext(self._vmx_path)[0] + ".vmsd"
|
||||
@ -228,41 +231,28 @@ class VMwareVM(BaseVM):
|
||||
if self._get_vmx_setting(connected):
|
||||
del self._vmx_pairs[connected]
|
||||
|
||||
# check for adapter type
|
||||
if self._adapter_type != "default":
|
||||
adapter_type = "ethernet{}.virtualdev".format(adapter_number)
|
||||
if adapter_type in self._vmx_pairs and self._vmx_pairs[adapter_type] != self._adapter_type:
|
||||
raise VMwareError("Existing VMware network adapter {} is not of type {}, please fix or set adapter type to default in GNS3".format(adapter_number,
|
||||
self._adapter_type))
|
||||
|
||||
# # check if any vmnet interface managed by GNS3 is being used on existing VMware adapters
|
||||
# if self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
|
||||
# connection_type = "ethernet{}.connectiontype".format(adapter_number)
|
||||
# if connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("hostonly", "custom"):
|
||||
# vnet = "ethernet{}.vnet".format(adapter_number)
|
||||
# if vnet in self._vmx_pairs:
|
||||
# vmnet = os.path.basename(self._vmx_pairs[vnet])
|
||||
# #nio = self._ethernet_adapters[adapter_number].get_nio(0)
|
||||
# if self.manager.is_managed_vmnet(vmnet):
|
||||
# raise VMwareError("Network adapter {} is already associated with VMnet interface {} which is managed by GNS3, please remove".format(adapter_number, vmnet))
|
||||
|
||||
# then configure VMware network adapters
|
||||
self.manager.refresh_vmnet_list(ubridge=self._use_ubridge)
|
||||
for adapter_number in range(0, self._adapters):
|
||||
|
||||
# add/update the interface
|
||||
if self._adapter_type == "default":
|
||||
# force default to e1000 because some guest OS don't detect the adapter (i.e. Windows 2012 server)
|
||||
# when 'virtualdev' is not set in the VMX file.
|
||||
adapter_type = "e1000"
|
||||
else:
|
||||
adapter_type = self._adapter_type
|
||||
ethernet_adapter = {"ethernet{}.present".format(adapter_number): "TRUE",
|
||||
"ethernet{}.addresstype".format(adapter_number): "generated",
|
||||
"ethernet{}.generatedaddressoffset".format(adapter_number): "0"}
|
||||
"ethernet{}.generatedaddressoffset".format(adapter_number): "0",
|
||||
"ethernet{}.virtualdev".format(adapter_number): adapter_type}
|
||||
self._vmx_pairs.update(ethernet_adapter)
|
||||
if self._adapter_type != "default":
|
||||
self._vmx_pairs["ethernet{}.virtualdev".format(adapter_number)] = self._adapter_type
|
||||
|
||||
connection_type = "ethernet{}.connectiontype".format(adapter_number)
|
||||
if not self._use_any_adapter and connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("nat", "bridged", "hostonly"):
|
||||
continue
|
||||
self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
|
||||
|
||||
self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
|
||||
if self._use_ubridge:
|
||||
# make sure we have a vmnet per adapter if we use uBridge
|
||||
allocate_vmnet = False
|
||||
@ -271,7 +261,7 @@ class VMwareVM(BaseVM):
|
||||
vnet = "ethernet{}.vnet".format(adapter_number)
|
||||
if vnet in self._vmx_pairs:
|
||||
vmnet = os.path.basename(self._vmx_pairs[vnet])
|
||||
if self.manager.is_managed_vmnet(vmnet) or vmnet == "vmnet0":
|
||||
if self.manager.is_managed_vmnet(vmnet) or vmnet in ("vmnet0", "vmnet1", "vmnet8"):
|
||||
# vmnet already managed, try to allocate a new one
|
||||
allocate_vmnet = True
|
||||
else:
|
||||
@ -311,6 +301,7 @@ class VMwareVM(BaseVM):
|
||||
:param adapter_number: adapter number
|
||||
"""
|
||||
|
||||
block_host_traffic = self.manager.config.get_section_config("VMware").getboolean("block_host_traffic", False)
|
||||
vnet = "ethernet{}.vnet".format(adapter_number)
|
||||
if vnet not in self._vmx_pairs:
|
||||
raise VMwareError("vnet {} not in VMX file".format(vnet))
|
||||
@ -320,18 +311,29 @@ class VMwareVM(BaseVM):
|
||||
yield from self._ubridge_hypervisor.send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=vnet,
|
||||
interface=vmnet_interface))
|
||||
elif sys.platform.startswith("win"):
|
||||
windows_interfaces = get_windows_interfaces()
|
||||
windows_interfaces = interfaces()
|
||||
npf = None
|
||||
source_mac = None
|
||||
for interface in windows_interfaces:
|
||||
if "netcard" in interface and vmnet_interface in interface["netcard"]:
|
||||
npf = interface["id"]
|
||||
source_mac = interface["mac_address"]
|
||||
elif vmnet_interface in interface["name"]:
|
||||
npf = interface["id"]
|
||||
source_mac = interface["mac_address"]
|
||||
if npf:
|
||||
yield from self._ubridge_hypervisor.send('bridge add_nio_ethernet {name} "{interface}"'.format(name=vnet,
|
||||
interface=npf))
|
||||
else:
|
||||
raise VMwareError("Could not find NPF id for VMnet interface {}".format(vmnet_interface))
|
||||
|
||||
if block_host_traffic:
|
||||
if source_mac:
|
||||
yield from self._ubridge_hypervisor.send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=vnet,
|
||||
mac=source_mac))
|
||||
else:
|
||||
log.warn("Could not block host network traffic on {} (no MAC address found)".format(vmnet_interface))
|
||||
|
||||
elif sys.platform.startswith("darwin"):
|
||||
yield from self._ubridge_hypervisor.send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=vnet,
|
||||
interface=vmnet_interface))
|
||||
@ -344,6 +346,8 @@ class VMwareVM(BaseVM):
|
||||
lport=nio.lport,
|
||||
rhost=nio.rhost,
|
||||
rport=nio.rport))
|
||||
elif isinstance(nio, NIOTAP):
|
||||
yield from self._ubridge_hypervisor.send('bridge add_nio_tap {name} {tap}'.format(name=vnet, tap=nio.tap_device))
|
||||
|
||||
if nio.capturing:
|
||||
yield from self._ubridge_hypervisor.send('bridge start_capture {name} "{pcap_file}"'.format(name=vnet,
|
||||
@ -351,6 +355,14 @@ class VMwareVM(BaseVM):
|
||||
|
||||
yield from self._ubridge_hypervisor.send('bridge start {name}'.format(name=vnet))
|
||||
|
||||
# TODO: this only work when using PCAP (NIO Ethernet): current default on Linux is NIO RAW LINUX
|
||||
# source_mac = None
|
||||
# for interface in interfaces():
|
||||
# if interface["name"] == vmnet_interface:
|
||||
# source_mac = interface["mac_address"]
|
||||
# if source_mac:
|
||||
# yield from self._ubridge_hypervisor.send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=vnet, mac=source_mac))
|
||||
|
||||
@asyncio.coroutine
|
||||
def _delete_ubridge_connection(self, adapter_number):
|
||||
"""
|
||||
@ -437,21 +449,25 @@ class VMwareVM(BaseVM):
|
||||
else:
|
||||
yield from self._control_vm("start")
|
||||
|
||||
if self._use_ubridge and self._ubridge_hypervisor:
|
||||
for adapter_number in range(0, self._adapters):
|
||||
nio = self._ethernet_adapters[adapter_number].get_nio(0)
|
||||
if nio:
|
||||
yield from self._add_ubridge_connection(nio, adapter_number)
|
||||
try:
|
||||
if self._use_ubridge and self._ubridge_hypervisor:
|
||||
for adapter_number in range(0, self._adapters):
|
||||
nio = self._ethernet_adapters[adapter_number].get_nio(0)
|
||||
if nio:
|
||||
yield from self._add_ubridge_connection(nio, adapter_number)
|
||||
|
||||
if self._enable_remote_console and self._console is not None:
|
||||
try:
|
||||
if sys.platform.startswith("win"):
|
||||
yield from wait_for_named_pipe_creation(self._get_pipe_name())
|
||||
else:
|
||||
yield from wait_for_file_creation(self._get_pipe_name()) # wait for VMware to create the pipe file.
|
||||
except asyncio.TimeoutError:
|
||||
raise VMwareError('Pipe file "{}" for remote console has not been created by VMware'.format(self._get_pipe_name()))
|
||||
self._start_remote_console()
|
||||
if self._enable_remote_console and self._console is not None:
|
||||
try:
|
||||
if sys.platform.startswith("win"):
|
||||
yield from wait_for_named_pipe_creation(self._get_pipe_name())
|
||||
else:
|
||||
yield from wait_for_file_creation(self._get_pipe_name()) # wait for VMware to create the pipe file.
|
||||
except asyncio.TimeoutError:
|
||||
raise VMwareError('Pipe file "{}" for remote console has not been created by VMware'.format(self._get_pipe_name()))
|
||||
self._start_remote_console()
|
||||
except VMwareError:
|
||||
yield from self.stop()
|
||||
raise
|
||||
|
||||
if self._get_vmx_setting("vhv.enable", "TRUE"):
|
||||
self._hw_virtualization = True
|
||||
@ -471,11 +487,12 @@ class VMwareVM(BaseVM):
|
||||
yield from self._ubridge_hypervisor.stop()
|
||||
|
||||
try:
|
||||
if self.acpi_shutdown:
|
||||
# use ACPI to shutdown the VM
|
||||
yield from self._control_vm("stop", "soft")
|
||||
else:
|
||||
yield from self._control_vm("stop")
|
||||
if (yield from self.is_running()):
|
||||
if self.acpi_shutdown:
|
||||
# use ACPI to shutdown the VM
|
||||
yield from self._control_vm("stop", "soft")
|
||||
else:
|
||||
yield from self._control_vm("stop")
|
||||
finally:
|
||||
self._started = False
|
||||
|
||||
@ -484,17 +501,15 @@ class VMwareVM(BaseVM):
|
||||
self._vmnets.clear()
|
||||
# remove the adapters managed by GNS3
|
||||
for adapter_number in range(0, self._adapters):
|
||||
if self._get_vmx_setting("ethernet{}.vnet".format(adapter_number)) or \
|
||||
self._get_vmx_setting("ethernet{}.connectiontype".format(adapter_number)) is None:
|
||||
vnet = "ethernet{}.vnet".format(adapter_number)
|
||||
vnet = "ethernet{}.vnet".format(adapter_number)
|
||||
if self._get_vmx_setting(vnet) or self._get_vmx_setting("ethernet{}.connectiontype".format(adapter_number)) is None:
|
||||
if vnet in self._vmx_pairs:
|
||||
vmnet = os.path.basename(self._vmx_pairs[vnet])
|
||||
if not self.manager.is_managed_vmnet(vmnet):
|
||||
continue
|
||||
log.debug("removing adapter {}".format(adapter_number))
|
||||
for key in list(self._vmx_pairs.keys()):
|
||||
if key.startswith("ethernet{}.".format(adapter_number)):
|
||||
del self._vmx_pairs[key]
|
||||
self._vmx_pairs[vnet] = "vmnet1"
|
||||
self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
|
||||
|
||||
# re-enable any remaining network adapters
|
||||
for adapter_number in range(self._adapters, self._maximum_adapters):
|
||||
@ -542,14 +557,8 @@ class VMwareVM(BaseVM):
|
||||
Closes this VMware VM.
|
||||
"""
|
||||
|
||||
if self._closed:
|
||||
# VM is already closed
|
||||
return
|
||||
|
||||
log.debug("VMware VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
|
||||
if self._console:
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = None
|
||||
if not (yield from super().close()):
|
||||
return False
|
||||
|
||||
for adapter in self._ethernet_adapters.values():
|
||||
if adapter is not None:
|
||||
@ -567,9 +576,6 @@ class VMwareVM(BaseVM):
|
||||
if self._linked_clone:
|
||||
yield from self.manager.remove_from_vmware_inventory(self._vmx_path)
|
||||
|
||||
log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
|
||||
self._closed = True
|
||||
|
||||
@property
|
||||
def headless(self):
|
||||
"""
|
||||
@ -976,8 +982,8 @@ class VMwareVM(BaseVM):
|
||||
try:
|
||||
adapter = self._ethernet_adapters[adapter_number]
|
||||
except KeyError:
|
||||
raise VMwareError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
|
||||
adapter_number=adapter_number))
|
||||
raise VMwareError("Adapter {adapter_number} doesn't exist on VMware VM '{name}'".format(name=self.name,
|
||||
adapter_number=adapter_number))
|
||||
|
||||
nio = adapter.get_nio(0)
|
||||
|
||||
|
@ -22,6 +22,7 @@ order to run a VPCS VM.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import socket
|
||||
import subprocess
|
||||
import signal
|
||||
import re
|
||||
@ -31,7 +32,7 @@ import shutil
|
||||
from ...utils.asyncio import wait_for_process_termination
|
||||
from ...utils.asyncio import monitor_process
|
||||
from ...utils.asyncio import subprocess_check_output
|
||||
from pkg_resources import parse_version
|
||||
from gns3server.utils import parse_version
|
||||
from .vpcs_error import VPCSError
|
||||
from ..adapters.ethernet_adapter import EthernetAdapter
|
||||
from ..nios.nio_udp import NIOUDP
|
||||
@ -75,10 +76,8 @@ class VPCSVM(BaseVM):
|
||||
Closes this VPCS VM.
|
||||
"""
|
||||
|
||||
log.debug('VPCS "{name}" [{id}] is closing'.format(name=self._name, id=self._id))
|
||||
if self._console:
|
||||
self._manager.port_manager.release_tcp_port(self._console, self._project)
|
||||
self._console = None
|
||||
if not (yield from super().close()):
|
||||
return False
|
||||
|
||||
nio = self._ethernet_adapter.get_nio(0)
|
||||
if isinstance(nio, NIOUDP):
|
||||
@ -87,6 +86,8 @@ class VPCSVM(BaseVM):
|
||||
if self.is_running():
|
||||
self._terminate_process()
|
||||
|
||||
return True
|
||||
|
||||
@asyncio.coroutine
|
||||
def _check_requirements(self):
|
||||
"""
|
||||
@ -139,9 +140,11 @@ class VPCSVM(BaseVM):
|
||||
:returns: path to VPCS
|
||||
"""
|
||||
|
||||
path = self._manager.config.get_section_config("VPCS").get("vpcs_path", "vpcs")
|
||||
if path == "vpcs":
|
||||
path = shutil.which("vpcs")
|
||||
search_path = self._manager.config.get_section_config("VPCS").get("vpcs_path", "vpcs")
|
||||
path = shutil.which(search_path)
|
||||
# shutil.which return None if the path doesn't exists
|
||||
if not path:
|
||||
return search_path
|
||||
return path
|
||||
|
||||
@BaseVM.name.setter
|
||||
@ -419,8 +422,10 @@ class VPCSVM(BaseVM):
|
||||
command.extend(["-m", str(self._manager.get_mac_id(self.id))]) # the unique ID is used to set the MAC address offset
|
||||
command.extend(["-i", "1"]) # option to start only one VPC instance
|
||||
command.extend(["-F"]) # option to avoid the daemonization of VPCS
|
||||
if self._vpcs_version > parse_version("0.8"):
|
||||
command.extend(["-R"]) # disable relay feature of VPCS (starting with VPCS 0.8)
|
||||
if self._vpcs_version >= parse_version("0.8b"):
|
||||
command.extend(["-R"]) # disable the relay feature of VPCS (starting with VPCS 0.8)
|
||||
else:
|
||||
log.warn("The VPCS relay feature could not be disabled because the VPCS version is below 0.8b")
|
||||
|
||||
nio = self._ethernet_adapter.get_nio(0)
|
||||
if nio:
|
||||
@ -428,7 +433,10 @@ class VPCSVM(BaseVM):
|
||||
# UDP tunnel
|
||||
command.extend(["-s", str(nio.lport)]) # source UDP port
|
||||
command.extend(["-c", str(nio.rport)]) # destination UDP port
|
||||
command.extend(["-t", nio.rhost]) # destination host
|
||||
try:
|
||||
command.extend(["-t", socket.gethostbyname(nio.rhost)]) # destination host, we need to resolve the hostname because VPCS doesn't support it
|
||||
except socket.gaierror as e:
|
||||
raise VPCSError("Can't resolve hostname {}".format(nio.rhost))
|
||||
|
||||
elif isinstance(nio, NIOTAP):
|
||||
# TAP interface
|
||||
|
@ -26,6 +26,7 @@ import datetime
|
||||
import sys
|
||||
import locale
|
||||
import argparse
|
||||
import psutil
|
||||
import asyncio
|
||||
|
||||
from gns3server.server import Server
|
||||
@ -90,6 +91,7 @@ def parse_arguments(argv):
|
||||
parser.add_argument("--host", help="run on the given host/IP address")
|
||||
parser.add_argument("--port", help="run on the given port", type=int)
|
||||
parser.add_argument("--ssl", action="store_true", help="run in SSL mode")
|
||||
parser.add_argument("--controller", action="store_true", help="start as a GNS3 controller")
|
||||
parser.add_argument("--config", help="Configuration file")
|
||||
parser.add_argument("--certfile", help="SSL cert file")
|
||||
parser.add_argument("--certkey", help="SSL key file")
|
||||
@ -111,12 +113,13 @@ def parse_arguments(argv):
|
||||
config = Config.instance().get_section_config("Server")
|
||||
defaults = {
|
||||
"host": config.get("host", "0.0.0.0"),
|
||||
"port": config.get("port", 8000),
|
||||
"port": config.get("port", 3080),
|
||||
"ssl": config.getboolean("ssl", False),
|
||||
"certfile": config.get("certfile", ""),
|
||||
"certkey": config.get("certkey", ""),
|
||||
"record": config.get("record", ""),
|
||||
"local": config.getboolean("local", False),
|
||||
"controller": config.getboolean("controller", False),
|
||||
"allow": config.getboolean("allow_remote_console", False),
|
||||
"quiet": config.getboolean("quiet", False),
|
||||
"debug": config.getboolean("debug", False),
|
||||
@ -133,6 +136,7 @@ def set_config(args):
|
||||
config = Config.instance()
|
||||
server_config = config.get_section_config("Server")
|
||||
server_config["local"] = str(args.local)
|
||||
server_config["controller"] = str(args.controller)
|
||||
server_config["allow_remote_console"] = str(args.allow)
|
||||
server_config["host"] = args.host
|
||||
server_config["port"] = str(args.port)
|
||||
@ -177,6 +181,21 @@ def pid_lock(path):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def kill_ghosts():
|
||||
"""
|
||||
Kill process from previous GNS3 session
|
||||
"""
|
||||
detect_process = ["vpcs", "ubridge", "dynamips"]
|
||||
for proc in psutil.process_iter():
|
||||
try:
|
||||
name = proc.name().lower().split(".")[0]
|
||||
if name in detect_process:
|
||||
proc.kill()
|
||||
log.warning("Killed ghost process %s", name)
|
||||
except (psutil.NoSuchProcess, psutil.AccessDenied):
|
||||
pass
|
||||
|
||||
|
||||
def run():
|
||||
args = parse_arguments(sys.argv[1:])
|
||||
|
||||
@ -186,6 +205,7 @@ def run():
|
||||
|
||||
if args.pid:
|
||||
pid_lock(args.pid)
|
||||
kill_ghosts()
|
||||
|
||||
level = logging.INFO
|
||||
if args.debug:
|
||||
@ -201,6 +221,9 @@ def run():
|
||||
|
||||
set_config(args)
|
||||
server_config = Config.instance().get_section_config("Server")
|
||||
if server_config.getboolean("controller"):
|
||||
log.info("Controller mode is enabled.")
|
||||
|
||||
if server_config.getboolean("local"):
|
||||
log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem")
|
||||
|
||||
|
@ -23,58 +23,49 @@ DOCKER_CREATE_SCHEMA = {
|
||||
"properties": {
|
||||
"vm_id": {
|
||||
"description": "Docker VM instance identifier",
|
||||
"oneOf": [
|
||||
{"type": "string",
|
||||
"minLength": 36,
|
||||
"maxLength": 36,
|
||||
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"},
|
||||
{"type": "integer"} # for legacy projects
|
||||
]
|
||||
"type": "string",
|
||||
"minLength": 36,
|
||||
"maxLength": 36,
|
||||
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
|
||||
},
|
||||
"name": {
|
||||
"description": "Docker container name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"startcmd": {
|
||||
"description": "Docker CMD entry",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"imagename": {
|
||||
"description": "Docker image name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"adapters": {
|
||||
"description": "number of adapters",
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 64,
|
||||
},
|
||||
"adapter_type": {
|
||||
"description": "Docker adapter type",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"console": {
|
||||
"description": "console name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
"description": "console TCP port",
|
||||
"minimum": 1,
|
||||
"maximum": 65535,
|
||||
"type": ["integer", "null"]
|
||||
},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
DOCKER_UPDATE_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Request validation to update a Docker container",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "Docker container name",
|
||||
"console_type": {
|
||||
"description": "console type",
|
||||
"enum": ["telnet", "vnc", "http", "https"]
|
||||
},
|
||||
"console_resolution": {
|
||||
"description": "console resolution for VNC",
|
||||
"type": ["string", "null"],
|
||||
"pattern": "^[0-9]+x[0-9]+$"
|
||||
},
|
||||
"console_http_port": {
|
||||
"description": "Internal port in the container of the HTTP server",
|
||||
"type": "integer",
|
||||
},
|
||||
"console_http_path": {
|
||||
"description": "Path of the web interface",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"aux": {
|
||||
"description": "auxilary TCP port",
|
||||
"minimum": 1,
|
||||
"maximum": 65535,
|
||||
"type": ["integer", "null"]
|
||||
},
|
||||
"start_command": {
|
||||
"description": "Docker CMD entry",
|
||||
"type": ["string", "null"],
|
||||
"minLength": 0,
|
||||
},
|
||||
"image": {
|
||||
"description": "Docker image name",
|
||||
@ -83,32 +74,78 @@ DOCKER_UPDATE_SCHEMA = {
|
||||
},
|
||||
"adapters": {
|
||||
"description": "number of adapters",
|
||||
"type": "integer",
|
||||
"type": ["integer", "null"],
|
||||
"minimum": 0,
|
||||
"maximum": 64,
|
||||
},
|
||||
"adapter_type": {
|
||||
"description": "Docker adapter type",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
"maximum": 99,
|
||||
},
|
||||
"environment": {
|
||||
"description": "Docker environment",
|
||||
"type": ["string", "null"],
|
||||
"minLength": 0,
|
||||
}
|
||||
|
||||
},
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
DOCKER_CAPTURE_SCHEMA = {
|
||||
|
||||
DOCKER_UPDATE_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Request validation to start a packet capture on a Docker container port",
|
||||
"description": "Request validation to create a new Docker container",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"capture_file_name": {
|
||||
"description": "Capture file name",
|
||||
"name": {
|
||||
"description": "Docker container name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"console": {
|
||||
"description": "console TCP port",
|
||||
"minimum": 1,
|
||||
"maximum": 65535,
|
||||
"type": ["integer", "null"]
|
||||
},
|
||||
"console_resolution": {
|
||||
"description": "console resolution for VNC",
|
||||
"type": ["string", "null"],
|
||||
"pattern": "^[0-9]+x[0-9]+$"
|
||||
},
|
||||
"console_type": {
|
||||
"description": "console type",
|
||||
"enum": ["telnet", "vnc", "http", "https"]
|
||||
},
|
||||
"console_http_port": {
|
||||
"description": "Internal port in the container of the HTTP server",
|
||||
"type": "integer",
|
||||
},
|
||||
"console_http_path": {
|
||||
"description": "Path of the web interface",
|
||||
"type": "string",
|
||||
},
|
||||
"aux": {
|
||||
"description": "auxilary TCP port",
|
||||
"minimum": 1,
|
||||
"maximum": 65535,
|
||||
"type": ["integer", "null"]
|
||||
},
|
||||
"start_command": {
|
||||
"description": "Docker CMD entry",
|
||||
"type": ["string", "null"],
|
||||
"minLength": 0,
|
||||
},
|
||||
"environment": {
|
||||
"description": "Docker environment",
|
||||
"type": ["string", "null"],
|
||||
"minLength": 0,
|
||||
},
|
||||
"adapters": {
|
||||
"description": "number of adapters",
|
||||
"type": ["integer", "null"],
|
||||
"minimum": 0,
|
||||
"maximum": 99,
|
||||
}
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["capture_file_name"]
|
||||
}
|
||||
|
||||
DOCKER_OBJECT_SCHEMA = {
|
||||
@ -128,12 +165,41 @@ DOCKER_OBJECT_SCHEMA = {
|
||||
"maxLength": 36,
|
||||
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
|
||||
},
|
||||
"cid": {
|
||||
"aux": {
|
||||
"description": "auxilary TCP port",
|
||||
"minimum": 1,
|
||||
"maximum": 65535,
|
||||
"type": "integer"
|
||||
},
|
||||
"console": {
|
||||
"description": "console TCP port",
|
||||
"minimum": 1,
|
||||
"maximum": 65535,
|
||||
"type": "integer"
|
||||
},
|
||||
"console_resolution": {
|
||||
"description": "console resolution for VNC",
|
||||
"type": "string",
|
||||
"pattern": "^[0-9]+x[0-9]+$"
|
||||
},
|
||||
"console_type": {
|
||||
"description": "console type",
|
||||
"enum": ["telnet", "vnc", "http", "https"]
|
||||
},
|
||||
"console_http_port": {
|
||||
"description": "Internal port in the container of the HTTP server",
|
||||
"type": "integer",
|
||||
},
|
||||
"console_http_path": {
|
||||
"description": "Path of the web interface",
|
||||
"type": "string",
|
||||
},
|
||||
"container_id": {
|
||||
"description": "Docker container ID",
|
||||
"type": "string",
|
||||
"minLength": 64,
|
||||
"minLength": 12,
|
||||
"maxLength": 64,
|
||||
"pattern": "^[a-zA-Z0-9_.-]{64}$"
|
||||
"pattern": "^[a-f0-9]+$"
|
||||
},
|
||||
"project_id": {
|
||||
"description": "Project UUID",
|
||||
@ -149,16 +215,44 @@ DOCKER_OBJECT_SCHEMA = {
|
||||
},
|
||||
"adapters": {
|
||||
"description": "number of adapters",
|
||||
"type": "integer",
|
||||
"type": ["integer", "null"],
|
||||
"minimum": 0,
|
||||
"maximum": 64,
|
||||
"maximum": 99,
|
||||
},
|
||||
"adapter_type": {
|
||||
"description": "Docker adapter type",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
"start_command": {
|
||||
"description": "Docker CMD entry",
|
||||
"type": ["string", "null"],
|
||||
"minLength": 0,
|
||||
},
|
||||
"environment": {
|
||||
"description": "Docker environment",
|
||||
"type": ["string", "null"],
|
||||
"minLength": 0,
|
||||
},
|
||||
"vm_directory": {
|
||||
"decription": "Path to the VM working directory",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["vm_id", "project_id"]
|
||||
"required": ["vm_id", "project_id", "image", "container_id", "adapters", "aux", "console", "console_type", "console_resolution", "start_command", "environment", "vm_directory"]
|
||||
}
|
||||
|
||||
|
||||
DOCKER_LIST_IMAGES_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Docker list of images",
|
||||
"type": "array",
|
||||
"items": [
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"image": {
|
||||
"description": "Docker image name",
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@ -346,23 +346,3 @@ DEVICE_NIO_SCHEMA = {
|
||||
"additionalProperties": False,
|
||||
"required": ["nio"]
|
||||
}
|
||||
|
||||
DEVICE_CAPTURE_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Request validation to start a packet capture on an Device instance port",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"capture_file_name": {
|
||||
"description": "Capture file name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"data_link_type": {
|
||||
"description": "PCAP data link type",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["capture_file_name", "data_link_type"]
|
||||
}
|
||||
|
@ -491,25 +491,6 @@ VM_UPDATE_SCHEMA = {
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
VM_CAPTURE_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Request validation to start a packet capture on a Dynamips VM instance port",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"capture_file_name": {
|
||||
"description": "Capture file name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"data_link_type": {
|
||||
"description": "PCAP data link type",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["capture_file_name", "data_link_type"]
|
||||
}
|
||||
|
||||
VM_OBJECT_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
|
@ -266,25 +266,6 @@ IOU_OBJECT_SCHEMA = {
|
||||
"command_line"]
|
||||
}
|
||||
|
||||
IOU_CAPTURE_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Request validation to start a packet capture on a IOU instance",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"capture_file_name": {
|
||||
"description": "Capture file name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"data_link_type": {
|
||||
"description": "PCAP data link type",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["capture_file_name", "data_link_type"]
|
||||
}
|
||||
|
||||
IOU_CONFIGS_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
|
@ -147,20 +147,6 @@ VBOX_UPDATE_SCHEMA = {
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
VBOX_CAPTURE_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Request validation to start a packet capture on a VirtualBox VM instance port",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"capture_file_name": {
|
||||
"description": "Capture file name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["capture_file_name"]
|
||||
}
|
||||
|
||||
VBOX_OBJECT_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
|
@ -41,3 +41,24 @@ VM_LIST_IMAGES_SCHEMA = {
|
||||
],
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
|
||||
VM_CAPTURE_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Request validation to start a packet capture on a port",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"capture_file_name": {
|
||||
"description": "Capture file name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
"data_link_type": {
|
||||
"description": "PCAP data link type",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
}
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["capture_file_name"]
|
||||
}
|
||||
|
@ -140,20 +140,6 @@ VMWARE_UPDATE_SCHEMA = {
|
||||
"additionalProperties": False,
|
||||
}
|
||||
|
||||
VMWARE_CAPTURE_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Request validation to start a packet capture on a VMware VM instance port",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"capture_file_name": {
|
||||
"description": "Capture file name",
|
||||
"type": "string",
|
||||
"minLength": 1,
|
||||
},
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["capture_file_name"]
|
||||
}
|
||||
|
||||
VMWARE_OBJECT_SCHEMA = {
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
|
@ -30,7 +30,6 @@ import time
|
||||
import atexit
|
||||
|
||||
from .web.route import Route
|
||||
from .web.request_handler import RequestHandler
|
||||
from .config import Config
|
||||
from .modules import MODULES
|
||||
from .modules.port_manager import PortManager
|
||||
@ -106,7 +105,7 @@ class Server:
|
||||
|
||||
def _signal_handling(self):
|
||||
|
||||
def signal_handler(signame):
|
||||
def signal_handler(signame, *args):
|
||||
log.warning("Server has got signal {}, exiting...".format(signame))
|
||||
asyncio.async(self.shutdown_server())
|
||||
|
||||
@ -199,8 +198,13 @@ class Server:
|
||||
Starts the server.
|
||||
"""
|
||||
|
||||
server_logger = logging.getLogger('aiohttp.server')
|
||||
# In debug mode we don't use the standard request log but a more complete in response.py
|
||||
if log.getEffectiveLevel() == logging.DEBUG:
|
||||
server_logger.setLevel(logging.CRITICAL)
|
||||
|
||||
logger = logging.getLogger("asyncio")
|
||||
logger.setLevel(logging.WARNING)
|
||||
logger.setLevel(logging.ERROR)
|
||||
|
||||
server_config = Config.instance().get_section_config("Server")
|
||||
if sys.platform.startswith("win"):
|
||||
@ -239,7 +243,7 @@ class Server:
|
||||
m.port_manager = self._port_manager
|
||||
|
||||
log.info("Starting server on {}:{}".format(self._host, self._port))
|
||||
self._handler = app.make_handler(handler=RequestHandler)
|
||||
self._handler = app.make_handler()
|
||||
server = self._run_application(self._handler, ssl_context)
|
||||
self._loop.run_until_complete(server)
|
||||
self._signal_handling()
|
||||
|
@ -19,13 +19,14 @@
|
||||
Represents a uBridge hypervisor and starts/stops the associated uBridge process.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import asyncio
|
||||
import socket
|
||||
import re
|
||||
|
||||
from pkg_resources import parse_version
|
||||
from gns3server.utils import parse_version
|
||||
from gns3server.utils.asyncio import wait_for_process_termination
|
||||
from gns3server.utils.asyncio import subprocess_check_output
|
||||
from .ubridge_hypervisor import UBridgeHypervisor
|
||||
@ -119,15 +120,15 @@ class Hypervisor(UBridgeHypervisor):
|
||||
@asyncio.coroutine
|
||||
def _check_ubridge_version(self):
|
||||
"""
|
||||
Checks if the ubridge executable version is >= 0.9.1
|
||||
Checks if the ubridge executable version is >= 0.9.4
|
||||
"""
|
||||
try:
|
||||
output = yield from subprocess_check_output(self._path, "-v", cwd=self._working_dir)
|
||||
match = re.search("ubridge version ([0-9a-z\.]+)", output)
|
||||
if match:
|
||||
version = match.group(1)
|
||||
if parse_version(version) < parse_version("0.9.1"):
|
||||
raise UbridgeError("uBridge executable version must be >= 0.9.1")
|
||||
if parse_version(version) < parse_version("0.9.4"):
|
||||
raise UbridgeError("uBridge executable version must be >= 0.9.4")
|
||||
else:
|
||||
raise UbridgeError("Could not determine uBridge version for {}".format(self._path))
|
||||
except (OSError, subprocess.SubprocessError) as e:
|
||||
@ -140,6 +141,12 @@ class Hypervisor(UBridgeHypervisor):
|
||||
"""
|
||||
|
||||
yield from self._check_ubridge_version()
|
||||
env = os.environ.copy()
|
||||
if sys.platform.startswith("win"):
|
||||
# add the Npcap directory to $PATH to force Dynamips to use npcap DLL instead of Winpcap (if installed)
|
||||
system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap")
|
||||
if os.path.isdir(system_root):
|
||||
env["PATH"] = system_root + ';' + env["PATH"]
|
||||
try:
|
||||
command = self._build_command()
|
||||
log.info("starting ubridge: {}".format(command))
|
||||
@ -149,7 +156,8 @@ class Hypervisor(UBridgeHypervisor):
|
||||
self._process = yield from asyncio.create_subprocess_exec(*command,
|
||||
stdout=fd,
|
||||
stderr=subprocess.STDOUT,
|
||||
cwd=self._working_dir)
|
||||
cwd=self._working_dir,
|
||||
env=env)
|
||||
|
||||
log.info("ubridge started PID={}".format(self._process.pid))
|
||||
except (OSError, subprocess.SubprocessError) as e:
|
||||
@ -225,4 +233,6 @@ class Hypervisor(UBridgeHypervisor):
|
||||
|
||||
command = [self._path]
|
||||
command.extend(["-H", "{}:{}".format(self._host, self._port)])
|
||||
if log.getEffectiveLevel() == logging.DEBUG:
|
||||
command.extend(["-d", "2"])
|
||||
return command
|
||||
|
@ -24,3 +24,10 @@ class UbridgeError(Exception):
|
||||
|
||||
def __init__(self, message):
|
||||
Exception.__init__(self, message)
|
||||
|
||||
|
||||
class UbridgeNamespaceError(Exception):
|
||||
"""
|
||||
Raised if ubridge can not move a container to a namespace
|
||||
"""
|
||||
pass
|
||||
|
@ -16,6 +16,8 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
import re
|
||||
import textwrap
|
||||
import posixpath
|
||||
|
||||
|
||||
@ -26,3 +28,62 @@ def force_unix_path(path):
|
||||
|
||||
path = path.replace("\\", "/")
|
||||
return posixpath.normpath(path)
|
||||
|
||||
|
||||
def macaddress_to_int(mac_address):
|
||||
"""
|
||||
Convert a macaddress with the format 00:0c:29:11:b0:0a to a int
|
||||
|
||||
:param mac_address: The mac address
|
||||
|
||||
:returns: Integer
|
||||
"""
|
||||
return int(mac_address.replace(":", ""), 16)
|
||||
|
||||
|
||||
def int_to_macaddress(integer):
|
||||
"""
|
||||
Convert an integer to a mac address
|
||||
"""
|
||||
return ":".join(textwrap.wrap("%012x" % (integer), width=2))
|
||||
|
||||
|
||||
def parse_version(version):
|
||||
"""
|
||||
Return a comparable tuple from a version string. We try to force tuple to semver with version like 1.2.0
|
||||
|
||||
Replace pkg_resources.parse_version which now display a warning when use for comparing version with tuple
|
||||
|
||||
:returns: Version string as comparable tuple
|
||||
"""
|
||||
|
||||
release_type_found = False
|
||||
version_infos = re.split('(\.|[a-z]+)', version)
|
||||
version = []
|
||||
for info in version_infos:
|
||||
if info == '.' or len(info) == 0:
|
||||
continue
|
||||
try:
|
||||
info = int(info)
|
||||
# We pad with zero to compare only on string
|
||||
# This avoid issue when comparing version with different length
|
||||
version.append("%06d" % (info,))
|
||||
except ValueError:
|
||||
# Force to a version with three number
|
||||
if len(version) == 1:
|
||||
version.append("00000")
|
||||
if len(version) == 2:
|
||||
version.append("000000")
|
||||
# We want rc to be at lower level than dev version
|
||||
if info == 'rc':
|
||||
info = 'c'
|
||||
version.append(info)
|
||||
release_type_found = True
|
||||
if release_type_found is False:
|
||||
# Force to a version with three number
|
||||
if len(version) == 1:
|
||||
version.append("00000")
|
||||
if len(version) == 2:
|
||||
version.append("000000")
|
||||
version.append("final")
|
||||
return tuple(version)
|
||||
|
@ -16,24 +16,26 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
import functools
|
||||
import asyncio
|
||||
import sys
|
||||
import os
|
||||
|
||||
|
||||
@asyncio.coroutine
|
||||
def wait_run_in_executor(func, *args):
|
||||
def wait_run_in_executor(func, *args, **kwargs):
|
||||
"""
|
||||
Run blocking code in a different thread and wait
|
||||
for the result.
|
||||
|
||||
:param func: Run this function in a different thread
|
||||
:param args: Parameters of the function
|
||||
:param kwargs: Keyword parameters of the function
|
||||
:returns: Return the result of the function
|
||||
"""
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
future = loop.run_in_executor(None, func, *args)
|
||||
future = loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
|
||||
yield from asyncio.wait([future])
|
||||
return future.result()
|
||||
|
||||
@ -54,7 +56,7 @@ def subprocess_check_output(*args, cwd=None, env=None):
|
||||
if output is None:
|
||||
return ""
|
||||
# If we received garbage we ignore invalid characters
|
||||
# it should happend only when user try to use another binary
|
||||
# it should happend only when user try to use another binary
|
||||
# and the code of VPCS, dynamips... Will detect it's not the correct binary
|
||||
return output.decode("utf-8", errors="ignore")
|
||||
|
||||
@ -125,3 +127,24 @@ def wait_for_named_pipe_creation(pipe_path, timeout=60):
|
||||
else:
|
||||
return
|
||||
raise asyncio.TimeoutError()
|
||||
|
||||
|
||||
def locked_coroutine(f):
|
||||
"""
|
||||
Method decorator that replace asyncio.coroutine that warranty
|
||||
that this specific method of this class instance will not we
|
||||
executed twice at the same time
|
||||
"""
|
||||
@asyncio.coroutine
|
||||
def new_function(*args, **kwargs):
|
||||
|
||||
# In the instance of the class we will store
|
||||
# a lock has an attribute.
|
||||
lock_var_name = "__" + f.__name__ + "_lock"
|
||||
if not hasattr(args[0], lock_var_name):
|
||||
setattr(args[0], lock_var_name, asyncio.Lock())
|
||||
|
||||
with (yield from getattr(args[0], lock_var_name)):
|
||||
return (yield from f(*args, **kwargs))
|
||||
|
||||
return new_function
|
116
gns3server/utils/asyncio/raw_command_server.py
Normal file
116
gns3server/utils/asyncio/raw_command_server.py
Normal file
@ -0,0 +1,116 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2014 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import re
|
||||
import asyncio
|
||||
import asyncio.subprocess
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
READ_SIZE = 4096
|
||||
|
||||
|
||||
class AsyncioRawCommandServer:
|
||||
"""
|
||||
Expose a process on the network his stdoud and stdin will be forward
|
||||
on network
|
||||
"""
|
||||
|
||||
def __init__(self, command, replaces=[]):
|
||||
"""
|
||||
:param command: Command to run
|
||||
:param replaces: List of tuple to replace in the output ex: [(b":8080", b":6000")]
|
||||
"""
|
||||
self._command = command
|
||||
self._replaces = replaces
|
||||
# We limit number of process
|
||||
self._lock = asyncio.Semaphore(value=4)
|
||||
|
||||
@asyncio.coroutine
|
||||
def run(self, network_reader, network_writer):
|
||||
yield from self._lock.acquire()
|
||||
process = yield from asyncio.subprocess.create_subprocess_exec(*self._command,
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.STDOUT,
|
||||
stdin=asyncio.subprocess.PIPE)
|
||||
try:
|
||||
yield from self._process(network_reader, network_writer, process.stdout, process.stdin)
|
||||
except ConnectionResetError:
|
||||
network_writer.close()
|
||||
if process.returncode is None:
|
||||
process.kill()
|
||||
yield from process.wait()
|
||||
self._lock.release()
|
||||
|
||||
@asyncio.coroutine
|
||||
def _process(self, network_reader, network_writer, process_reader, process_writer):
|
||||
network_read = asyncio.async(network_reader.read(READ_SIZE))
|
||||
reader_read = asyncio.async(process_reader.read(READ_SIZE))
|
||||
timeout = 30
|
||||
|
||||
while True:
|
||||
done, pending = yield from asyncio.wait(
|
||||
[
|
||||
network_read,
|
||||
reader_read
|
||||
],
|
||||
timeout=timeout,
|
||||
return_when=asyncio.FIRST_COMPLETED)
|
||||
if len(done) == 0:
|
||||
raise ConnectionResetError()
|
||||
for coro in done:
|
||||
data = coro.result()
|
||||
if coro == network_read:
|
||||
if network_reader.at_eof():
|
||||
raise ConnectionResetError()
|
||||
|
||||
network_read = asyncio.async(network_reader.read(READ_SIZE))
|
||||
|
||||
process_writer.write(data)
|
||||
yield from process_writer.drain()
|
||||
elif coro == reader_read:
|
||||
if process_reader.at_eof():
|
||||
raise ConnectionResetError()
|
||||
|
||||
reader_read = asyncio.async(process_reader.read(READ_SIZE))
|
||||
|
||||
for replace in self._replaces:
|
||||
data = data.replace(replace[0], replace[1])
|
||||
timeout = 2 # We reduce the timeout when the process start to return stuff to avoid problem with server not closing the connection
|
||||
|
||||
network_writer.write(data)
|
||||
yield from network_writer.drain()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
command = ["nc", "localhost", "80"]
|
||||
server = AsyncioRawCommandServer(command)
|
||||
coro = asyncio.start_server(server.run, '127.0.0.1', 4444, loop=loop)
|
||||
s = loop.run_until_complete(coro)
|
||||
|
||||
try:
|
||||
loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
# Close the server
|
||||
s.close()
|
||||
loop.run_until_complete(s.wait_closed())
|
||||
loop.close()
|
285
gns3server/utils/asyncio/telnet_server.py
Normal file
285
gns3server/utils/asyncio/telnet_server.py
Normal file
@ -0,0 +1,285 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2014 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import re
|
||||
import asyncio
|
||||
import asyncio.subprocess
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Mostly from https://code.google.com/p/miniboa/source/browse/trunk/miniboa/telnet.py
|
||||
|
||||
# Telnet Commands
|
||||
SE = 240 # End of sub-negotiation parameters
|
||||
NOP = 241 # No operation
|
||||
DATMK = 242 # Data stream portion of a sync.
|
||||
BREAK = 243 # NVT Character BRK
|
||||
IP = 244 # Interrupt Process
|
||||
AO = 245 # Abort Output
|
||||
AYT = 246 # Are you there
|
||||
EC = 247 # Erase Character
|
||||
EL = 248 # Erase Line
|
||||
GA = 249 # The Go Ahead Signal
|
||||
SB = 250 # Sub-option to follow
|
||||
WILL = 251 # Will; request or confirm option begin
|
||||
WONT = 252 # Wont; deny option request
|
||||
DO = 253 # Do = Request or confirm remote option
|
||||
DONT = 254 # Don't = Demand or confirm option halt
|
||||
IAC = 255 # Interpret as Command
|
||||
SEND = 1 # Sub-process negotiation SEND command
|
||||
IS = 0 # Sub-process negotiation IS command
|
||||
|
||||
# Telnet Options
|
||||
BINARY = 0 # Transmit Binary
|
||||
ECHO = 1 # Echo characters back to sender
|
||||
RECON = 2 # Reconnection
|
||||
SGA = 3 # Suppress Go-Ahead
|
||||
TMARK = 6 # Timing Mark
|
||||
TTYPE = 24 # Terminal Type
|
||||
NAWS = 31 # Negotiate About Window Size
|
||||
LINEMO = 34 # Line Mode
|
||||
|
||||
READ_SIZE = 1024
|
||||
|
||||
|
||||
class AsyncioTelnetServer:
|
||||
|
||||
def __init__(self, reader=None, writer=None, binary=True, echo=False):
|
||||
self._reader = reader
|
||||
self._writer = writer
|
||||
self._clients = set()
|
||||
self._lock = asyncio.Lock()
|
||||
self._reader_process = None
|
||||
self._current_read = None
|
||||
|
||||
self._binary = binary
|
||||
# If echo is true when the client send data
|
||||
# the data is echo on his terminal by telnet otherwise
|
||||
# it's our job (or the wrapped app) to send back the data
|
||||
self._echo = echo
|
||||
|
||||
@asyncio.coroutine
|
||||
def run(self, network_reader, network_writer):
|
||||
# Keep track of connected clients
|
||||
self._clients.add(network_writer)
|
||||
|
||||
try:
|
||||
# Send initial telnet session opening
|
||||
if self._echo:
|
||||
network_writer.write(bytes([IAC, WILL, ECHO]))
|
||||
else:
|
||||
network_writer.write(bytes([
|
||||
IAC, WONT, ECHO,
|
||||
IAC, DONT, ECHO]))
|
||||
|
||||
if self._binary:
|
||||
network_writer.write(bytes([
|
||||
IAC, WILL, SGA,
|
||||
IAC, WILL, BINARY,
|
||||
IAC, DO, BINARY]))
|
||||
else:
|
||||
network_writer.write(bytes([
|
||||
IAC, WONT, SGA,
|
||||
IAC, DONT, SGA,
|
||||
IAC, WONT, BINARY,
|
||||
IAC, DONT, BINARY]))
|
||||
yield from network_writer.drain()
|
||||
|
||||
yield from self._process(network_reader, network_writer)
|
||||
except ConnectionResetError:
|
||||
with (yield from self._lock):
|
||||
|
||||
network_writer.close()
|
||||
|
||||
if self._reader_process == network_reader:
|
||||
self._reader_process = None
|
||||
# Cancel current read from this reader
|
||||
self._current_read.cancel()
|
||||
self._clients.remove(network_writer)
|
||||
|
||||
@asyncio.coroutine
|
||||
def _get_reader(self, network_reader):
|
||||
"""
|
||||
Get a reader or None if another reader is already reading.
|
||||
"""
|
||||
with (yield from self._lock):
|
||||
if self._reader_process is None:
|
||||
self._reader_process = network_reader
|
||||
if self._reader_process == network_reader:
|
||||
self._current_read = asyncio.async(self._reader.read(READ_SIZE))
|
||||
return self._current_read
|
||||
return None
|
||||
|
||||
@asyncio.coroutine
|
||||
def _process(self, network_reader, network_writer):
|
||||
network_read = asyncio.async(network_reader.read(READ_SIZE))
|
||||
reader_read = yield from self._get_reader(network_reader)
|
||||
|
||||
while True:
|
||||
if reader_read is None:
|
||||
reader_read = yield from self._get_reader(network_reader)
|
||||
if reader_read is None:
|
||||
done, pending = yield from asyncio.wait(
|
||||
[
|
||||
network_read,
|
||||
],
|
||||
timeout=1,
|
||||
return_when=asyncio.FIRST_COMPLETED)
|
||||
else:
|
||||
done, pending = yield from asyncio.wait(
|
||||
[
|
||||
network_read,
|
||||
reader_read
|
||||
],
|
||||
return_when=asyncio.FIRST_COMPLETED)
|
||||
for coro in done:
|
||||
data = coro.result()
|
||||
|
||||
if coro == network_read:
|
||||
if network_reader.at_eof():
|
||||
raise ConnectionResetError()
|
||||
|
||||
network_read = asyncio.async(network_reader.read(READ_SIZE))
|
||||
|
||||
if IAC in data:
|
||||
data = yield from self._IAC_parser(data, network_reader, network_writer)
|
||||
if len(data) == 0:
|
||||
continue
|
||||
|
||||
if not self._binary:
|
||||
data = data.replace(b"\r\n", b"\n")
|
||||
|
||||
if self._writer:
|
||||
self._writer.write(data)
|
||||
yield from self._writer.drain()
|
||||
elif coro == reader_read:
|
||||
if self._reader.at_eof():
|
||||
raise ConnectionResetError()
|
||||
|
||||
reader_read = yield from self._get_reader(network_reader)
|
||||
|
||||
# Replicate the output on all clients
|
||||
for writer in self._clients:
|
||||
writer.write(data)
|
||||
yield from writer.drain()
|
||||
|
||||
def _IAC_parser(self, buf, network_reader, network_writer):
|
||||
"""
|
||||
Processes and removes any Telnet commands from the buffer.
|
||||
|
||||
:param buf: buffer
|
||||
:returns: buffer minus Telnet commands
|
||||
"""
|
||||
|
||||
skip_to = 0
|
||||
while True:
|
||||
# Locate an IAC to process
|
||||
iac_loc = buf.find(IAC, skip_to)
|
||||
if iac_loc < 0:
|
||||
break
|
||||
|
||||
# Get the TELNET command
|
||||
iac_cmd = bytearray([IAC])
|
||||
try:
|
||||
iac_cmd.append(buf[iac_loc + 1])
|
||||
except IndexError:
|
||||
d = yield from network_reader.read(1)
|
||||
buf.extend(d)
|
||||
iac_cmd.append(buf[iac_loc + 1])
|
||||
|
||||
# Is this just a 2-byte TELNET command?
|
||||
if iac_cmd[1] not in [WILL, WONT, DO, DONT]:
|
||||
if iac_cmd[1] == AYT:
|
||||
log.debug("Telnet server received Are-You-There (AYT)")
|
||||
network_writer.write(b'\r\nYour Are-You-There received. I am here.\r\n')
|
||||
elif iac_cmd[1] == IAC:
|
||||
# It's data, not an IAC
|
||||
iac_cmd.pop()
|
||||
# This prevents the 0xff from being
|
||||
# interrupted as yet another IAC
|
||||
skip_to = iac_loc + 1
|
||||
log.debug("Received IAC IAC")
|
||||
elif iac_cmd[1] == NOP:
|
||||
pass
|
||||
else:
|
||||
log.debug("Unhandled telnet command: "
|
||||
"{0:#x} {1:#x}".format(*iac_cmd))
|
||||
|
||||
# This must be a 3-byte TELNET command
|
||||
else:
|
||||
try:
|
||||
iac_cmd.append(buf[iac_loc + 2])
|
||||
except IndexError:
|
||||
d = yield from network_reader.read(1)
|
||||
buf.extend(d)
|
||||
iac_cmd.append(buf[iac_loc + 2])
|
||||
# We do ECHO, SGA, and BINARY. Period.
|
||||
if iac_cmd[1] == DO:
|
||||
if iac_cmd[2] not in [ECHO, SGA, BINARY]:
|
||||
network_writer.write(bytes([IAC, WONT, iac_cmd[2]]))
|
||||
log.debug("Telnet WON'T {:#x}".format(iac_cmd[2]))
|
||||
else:
|
||||
if iac_cmd[2] == SGA:
|
||||
if self._binary:
|
||||
network_writer.write(bytes([IAC, WILL, iac_cmd[2]]))
|
||||
else:
|
||||
network_writer.write(bytes([IAC, WONT, iac_cmd[2]]))
|
||||
log.debug("Telnet WON'T {:#x}".format(iac_cmd[2]))
|
||||
|
||||
elif iac_cmd[1] == DONT:
|
||||
log.debug("Unhandled DONT telnet command: "
|
||||
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
|
||||
elif iac_cmd[1] == WILL:
|
||||
log.debug("Unhandled WILL telnet command: "
|
||||
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
|
||||
elif iac_cmd[1] == WONT:
|
||||
log.debug("Unhandled WONT telnet command: "
|
||||
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
|
||||
else:
|
||||
log.debug("Unhandled telnet command: "
|
||||
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
|
||||
|
||||
# Remove the entire TELNET command from the buffer
|
||||
buf = buf.replace(iac_cmd, b'', 1)
|
||||
|
||||
yield from network_writer.drain()
|
||||
|
||||
# Return the new copy of the buffer, minus telnet commands
|
||||
return buf
|
||||
|
||||
if __name__ == '__main__':
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
process = loop.run_until_complete(asyncio.async(asyncio.subprocess.create_subprocess_exec("/bin/sh", "-i",
|
||||
stdout=asyncio.subprocess.PIPE,
|
||||
stderr=asyncio.subprocess.STDOUT,
|
||||
stdin=asyncio.subprocess.PIPE)))
|
||||
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=False, echo=False)
|
||||
|
||||
coro = asyncio.start_server(server.run, '127.0.0.1', 4444, loop=loop)
|
||||
s = loop.run_until_complete(coro)
|
||||
|
||||
try:
|
||||
loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
# Close the server
|
||||
s.close()
|
||||
loop.run_until_complete(s.wait_closed())
|
||||
loop.close()
|
@ -36,7 +36,8 @@ def md5sum(path):
|
||||
try:
|
||||
with open(path + '.md5sum') as f:
|
||||
return f.read()
|
||||
except OSError:
|
||||
# Unicode error is when user rename an image to .md5sum ....
|
||||
except (OSError, UnicodeDecodeError):
|
||||
pass
|
||||
|
||||
try:
|
||||
|
@ -23,7 +23,7 @@ import struct
|
||||
import psutil
|
||||
|
||||
if psutil.version_info < (3, 0, 0):
|
||||
raise Exception("psutil version should >= 3.0.0. If you are under ubuntu/debian install gns3 via apt instead of pip")
|
||||
raise Exception("psutil version should >= 3.0.0. If you are under Ubuntu/Debian install gns3 via apt instead of pip")
|
||||
|
||||
import logging
|
||||
log = logging.getLogger(__name__)
|
||||
@ -59,6 +59,7 @@ def _get_windows_interfaces_from_registry():
|
||||
interfaces.append({"id": npf_interface,
|
||||
"name": name,
|
||||
"ip_address": ip_address,
|
||||
"mac_address": "", # TODO: find MAC address in registry
|
||||
"netcard": netcard})
|
||||
winreg.CloseKey(hkeyinterface)
|
||||
winreg.CloseKey(hkeycon)
|
||||
@ -99,6 +100,7 @@ def get_windows_interfaces():
|
||||
interfaces.append({"id": npf_interface,
|
||||
"name": adapter.NetConnectionID,
|
||||
"ip_address": ip_address,
|
||||
"mac_address": adapter.MACAddress,
|
||||
"netcard": adapter.name})
|
||||
except (AttributeError, pywintypes.com_error):
|
||||
log.warn("Could not use the COM service to retrieve interface info, trying using the registry...")
|
||||
@ -137,6 +139,23 @@ def is_interface_up(interface):
|
||||
return True
|
||||
|
||||
|
||||
def _check_windows_service(service_name):
|
||||
|
||||
import pywintypes
|
||||
import win32service
|
||||
import win32serviceutil
|
||||
|
||||
try:
|
||||
if win32serviceutil.QueryServiceStatus(service_name, None)[1] != win32service.SERVICE_RUNNING:
|
||||
return False
|
||||
except pywintypes.error as e:
|
||||
if e.winerror == 1060:
|
||||
return False
|
||||
else:
|
||||
raise aiohttp.web.HTTPInternalServerError(text="Could not check if the {} service is running: {}".format(service_name, e.strerror))
|
||||
return True
|
||||
|
||||
|
||||
def interfaces():
|
||||
"""
|
||||
Gets the network interfaces on this server.
|
||||
@ -148,21 +167,32 @@ def interfaces():
|
||||
if not sys.platform.startswith("win"):
|
||||
for interface in sorted(psutil.net_if_addrs().keys()):
|
||||
ip_address = ""
|
||||
mac_address = ""
|
||||
for addr in psutil.net_if_addrs()[interface]:
|
||||
# get the first available IPv4 address only
|
||||
if addr.family == socket.AF_INET:
|
||||
ip_address = addr.address
|
||||
break
|
||||
if addr.family == psutil.AF_LINK:
|
||||
mac_address = addr.address
|
||||
results.append({"id": interface,
|
||||
"name": interface,
|
||||
"ip_address": ip_address})
|
||||
"ip_address": ip_address,
|
||||
"mac_address": mac_address})
|
||||
else:
|
||||
try:
|
||||
results = get_windows_interfaces()
|
||||
service_installed = True
|
||||
if not _check_windows_service("npf") and not _check_windows_service("npcap"):
|
||||
service_installed = False
|
||||
else:
|
||||
results = get_windows_interfaces()
|
||||
except ImportError:
|
||||
message = "pywin32 module is not installed, please install it on the server to get the available interface names"
|
||||
raise aiohttp.web.HTTPInternalServerError(text=message)
|
||||
except Exception as e:
|
||||
log.error("uncaught exception {type}".format(type=type(e)), exc_info=1)
|
||||
raise aiohttp.web.HTTPInternalServerError(text="uncaught exception: {}".format(e))
|
||||
|
||||
if service_installed is False:
|
||||
raise aiohttp.web.HTTPInternalServerError(text="The Winpcap or Npcap is not installed or running")
|
||||
|
||||
return results
|
||||
|
@ -166,7 +166,10 @@ def vmnet_windows(args, vmnet_range_start, vmnet_range_end):
|
||||
continue
|
||||
print("Adding vmnet{}...".format(vmnet_number))
|
||||
os.system('"{}" -- add adapter vmnet{}'.format(vnetlib_path, vmnet_number))
|
||||
|
||||
os.system("net stop npf")
|
||||
os.system("net start npf")
|
||||
os.system("net stop npcap")
|
||||
os.system("net start npcap")
|
||||
|
||||
def vmnet_unix(args, vmnet_range_start, vmnet_range_end):
|
||||
"""
|
@ -23,5 +23,5 @@
|
||||
# or negative for a release candidate or beta (after the base version
|
||||
# number has been incremented)
|
||||
|
||||
__version__ = "1.4.2"
|
||||
__version_info__ = (1, 4, 2, 0)
|
||||
__version__ = "1.5.3"
|
||||
__version_info__ = (1, 5, 3, 0)
|
||||
|
@ -74,7 +74,7 @@ class ColouredStreamHandler(logging.StreamHandler):
|
||||
stream.write(msg)
|
||||
stream.write(self.terminator)
|
||||
self.flush()
|
||||
# On OSX when frozen flush raise a BrokenPipeError
|
||||
# On OSX when frozen flush raise a BrokenPipeError
|
||||
except BrokenPipeError:
|
||||
pass
|
||||
except Exception:
|
||||
|
@ -18,6 +18,7 @@
|
||||
import json
|
||||
import jsonschema
|
||||
import aiohttp.web
|
||||
import asyncio
|
||||
import logging
|
||||
import sys
|
||||
import jinja2
|
||||
@ -41,7 +42,8 @@ class Response(aiohttp.web.Response):
|
||||
headers['Server'] = "Python/{0[0]}.{0[1]} GNS3/{1}".format(sys.version_info, __version__)
|
||||
super().__init__(headers=headers, **kwargs)
|
||||
|
||||
def start(self, request):
|
||||
@asyncio.coroutine
|
||||
def prepare(self, request):
|
||||
if log.getEffectiveLevel() == logging.DEBUG:
|
||||
log.info("%s %s", request.method, request.path_qs)
|
||||
log.debug("%s", dict(request.headers))
|
||||
@ -51,7 +53,7 @@ class Response(aiohttp.web.Response):
|
||||
log.debug(dict(self.headers))
|
||||
if hasattr(self, 'body') and self.body is not None and self.headers["CONTENT-TYPE"] == "application/json":
|
||||
log.debug(json.loads(self.body.decode('utf-8')))
|
||||
return super().start(request)
|
||||
return (yield from super().prepare(request))
|
||||
|
||||
def html(self, answer):
|
||||
"""
|
||||
|
@ -17,11 +17,13 @@
|
||||
|
||||
import sys
|
||||
import json
|
||||
import jsonschema
|
||||
import urllib
|
||||
import asyncio
|
||||
import aiohttp
|
||||
import logging
|
||||
import traceback
|
||||
import jsonschema
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@ -33,10 +35,11 @@ from ..config import Config
|
||||
|
||||
|
||||
@asyncio.coroutine
|
||||
def parse_request(request, input_schema):
|
||||
def parse_request(request, input_schema, raw):
|
||||
"""Parse body of request and raise HTTP errors in case of problems"""
|
||||
|
||||
content_length = request.content_length
|
||||
if content_length is not None and content_length > 0:
|
||||
if content_length is not None and content_length > 0 and not raw:
|
||||
body = yield from request.read()
|
||||
try:
|
||||
request.json = json.loads(body.decode('utf-8'))
|
||||
@ -45,13 +48,21 @@ def parse_request(request, input_schema):
|
||||
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON {}".format(e))
|
||||
else:
|
||||
request.json = {}
|
||||
try:
|
||||
jsonschema.validate(request.json, input_schema)
|
||||
except jsonschema.ValidationError as e:
|
||||
log.error("Invalid input query. JSON schema error: {}".format(e.message))
|
||||
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON: {} in schema: {}".format(
|
||||
e.message,
|
||||
json.dumps(e.schema)))
|
||||
|
||||
# Parse the query string
|
||||
if len(request.query_string) > 0:
|
||||
for (k, v) in urllib.parse.parse_qs(request.query_string).items():
|
||||
request.json[k] = v[0]
|
||||
|
||||
if input_schema:
|
||||
try:
|
||||
jsonschema.validate(request.json, input_schema)
|
||||
except jsonschema.ValidationError as e:
|
||||
log.error("Invalid input query. JSON schema error: {}".format(e.message))
|
||||
raise aiohttp.web.HTTPBadRequest(text="Invalid JSON: {} in schema: {}".format(
|
||||
e.message,
|
||||
json.dumps(e.schema)))
|
||||
|
||||
return request
|
||||
|
||||
|
||||
@ -161,12 +172,13 @@ class Route(object):
|
||||
if api_version is None or raw is True:
|
||||
response = Response(request=request, route=route, output_schema=output_schema)
|
||||
|
||||
request = yield from parse_request(request, None, raw)
|
||||
yield from func(request, response)
|
||||
return response
|
||||
|
||||
# API call
|
||||
try:
|
||||
request = yield from parse_request(request, input_schema)
|
||||
request = yield from parse_request(request, input_schema, raw)
|
||||
record_file = server_config.get("record")
|
||||
if record_file:
|
||||
try:
|
||||
@ -180,7 +192,7 @@ class Route(object):
|
||||
except aiohttp.web.HTTPBadRequest as e:
|
||||
response = Response(request=request, route=route)
|
||||
response.set_status(e.status)
|
||||
response.json({"message": e.text, "status": e.status, "path": route, "request": request.json})
|
||||
response.json({"message": e.text, "status": e.status, "path": route, "request": request.json, "method": request.method})
|
||||
except aiohttp.web.HTTPException as e:
|
||||
response = Response(request=request, route=route)
|
||||
response.set_status(e.status)
|
||||
|
@ -1,14 +1,19 @@
|
||||
[Unit]
|
||||
Description=GNS3 server
|
||||
Wants=network-online.target
|
||||
After=network.target network-online.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
Environment=statedir=/var/cache/gns3
|
||||
PIDFile=/var/run/gns3.pid
|
||||
ExecStart=/usr/local/bin/gns3server --log /var/log/gns3.log \
|
||||
--pid /var/run/gns3.pid --daemon
|
||||
Restart=on-abort
|
||||
User=gns3
|
||||
Group=gns3
|
||||
PermissionsStartOnly=true
|
||||
ExecStartPre=/bin/mkdir -p /var/log/gns3 /var/run/gns3
|
||||
ExecStartPre=/bin/chown -R gns3:gns3 /var/log/gns3 /var/run/gns3
|
||||
ExecStart=/usr/local/bin/gns3server --log /var/log/gns3/gns3.log \
|
||||
--pid /var/run/gns3/gns3.pid --daemon
|
||||
Restart=on-abort
|
||||
PIDFile=/var/run/gns3/gns3.pid
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
@ -1,5 +1,8 @@
|
||||
jsonschema>=2.4.0
|
||||
aiohttp==0.17.4
|
||||
aiohttp>=1.2.0
|
||||
aiohttp_cors>=0.4.0
|
||||
yarl>=0.7.0
|
||||
Jinja2>=2.7.3
|
||||
raven>=5.2.0
|
||||
psutil>=3.0.0
|
||||
zipstream>=1.1.3
|
||||
|
355
scripts/remote-install.sh
Normal file
355
scripts/remote-install.sh
Normal file
@ -0,0 +1,355 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#
|
||||
# Install GNS3 on a remote Ubuntu 14.04 LTS server
|
||||
# This create a dedicated user and setup all the package
|
||||
# and optionnaly a VPN
|
||||
#
|
||||
|
||||
function help {
|
||||
echo "Usage:" >&2
|
||||
echo "--with-openvpn: Install Open VPN" >&2
|
||||
echo "--with-iou: Install IOU" >&2
|
||||
echo "--with-i386-repository: Add i386 repositories require by IOU if they are not available on the system. Warning this will replace your source.list in order to use official ubuntu mirror" >&2
|
||||
echo "--unstable: Use the GNS3 unstable repository"
|
||||
echo "--help: This help" >&2
|
||||
}
|
||||
|
||||
function log {
|
||||
tput setaf 2
|
||||
echo "=> $1" >&2
|
||||
tput sgr0
|
||||
}
|
||||
|
||||
lsb_release -d | grep "Ubuntu 14.04" > /dev/null
|
||||
if [ $? != 0 ]
|
||||
then
|
||||
echo "You can use this script on Ubuntu 14.04 LTS only"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Read the options
|
||||
USE_VPN=0
|
||||
USE_IOU=0
|
||||
I386_REPO=0
|
||||
UNSTABLE=0
|
||||
|
||||
TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,unstable,help -n 'gns3-remote-install.sh' -- "$@"`
|
||||
if [ $? != 0 ]
|
||||
then
|
||||
help
|
||||
exit 1
|
||||
fi
|
||||
eval set -- "$TEMP"
|
||||
|
||||
# extract options and their arguments into variables.
|
||||
while true ; do
|
||||
case "$1" in
|
||||
--with-openvpn)
|
||||
USE_VPN=1
|
||||
shift
|
||||
;;
|
||||
--with-iou)
|
||||
USE_IOU=1
|
||||
shift
|
||||
;;
|
||||
--with-i386-repository)
|
||||
I386_REPO=1
|
||||
shift
|
||||
;;
|
||||
--unstable)
|
||||
UNSTABLE=1
|
||||
shift
|
||||
;;
|
||||
-h|--help)
|
||||
help
|
||||
exit 1
|
||||
;;
|
||||
--) shift ; break ;;
|
||||
*) echo "Internal error! $1" ; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Exit in case of error
|
||||
set -e
|
||||
|
||||
export DEBIAN_FRONTEND="noninteractive"
|
||||
|
||||
log "Add GNS3 repository"
|
||||
|
||||
if [ $UNSTABLE == 1 ]
|
||||
then
|
||||
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
||||
deb http://ppa.launchpad.net/gns3/unstable/ubuntu trusty main
|
||||
deb-src http://ppa.launchpad.net/gns3/unstable/ubuntu trusty main
|
||||
deb http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
|
||||
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
|
||||
EOFLIST
|
||||
else
|
||||
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
||||
deb http://ppa.launchpad.net/gns3/ppa/ubuntu trusty main
|
||||
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu trusty main
|
||||
deb http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
|
||||
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
|
||||
EOFLIST
|
||||
fi
|
||||
|
||||
if [ $I386_REPO == 1 ]
|
||||
then
|
||||
cat <<EOFLIST2 >> /etc/apt/sources.list
|
||||
###### Ubuntu Main Repos
|
||||
deb http://archive.ubuntu.com/ubuntu/ trusty main universe multiverse
|
||||
deb-src http://archive.ubuntu.com/ubuntu/ trusty main universe multiverse
|
||||
|
||||
###### Ubuntu Update Repos
|
||||
deb http://archive.ubuntu.com/ubuntu/ trusty-security main universe multiverse
|
||||
deb http://archive.ubuntu.com/ubuntu/ trusty-updates main universe multiverse
|
||||
deb-src http://archive.ubuntu.com/ubuntu/ trusty-security main universe multiverse
|
||||
deb-src http://archive.ubuntu.com/ubuntu/ trusty-updates main universe multiverse
|
||||
EOFLIST2
|
||||
fi
|
||||
|
||||
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A2E3EF7B
|
||||
|
||||
log "Update system packages"
|
||||
apt-get update
|
||||
|
||||
log "Upgrade packages"
|
||||
apt-get upgrade -y
|
||||
|
||||
log " Install GNS3 packages"
|
||||
apt-get install -y gns3-server
|
||||
|
||||
log "Create user GNS3 with /opt/gns3 as home directory"
|
||||
if [ ! -d "/opt/gns3/" ]
|
||||
then
|
||||
useradd -d /opt/gns3/ -m gns3
|
||||
fi
|
||||
|
||||
log "Install docker"
|
||||
if [ ! -f "/usr/bin/docker" ]
|
||||
then
|
||||
curl -sSL https://get.docker.com | bash
|
||||
fi
|
||||
|
||||
log "Add GNS3 to the docker group"
|
||||
usermod -aG docker gns3
|
||||
|
||||
if [ $USE_IOU == 1 ]
|
||||
then
|
||||
log "IOU setup"
|
||||
dpkg --add-architecture i386
|
||||
apt-get update
|
||||
|
||||
apt-get install -y gns3-iou
|
||||
|
||||
# Force the host name to gns3vm
|
||||
echo gns3vm > /etc/hostname
|
||||
|
||||
# Force hostid for IOU
|
||||
dd if=/dev/zero bs=4 count=1 of=/etc/hostid
|
||||
|
||||
# Block iou call. The server is down
|
||||
echo "127.0.0.254 xml.cisco.com" | tee --append /etc/hosts
|
||||
fi
|
||||
|
||||
log "Add gns3 to the kvm group"
|
||||
usermod -aG kvm gns3
|
||||
|
||||
log "Setup GNS3 server"
|
||||
|
||||
mkdir -p /etc/gns3
|
||||
cat <<EOFC > /etc/gns3/gns3_server.conf
|
||||
[Server]
|
||||
host = 0.0.0.0
|
||||
port = 3080
|
||||
images_path = /opt/gns3/images
|
||||
projects_path = /opt/gns3/projects
|
||||
report_errors = True
|
||||
|
||||
[Qemu]
|
||||
enable_kvm = True
|
||||
EOFC
|
||||
|
||||
chown -R gns3:gns3 /etc/gns3
|
||||
chmod -R 700 /etc/gns3
|
||||
|
||||
cat <<EOFI > /etc/init/gns3.conf
|
||||
description "GNS3 server"
|
||||
author "GNS3 Team"
|
||||
|
||||
start on filesystem or runlevel [2345]
|
||||
stop on runlevel [016]
|
||||
respawn
|
||||
console log
|
||||
|
||||
|
||||
script
|
||||
exec start-stop-daemon --start --make-pidfile --pidfile /var/run/gns3.pid --chuid gns3 --exec "/usr/bin/gns3server"
|
||||
end script
|
||||
|
||||
pre-start script
|
||||
echo "" > /var/log/upstart/gns3.log
|
||||
echo "[`date`] GNS3 Starting"
|
||||
end script
|
||||
|
||||
pre-stop script
|
||||
echo "[`date`] GNS3 Stopping"
|
||||
end script
|
||||
EOFI
|
||||
|
||||
chown root:root /etc/init/gns3.conf
|
||||
chmod 644 /etc/init/gns3.conf
|
||||
|
||||
|
||||
log "Start GNS3 service"
|
||||
set +e
|
||||
service gns3 stop
|
||||
set -e
|
||||
service gns3 start
|
||||
|
||||
log "GNS3 installed with success"
|
||||
|
||||
if [ $USE_VPN == 1 ]
|
||||
then
|
||||
log "Setup VPN"
|
||||
|
||||
cat <<EOFSERVER > /etc/gns3/gns3_server.conf
|
||||
[Server]
|
||||
host = 172.16.253.1
|
||||
port = 3080
|
||||
images_path = /opt/gns3/images
|
||||
projects_path = /opt/gns3/projects
|
||||
report_errors = True
|
||||
|
||||
[Qemu]
|
||||
enable_kvm = True
|
||||
EOFSERVER
|
||||
|
||||
log "Install packages for Open VPN"
|
||||
|
||||
apt-get install -y \
|
||||
openvpn \
|
||||
uuid \
|
||||
dnsutils \
|
||||
nginx-light
|
||||
|
||||
MY_IP_ADDR=$(dig @ns1.google.com -t txt o-o.myaddr.l.google.com +short | sed 's/"//g')
|
||||
|
||||
log "IP detected: $MY_IP_ADDR"
|
||||
|
||||
UUID=$(uuid)
|
||||
|
||||
log "Update motd"
|
||||
|
||||
cat <<EOFMOTD > /etc/update-motd.d/70-openvpn
|
||||
#!/bin/sh
|
||||
echo ""
|
||||
echo "_______________________________________________________________________________________________"
|
||||
echo "Download the VPN configuration here:"
|
||||
echo "http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn"
|
||||
echo ""
|
||||
echo "And add it to your openvpn client."
|
||||
echo ""
|
||||
echo "apt-get remove nginx-light to disable the HTTP server."
|
||||
echo "And remove this file with rm /etc/update-motd.d/70-openvpn"
|
||||
EOFMOTD
|
||||
chmod 755 /etc/update-motd.d/70-openvpn
|
||||
|
||||
|
||||
mkdir -p /etc/openvpn/
|
||||
|
||||
[ -d /dev/net ] || mkdir -p /dev/net
|
||||
[ -c /dev/net/tun ] || mknod /dev/net/tun c 10 200
|
||||
|
||||
log "Create keys"
|
||||
|
||||
[ -f /etc/openvpn/dh.pem ] || openssl dhparam -out /etc/openvpn/dh.pem 2048
|
||||
[ -f /etc/openvpn/key.pem ] || openssl genrsa -out /etc/openvpn/key.pem 2048
|
||||
chmod 600 /etc/openvpn/key.pem
|
||||
[ -f /etc/openvpn/csr.pem ] || openssl req -new -key /etc/openvpn/key.pem -out /etc/openvpn/csr.pem -subj /CN=OpenVPN/
|
||||
[ -f /etc/openvpn/cert.pem ] || openssl x509 -req -in /etc/openvpn/csr.pem -out /etc/openvpn/cert.pem -signkey /etc/openvpn/key.pem -days 24855
|
||||
|
||||
log "Create client configuration"
|
||||
cat <<EOFCLIENT > /root/client.ovpn
|
||||
client
|
||||
nobind
|
||||
comp-lzo
|
||||
dev tun
|
||||
<key>
|
||||
`cat /etc/openvpn/key.pem`
|
||||
</key>
|
||||
<cert>
|
||||
`cat /etc/openvpn/cert.pem`
|
||||
</cert>
|
||||
<ca>
|
||||
`cat /etc/openvpn/cert.pem`
|
||||
</ca>
|
||||
<dh>
|
||||
`cat /etc/openvpn/dh.pem`
|
||||
</dh>
|
||||
<connection>
|
||||
remote $MY_IP_ADDR 1194 udp
|
||||
</connection>
|
||||
EOFCLIENT
|
||||
|
||||
cat <<EOFUDP > /etc/openvpn/udp1194.conf
|
||||
server 172.16.253.0 255.255.255.0
|
||||
verb 3
|
||||
duplicate-cn
|
||||
comp-lzo
|
||||
key key.pem
|
||||
ca cert.pem
|
||||
cert cert.pem
|
||||
dh dh.pem
|
||||
keepalive 10 60
|
||||
persist-key
|
||||
persist-tun
|
||||
proto udp
|
||||
port 1194
|
||||
dev tun1194
|
||||
status openvpn-status-1194.log
|
||||
log-append /var/log/openvpn-udp1194.log
|
||||
EOFUDP
|
||||
|
||||
echo "Setup HTTP server for serving client certificate"
|
||||
mkdir -p /usr/share/nginx/openvpn/$UUID
|
||||
cp /root/client.ovpn /usr/share/nginx/openvpn/$UUID/$HOSTNAME.ovpn
|
||||
touch /usr/share/nginx/openvpn/$UUID/index.html
|
||||
touch /usr/share/nginx/openvpn/index.html
|
||||
|
||||
cat <<EOFNGINX > /etc/nginx/sites-available/openvpn
|
||||
server {
|
||||
listen 8003;
|
||||
root /usr/share/nginx/openvpn;
|
||||
}
|
||||
EOFNGINX
|
||||
|
||||
[ -f /etc/nginx/sites-enabled/openvpn ] || ln -s /etc/nginx/sites-available/openvpn /etc/nginx/sites-enabled/
|
||||
service nginx stop
|
||||
service nginx start
|
||||
|
||||
log "Restart OpenVPN"
|
||||
|
||||
set +e
|
||||
service openvpn stop
|
||||
service openvpn start
|
||||
|
||||
log "Download http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn to setup your OpenVPN client after rebooting the server"
|
||||
|
||||
fi
|
13
setup.py
13
setup.py
@ -38,16 +38,7 @@ class PyTest(TestCommand):
|
||||
errcode = pytest.main(self.test_args)
|
||||
sys.exit(errcode)
|
||||
|
||||
dependencies = [
|
||||
"jsonschema>=2.4.0",
|
||||
"aiohttp>=0.15.1",
|
||||
"Jinja2>=2.7.3",
|
||||
"raven>=5.2.0",
|
||||
"psutil>=3.0.0"
|
||||
]
|
||||
|
||||
if sys.platform.startswith("win"):
|
||||
dependencies.append("pywin32>=219")
|
||||
dependencies = open("requirements.txt", "r").read().splitlines()
|
||||
|
||||
setup(
|
||||
name="gns3-server",
|
||||
@ -62,7 +53,7 @@ setup(
|
||||
entry_points={
|
||||
"console_scripts": [
|
||||
"gns3server = gns3server.main:main",
|
||||
"gns3vmnet = utils.vmnet:main",
|
||||
"gns3vmnet = gns3server.utils.vmnet:main",
|
||||
]
|
||||
},
|
||||
packages=find_packages(".", exclude=["docs", "tests"]),
|
||||
|
@ -81,16 +81,25 @@ def _get_unused_port():
|
||||
def server(request, loop, port_manager, monkeypatch):
|
||||
"""A GNS3 server"""
|
||||
|
||||
port = _get_unused_port()
|
||||
host = "localhost"
|
||||
app = web.Application()
|
||||
for method, route, handler in Route.get_routes():
|
||||
app.router.add_route(method, route, handler)
|
||||
for module in MODULES:
|
||||
instance = module.instance()
|
||||
instance.port_manager = port_manager
|
||||
srv = loop.create_server(app.make_handler(), host, port)
|
||||
srv = loop.run_until_complete(srv)
|
||||
|
||||
host = "localhost"
|
||||
|
||||
# We try multiple time. Because on Travis test can fail when because the port is taken by someone else
|
||||
for i in range(0, 5):
|
||||
port = _get_unused_port()
|
||||
try:
|
||||
srv = loop.create_server(app.make_handler(), host, port)
|
||||
srv = loop.run_until_complete(srv)
|
||||
except OSError:
|
||||
pass
|
||||
else:
|
||||
break
|
||||
|
||||
def tear_down():
|
||||
for module in MODULES:
|
||||
@ -151,6 +160,7 @@ def run_around_tests(monkeypatch, port_manager):
|
||||
config.set("Server", "project_directory", os.path.join(tmppath, 'projects'))
|
||||
config.set("Server", "images_path", os.path.join(tmppath, 'images'))
|
||||
config.set("Server", "auth", False)
|
||||
config.set("Server", "controller", False)
|
||||
|
||||
# Prevent executions of the VM if we forgot to mock something
|
||||
config.set("VirtualBox", "vboxmanage_path", tmppath)
|
||||
|
36
tests/controller/test_controller.py
Normal file
36
tests/controller/test_controller.py
Normal file
@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2016 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import pytest
|
||||
|
||||
from gns3server.controller import Controller
|
||||
from gns3server.config import Config
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def controller():
|
||||
Controller._instance = None
|
||||
return Controller.instance()
|
||||
|
||||
|
||||
|
||||
def test_isEnabled(controller):
|
||||
Config.instance().set("Server", "controller", False)
|
||||
assert not controller.isEnabled()
|
||||
Config.instance().set("Server", "controller", True)
|
||||
assert controller.isEnabled()
|
||||
|
@ -104,7 +104,7 @@ class Query:
|
||||
if path is None:
|
||||
return
|
||||
with open(self._example_file_path(method, route), 'w+') as f:
|
||||
f.write("curl -i -X {} 'http://localhost:8000/v{}{}'".format(method, api_version, path))
|
||||
f.write("curl -i -X {} 'http://localhost:3080/v{}{}'".format(method, api_version, path))
|
||||
if body:
|
||||
f.write(" -d '{}'".format(re.sub(r"\n", "", json.dumps(json.loads(body), sort_keys=True))))
|
||||
f.write("\n\n")
|
||||
|
185
tests/handlers/api/test_docker.py
Normal file
185
tests/handlers/api/test_docker.py
Normal file
@ -0,0 +1,185 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import pytest
|
||||
import os
|
||||
import stat
|
||||
import sys
|
||||
import uuid
|
||||
import aiohttp
|
||||
|
||||
from tests.utils import asyncio_patch
|
||||
from unittest.mock import patch, MagicMock, PropertyMock
|
||||
from gns3server.modules.docker import Docker
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def base_params():
|
||||
"""Return standard parameters"""
|
||||
return {"name": "PC TEST 1", "image": "nginx", "start_command": "nginx-daemon", "adapters": 2, "environment": "YES=1\nNO=0", "console_type": "telnet", "console_resolution": "1280x1024"}
|
||||
|
||||
|
||||
@pytest.yield_fixture(autouse=True)
|
||||
def mock_connection():
|
||||
docker = Docker.instance()
|
||||
docker._connected = True
|
||||
docker._connector = MagicMock()
|
||||
yield
|
||||
Docker._instance = None
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def vm(server, project, base_params):
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "nginx"}]) as mock_list:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value={"Id": "8bd8153ea8f5"}) as mock:
|
||||
response = server.post("/projects/{project_id}/docker/vms".format(project_id=project.id), base_params)
|
||||
if response.status != 201:
|
||||
print(response.body)
|
||||
assert response.status == 201
|
||||
return response.json
|
||||
|
||||
|
||||
def test_docker_create(server, project, base_params):
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "nginx"}]) as mock_list:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value={"Id": "8bd8153ea8f5"}) as mock:
|
||||
response = server.post("/projects/{project_id}/docker/vms".format(project_id=project.id), base_params)
|
||||
assert response.status == 201
|
||||
assert response.route == "/projects/{project_id}/docker/vms"
|
||||
assert response.json["name"] == "PC TEST 1"
|
||||
assert response.json["project_id"] == project.id
|
||||
assert response.json["container_id"] == "8bd8153ea8f5"
|
||||
assert response.json["image"] == "nginx:latest"
|
||||
assert response.json["adapters"] == 2
|
||||
assert response.json["environment"] == "YES=1\nNO=0"
|
||||
assert response.json["console_resolution"] == "1280x1024"
|
||||
|
||||
|
||||
def test_docker_start(server, vm):
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.start", return_value=True) as mock:
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||
assert mock.called
|
||||
assert response.status == 204
|
||||
|
||||
|
||||
def test_docker_stop(server, vm):
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.stop", return_value=True) as mock:
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||
assert mock.called
|
||||
assert response.status == 204
|
||||
|
||||
|
||||
def test_docker_reload(server, vm):
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.restart", return_value=True) as mock:
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/reload".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||
assert mock.called
|
||||
assert response.status == 204
|
||||
|
||||
|
||||
def test_docker_delete(server, vm):
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.delete", return_value=True) as mock:
|
||||
response = server.delete("/projects/{project_id}/docker/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||
assert mock.called
|
||||
assert response.status == 204
|
||||
|
||||
|
||||
def test_docker_reload(server, vm):
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.pause", return_value=True) as mock:
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/suspend".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||
assert mock.called
|
||||
assert response.status == 204
|
||||
|
||||
|
||||
def test_docker_nio_create_udp(server, vm):
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
|
||||
"lport": 4242,
|
||||
"rport": 4343,
|
||||
"rhost": "127.0.0.1"},
|
||||
example=True)
|
||||
assert response.status == 201
|
||||
assert response.route == "/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
||||
assert response.json["type"] == "nio_udp"
|
||||
|
||||
|
||||
def test_docker_delete_nio(server, vm):
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.adapter_remove_nio_binding") as mock:
|
||||
response = server.delete("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
||||
assert response.status == 204
|
||||
assert response.route == "/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
||||
|
||||
|
||||
def test_docker_update(server, vm, tmpdir, free_console_port):
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.update") as mock:
|
||||
response = server.put("/projects/{project_id}/docker/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"name": "test",
|
||||
"console": free_console_port,
|
||||
"start_command": "yes",
|
||||
"environment": "GNS3=1\nGNS4=0"},
|
||||
example=True)
|
||||
assert mock.called
|
||||
assert response.status == 200
|
||||
assert response.json["name"] == "test"
|
||||
assert response.json["console"] == free_console_port
|
||||
assert response.json["start_command"] == "yes"
|
||||
assert response.json["environment"] == "GNS3=1\nGNS4=0"
|
||||
|
||||
|
||||
def test_docker_start_capture(server, vm, tmpdir, project):
|
||||
|
||||
with patch("gns3server.modules.docker.docker_vm.DockerVM.is_running", return_value=True) as mock:
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.start_capture") as start_capture:
|
||||
|
||||
params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=params, example=True)
|
||||
|
||||
assert response.status == 200
|
||||
|
||||
assert start_capture.called
|
||||
assert "test.pcap" in response.json["pcap_file_path"]
|
||||
|
||||
|
||||
def test_docker_start_capture_not_started(server, vm, tmpdir):
|
||||
|
||||
with patch("gns3server.modules.docker.docker_vm.DockerVM.is_running", return_value=False) as mock:
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.start_capture") as start_capture:
|
||||
|
||||
params = {"capture_file_name": "test.pcap", "data_link_type": "DLT_EN10MB"}
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/start_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), body=params)
|
||||
|
||||
assert not start_capture.called
|
||||
assert response.status == 409
|
||||
|
||||
|
||||
def test_docker_stop_capture(server, vm, tmpdir, project):
|
||||
|
||||
with patch("gns3server.modules.docker.docker_vm.DockerVM.is_running", return_value=True) as mock:
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.stop_capture") as stop_capture:
|
||||
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
|
||||
|
||||
assert response.status == 204
|
||||
|
||||
assert stop_capture.called
|
||||
|
||||
|
||||
def test_docker_stop_capture_not_started(server, vm, tmpdir):
|
||||
|
||||
with patch("gns3server.modules.docker.docker_vm.DockerVM.is_running", return_value=False) as mock:
|
||||
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.stop_capture") as stop_capture:
|
||||
|
||||
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/stop_capture".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
|
||||
|
||||
assert not stop_capture.called
|
||||
assert response.status == 409
|
@ -173,9 +173,9 @@ def test_upload_vm(server, tmpdir):
|
||||
|
||||
|
||||
def test_upload_vm_permission_denied(server, tmpdir):
|
||||
with open(str(tmpdir / "test2"), "w+") as f:
|
||||
with open(str(tmpdir / "test2.tmp"), "w+") as f:
|
||||
f.write("")
|
||||
os.chmod(str(tmpdir / "test2"), 0)
|
||||
os.chmod(str(tmpdir / "test2.tmp"), 0)
|
||||
|
||||
with patch("gns3server.modules.Dynamips.get_images_directory", return_value=str(tmpdir),):
|
||||
response = server.post("/dynamips/vms/test2", body="TEST", raw=True)
|
||||
|
@ -27,15 +27,15 @@ from gns3server.version import __version__
|
||||
|
||||
|
||||
def test_stream(server, tmpdir, loop):
|
||||
with open(str(tmpdir / "test"), 'w+') as f:
|
||||
with open(str(tmpdir / "test.pcap"), 'w+') as f:
|
||||
f.write("hello")
|
||||
|
||||
def go(future):
|
||||
query = json.dumps({"location": str(tmpdir / "test")})
|
||||
query = json.dumps({"location": str(tmpdir / "test.pcap")})
|
||||
headers = {'content-type': 'application/json'}
|
||||
response = yield from aiohttp.request("GET", server.get_url("/files/stream", 1), data=query, headers=headers)
|
||||
response.body = yield from response.content.read(5)
|
||||
with open(str(tmpdir / "test"), 'a') as f:
|
||||
with open(str(tmpdir / "test.pcap"), 'a') as f:
|
||||
f.write("world")
|
||||
response.body += yield from response.content.read(5)
|
||||
response.close()
|
||||
@ -48,7 +48,7 @@ def test_stream(server, tmpdir, loop):
|
||||
assert response.body == b'helloworld'
|
||||
|
||||
|
||||
def test_stream_file_not_found(server, tmpdir, loop):
|
||||
def test_stream_file_not_pcap(server, tmpdir, loop):
|
||||
def go(future):
|
||||
query = json.dumps({"location": str(tmpdir / "test")})
|
||||
headers = {'content-type': 'application/json'}
|
||||
@ -56,6 +56,20 @@ def test_stream_file_not_found(server, tmpdir, loop):
|
||||
response.close()
|
||||
future.set_result(response)
|
||||
|
||||
future = asyncio.Future()
|
||||
asyncio.async(go(future))
|
||||
response = loop.run_until_complete(future)
|
||||
assert response.status == 403
|
||||
|
||||
|
||||
def test_stream_file_not_found(server, tmpdir, loop):
|
||||
def go(future):
|
||||
query = json.dumps({"location": str(tmpdir / "test.pcap")})
|
||||
headers = {'content-type': 'application/json'}
|
||||
response = yield from aiohttp.request("GET", server.get_url("/files/stream", 1), data=query, headers=headers)
|
||||
response.close()
|
||||
future.set_result(response)
|
||||
|
||||
future = asyncio.Future()
|
||||
asyncio.async(go(future))
|
||||
response = loop.run_until_complete(future)
|
||||
|
@ -349,11 +349,3 @@ def test_upload_vm(server, tmpdir):
|
||||
assert checksum == "033bd94b1168d7e4f0d644c3c95e35bf"
|
||||
|
||||
|
||||
def test_upload_vm_permission_denied(server, tmpdir):
|
||||
with open(str(tmpdir / "test2"), "w+") as f:
|
||||
f.write("")
|
||||
os.chmod(str(tmpdir / "test2"), 0)
|
||||
|
||||
with patch("gns3server.modules.IOU.get_images_directory", return_value=str(tmpdir),):
|
||||
response = server.post("/iou/vms/test2", body="TEST", raw=True)
|
||||
assert response.status == 409
|
||||
|
@ -23,8 +23,9 @@ import uuid
|
||||
import os
|
||||
import asyncio
|
||||
import aiohttp
|
||||
import zipfile
|
||||
|
||||
from unittest.mock import patch
|
||||
from unittest.mock import patch, MagicMock
|
||||
from tests.utils import asyncio_patch
|
||||
|
||||
from gns3server.handlers.api.project_handler import ProjectHandler
|
||||
@ -261,3 +262,75 @@ def test_get_file(server, tmpdir):
|
||||
|
||||
response = server.get("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
|
||||
assert response.status == 403
|
||||
|
||||
|
||||
def test_write_file(server, tmpdir):
|
||||
|
||||
with patch("gns3server.config.Config.get_section_config", return_value={"project_directory": str(tmpdir)}):
|
||||
project = ProjectManager.instance().create_project()
|
||||
|
||||
with open(os.path.join(project.path, "hello"), "w+") as f:
|
||||
f.write("world")
|
||||
|
||||
response = server.post("/projects/{project_id}/files/hello".format(project_id=project.id), body="universe", raw=True)
|
||||
assert response.status == 200
|
||||
|
||||
with open(os.path.join(project.path, "hello")) as f:
|
||||
content = f.read()
|
||||
assert content == "universe"
|
||||
|
||||
response = server.post("/projects/{project_id}/files/test/false".format(project_id=project.id), body="universe", raw=True)
|
||||
assert response.status == 404
|
||||
|
||||
response = server.post("/projects/{project_id}/files/../hello".format(project_id=project.id), body="universe", raw=True)
|
||||
assert response.status == 403
|
||||
|
||||
|
||||
def test_export(server, tmpdir, loop, project):
|
||||
|
||||
os.makedirs(project.path, exist_ok=True)
|
||||
with open(os.path.join(project.path, 'a'), 'w+') as f:
|
||||
f.write('hello')
|
||||
|
||||
response = server.get("/projects/{project_id}/export".format(project_id=project.id), raw=True)
|
||||
assert response.status == 200
|
||||
assert response.headers['CONTENT-TYPE'] == 'application/gns3project'
|
||||
assert response.headers['CONTENT-DISPOSITION'] == 'attachment; filename="{}.gns3project"'.format(project.name)
|
||||
|
||||
with open(str(tmpdir / 'project.zip'), 'wb+') as f:
|
||||
f.write(response.body)
|
||||
|
||||
with zipfile.ZipFile(str(tmpdir / 'project.zip')) as myzip:
|
||||
with myzip.open("a") as myfile:
|
||||
content = myfile.read()
|
||||
assert content == b"hello"
|
||||
|
||||
|
||||
def test_export_include_image(server, tmpdir, loop, project):
|
||||
|
||||
project.export = MagicMock()
|
||||
response = server.get("/projects/{project_id}/export".format(project_id=project.id), raw=True)
|
||||
project.export.assert_called_with(include_images=False)
|
||||
|
||||
response = server.get("/projects/{project_id}/export?include_images=0".format(project_id=project.id), raw=True)
|
||||
project.export.assert_called_with(include_images=False)
|
||||
|
||||
response = server.get("/projects/{project_id}/export?include_images=1".format(project_id=project.id), raw=True)
|
||||
project.export.assert_called_with(include_images=True)
|
||||
|
||||
|
||||
def test_import(server, tmpdir, loop, project):
|
||||
|
||||
with zipfile.ZipFile(str(tmpdir / "test.zip"), 'w') as myzip:
|
||||
myzip.writestr("demo", b"hello")
|
||||
|
||||
project_id = project.id
|
||||
|
||||
with open(str(tmpdir / "test.zip"), "rb") as f:
|
||||
response = server.post("/projects/{project_id}/import".format(project_id=project_id), body=f.read(), raw=True)
|
||||
assert response.status == 201
|
||||
|
||||
project = ProjectManager.instance().get_project(project_id=project_id)
|
||||
with open(os.path.join(project.path, "demo")) as f:
|
||||
content = f.read()
|
||||
assert content == "hello"
|
||||
|
@ -263,9 +263,9 @@ def test_upload_vm_forbiden_location(server, tmpdir):
|
||||
|
||||
|
||||
def test_upload_vm_permission_denied(server, tmpdir):
|
||||
with open(str(tmpdir / "test2"), "w+") as f:
|
||||
with open(str(tmpdir / "test2.tmp"), "w+") as f:
|
||||
f.write("")
|
||||
os.chmod(str(tmpdir / "test2"), 0)
|
||||
os.chmod(str(tmpdir / "test2.tmp"), 0)
|
||||
|
||||
with patch("gns3server.modules.Qemu.get_images_directory", return_value=str(tmpdir),):
|
||||
response = server.post("/qemu/vms/test2", body="TEST", raw=True)
|
||||
|
@ -219,7 +219,6 @@ def test_backup_projects(server, tmpdir, loop):
|
||||
assert response.headers['CONTENT-TYPE'] == 'application/x-gtar'
|
||||
|
||||
with open(str(tmpdir / 'projects.tar'), 'wb+') as f:
|
||||
print(len(response.body))
|
||||
f.write(response.body)
|
||||
|
||||
tar = tarfile.open(str(tmpdir / 'projects.tar'), 'r')
|
||||
|
136
tests/modules/docker/test_docker.py
Normal file
136
tests/modules/docker/test_docker.py
Normal file
@ -0,0 +1,136 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import pytest
|
||||
import asyncio
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from tests.utils import asyncio_patch
|
||||
from gns3server.modules.docker import Docker
|
||||
from gns3server.modules.docker.docker_error import DockerError
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def vm():
|
||||
vm = Docker()
|
||||
vm._connected = True
|
||||
vm._connector = MagicMock()
|
||||
vm._connector.closed = False
|
||||
return vm
|
||||
|
||||
|
||||
def test_query_success(loop, vm):
|
||||
|
||||
response = MagicMock()
|
||||
response.status = 200
|
||||
response.headers = {'CONTENT-TYPE': 'application/json'}
|
||||
|
||||
@asyncio.coroutine
|
||||
def read():
|
||||
return b'{"c": false}'
|
||||
|
||||
response.read.side_effect = read
|
||||
with asyncio_patch("aiohttp.request", return_value=response) as mock:
|
||||
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
|
||||
mock.assert_called_with('POST',
|
||||
'http://docker/test',
|
||||
connector=vm._connector,
|
||||
data='{"a": true}',
|
||||
headers={'content-type': 'application/json'},
|
||||
params={'b': 1})
|
||||
|
||||
assert data == {"c": False}
|
||||
|
||||
|
||||
def test_query_error(loop, vm):
|
||||
|
||||
response = MagicMock()
|
||||
response.status = 404
|
||||
|
||||
@asyncio.coroutine
|
||||
def read():
|
||||
return b"NOT FOUND"
|
||||
|
||||
response.read.side_effect = read
|
||||
with asyncio_patch("aiohttp.request", return_value=response) as mock:
|
||||
with pytest.raises(DockerError):
|
||||
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
|
||||
mock.assert_called_with('POST',
|
||||
'http://docker/test',
|
||||
connector=vm._connector,
|
||||
data='{"a": true}',
|
||||
headers={'content-type': 'application/json'},
|
||||
params={'b': 1})
|
||||
|
||||
|
||||
def test_query_error_json(loop, vm):
|
||||
|
||||
response = MagicMock()
|
||||
response.status = 404
|
||||
|
||||
@asyncio.coroutine
|
||||
def read():
|
||||
return b'{"message": "Error"}'
|
||||
|
||||
response.read.side_effect = read
|
||||
with asyncio_patch("aiohttp.request", return_value=response) as mock:
|
||||
with pytest.raises(DockerError):
|
||||
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
|
||||
mock.assert_called_with('POST',
|
||||
'http://docker/test',
|
||||
connector=vm._connector,
|
||||
data='{"a": true}',
|
||||
headers={'content-type': 'application/json'},
|
||||
params={'b': 1})
|
||||
|
||||
|
||||
def test_list_images(loop):
|
||||
response = [
|
||||
{
|
||||
"RepoTags": [
|
||||
"ubuntu:12.04",
|
||||
"ubuntu:precise",
|
||||
"ubuntu:latest"
|
||||
],
|
||||
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
|
||||
"Created": 1365714795,
|
||||
"Size": 131506275,
|
||||
"VirtualSize": 131506275
|
||||
},
|
||||
{
|
||||
"RepoTags": [
|
||||
"ubuntu:12.10",
|
||||
"ubuntu:quantal",
|
||||
"<none>:<none>"
|
||||
],
|
||||
"ParentId": "27cf784147099545",
|
||||
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
|
||||
"Created": 1364102658,
|
||||
"Size": 24653,
|
||||
"VirtualSize": 180116135
|
||||
}
|
||||
]
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
images = loop.run_until_complete(asyncio.async(Docker.instance().list_images()))
|
||||
mock.assert_called_with("GET", "images/json", params={"all": 0})
|
||||
assert len(images) == 5
|
||||
assert {"image": "ubuntu:12.04"} in images
|
||||
assert {"image": "ubuntu:precise"} in images
|
||||
assert {"image": "ubuntu:latest"} in images
|
||||
assert {"image": "ubuntu:12.10"} in images
|
||||
assert {"image": "ubuntu:quantal"} in images
|
939
tests/modules/docker/test_docker_vm.py
Normal file
939
tests/modules/docker/test_docker_vm.py
Normal file
@ -0,0 +1,939 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import pytest
|
||||
import uuid
|
||||
import asyncio
|
||||
import os
|
||||
from tests.utils import asyncio_patch, AsyncioMagicMock
|
||||
|
||||
from gns3server.ubridge.ubridge_error import UbridgeNamespaceError
|
||||
from gns3server.modules.docker.docker_vm import DockerVM
|
||||
from gns3server.modules.docker.docker_error import *
|
||||
from gns3server.modules.docker import Docker
|
||||
from gns3server.utils.get_resource import get_resource
|
||||
|
||||
|
||||
from unittest.mock import patch, MagicMock, PropertyMock, call
|
||||
from gns3server.config import Config
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def manager(port_manager):
|
||||
m = Docker.instance()
|
||||
m.port_manager = port_manager
|
||||
return m
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def vm(project, manager):
|
||||
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||
vm._cid = "e90e34656842"
|
||||
vm.allocate_aux = False
|
||||
return vm
|
||||
|
||||
|
||||
def test_json(vm, project):
|
||||
assert vm.__json__() == {
|
||||
'container_id': 'e90e34656842',
|
||||
'image': 'ubuntu:latest',
|
||||
'name': 'test',
|
||||
'project_id': project.id,
|
||||
'vm_id': vm.id,
|
||||
'adapters': 1,
|
||||
'console': vm.console,
|
||||
'console_type': 'telnet',
|
||||
'console_resolution': '1024x768',
|
||||
'console_http_port': 80,
|
||||
'console_http_path': '/',
|
||||
'aux': vm.aux,
|
||||
'start_command': vm.start_command,
|
||||
'environment': vm.environment,
|
||||
'vm_directory': vm.working_dir
|
||||
}
|
||||
|
||||
|
||||
def test_start_command(vm):
|
||||
|
||||
vm.start_command = "hello"
|
||||
assert vm.start_command == "hello"
|
||||
vm.start_command = " "
|
||||
assert vm.start_command is None
|
||||
|
||||
|
||||
def test_create(loop, project, manager):
|
||||
|
||||
response = {
|
||||
"Id": "e90e34656806",
|
||||
"Warnings": []
|
||||
}
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||
loop.run_until_complete(asyncio.async(vm.create()))
|
||||
mock.assert_called_with("POST", "containers/create", data={
|
||||
"Tty": True,
|
||||
"OpenStdin": True,
|
||||
"StdinOnce": False,
|
||||
"HostConfig":
|
||||
{
|
||||
"CapAdd": ["ALL"],
|
||||
"Binds": [
|
||||
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||
],
|
||||
"Privileged": True
|
||||
},
|
||||
"Volumes": {},
|
||||
"NetworkDisabled": True,
|
||||
"Name": "test",
|
||||
"Hostname": "test",
|
||||
"Image": "ubuntu:latest",
|
||||
"Env": [
|
||||
"container=docker",
|
||||
"GNS3_MAX_ETHERNET=eth0",
|
||||
"GNS3_VOLUMES=/etc/network"
|
||||
],
|
||||
"Entrypoint": ["/gns3/init.sh"],
|
||||
"Cmd": ["/bin/sh"]
|
||||
})
|
||||
assert vm._cid == "e90e34656806"
|
||||
|
||||
|
||||
def test_create_with_tag(loop, project, manager):
|
||||
|
||||
response = {
|
||||
"Id": "e90e34656806",
|
||||
"Warnings": []
|
||||
}
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest:16.04")
|
||||
loop.run_until_complete(asyncio.async(vm.create()))
|
||||
mock.assert_called_with("POST", "containers/create", data={
|
||||
"Tty": True,
|
||||
"OpenStdin": True,
|
||||
"StdinOnce": False,
|
||||
"HostConfig":
|
||||
{
|
||||
"CapAdd": ["ALL"],
|
||||
"Binds": [
|
||||
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||
],
|
||||
"Privileged": True
|
||||
},
|
||||
"Volumes": {},
|
||||
"NetworkDisabled": True,
|
||||
"Name": "test",
|
||||
"Hostname": "test",
|
||||
"Image": "ubuntu:latest:16.04",
|
||||
"Env": [
|
||||
"container=docker",
|
||||
"GNS3_MAX_ETHERNET=eth0",
|
||||
"GNS3_VOLUMES=/etc/network"
|
||||
],
|
||||
"Entrypoint": ["/gns3/init.sh"],
|
||||
"Cmd": ["/bin/sh"]
|
||||
})
|
||||
assert vm._cid == "e90e34656806"
|
||||
|
||||
|
||||
def test_create_vnc(loop, project, manager):
|
||||
|
||||
response = {
|
||||
"Id": "e90e34656806",
|
||||
"Warnings": []
|
||||
}
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest", console_type="vnc", console=5900)
|
||||
vm._start_vnc = MagicMock()
|
||||
vm._display = 42
|
||||
loop.run_until_complete(asyncio.async(vm.create()))
|
||||
mock.assert_called_with("POST", "containers/create", data={
|
||||
"Tty": True,
|
||||
"OpenStdin": True,
|
||||
"StdinOnce": False,
|
||||
"HostConfig":
|
||||
{
|
||||
"CapAdd": ["ALL"],
|
||||
"Binds": [
|
||||
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
|
||||
'/tmp/.X11-unix/:/tmp/.X11-unix/'
|
||||
],
|
||||
"Privileged": True
|
||||
},
|
||||
"Volumes": {},
|
||||
"NetworkDisabled": True,
|
||||
"Name": "test",
|
||||
"Hostname": "test",
|
||||
"Image": "ubuntu:latest",
|
||||
"Env": [
|
||||
"container=docker",
|
||||
"GNS3_MAX_ETHERNET=eth0",
|
||||
"GNS3_VOLUMES=/etc/network",
|
||||
"QT_GRAPHICSSYSTEM=native",
|
||||
"DISPLAY=:42"
|
||||
],
|
||||
"Entrypoint": ["/gns3/init.sh"],
|
||||
"Cmd": ["/bin/sh"]
|
||||
})
|
||||
assert vm._start_vnc.called
|
||||
assert vm._cid == "e90e34656806"
|
||||
assert vm._console_type == "vnc"
|
||||
|
||||
|
||||
def test_create_start_cmd(loop, project, manager):
|
||||
|
||||
response = {
|
||||
"Id": "e90e34656806",
|
||||
"Warnings": []
|
||||
}
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||
vm._start_command = "/bin/ls"
|
||||
loop.run_until_complete(asyncio.async(vm.create()))
|
||||
mock.assert_called_with("POST", "containers/create", data={
|
||||
"Tty": True,
|
||||
"OpenStdin": True,
|
||||
"StdinOnce": False,
|
||||
"HostConfig":
|
||||
{
|
||||
"CapAdd": ["ALL"],
|
||||
"Binds": [
|
||||
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||
],
|
||||
"Privileged": True
|
||||
},
|
||||
"Volumes": {},
|
||||
"Entrypoint": ["/gns3/init.sh"],
|
||||
"Cmd": ["/bin/ls"],
|
||||
"NetworkDisabled": True,
|
||||
"Name": "test",
|
||||
"Hostname": "test",
|
||||
"Image": "ubuntu:latest",
|
||||
"Env": [
|
||||
"container=docker",
|
||||
"GNS3_MAX_ETHERNET=eth0",
|
||||
"GNS3_VOLUMES=/etc/network"
|
||||
]
|
||||
})
|
||||
assert vm._cid == "e90e34656806"
|
||||
|
||||
|
||||
def test_create_environment(loop, project, manager):
|
||||
|
||||
response = {
|
||||
"Id": "e90e34656806",
|
||||
"Warnings": []
|
||||
}
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||
vm.environment = "YES=1\nNO=0"
|
||||
loop.run_until_complete(asyncio.async(vm.create()))
|
||||
mock.assert_called_with("POST", "containers/create", data={
|
||||
"Tty": True,
|
||||
"OpenStdin": True,
|
||||
"StdinOnce": False,
|
||||
"HostConfig":
|
||||
{
|
||||
"CapAdd": ["ALL"],
|
||||
"Binds": [
|
||||
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||
],
|
||||
"Privileged": True
|
||||
},
|
||||
"Env": [
|
||||
"container=docker",
|
||||
"GNS3_MAX_ETHERNET=eth0",
|
||||
"GNS3_VOLUMES=/etc/network",
|
||||
"YES=1",
|
||||
"NO=0"
|
||||
],
|
||||
"Volumes": {},
|
||||
"NetworkDisabled": True,
|
||||
"Name": "test",
|
||||
"Hostname": "test",
|
||||
"Image": "ubuntu:latest",
|
||||
"Entrypoint": ["/gns3/init.sh"],
|
||||
"Cmd": ["/bin/sh"]
|
||||
})
|
||||
assert vm._cid == "e90e34656806"
|
||||
|
||||
|
||||
def test_create_image_not_available(loop, project, manager):
|
||||
|
||||
call = 0
|
||||
|
||||
@asyncio.coroutine
|
||||
def informations():
|
||||
nonlocal call
|
||||
if call == 0:
|
||||
call += 1
|
||||
raise DockerHttp404Error("missing")
|
||||
else:
|
||||
return {}
|
||||
|
||||
response = {
|
||||
"Id": "e90e34656806",
|
||||
"Warnings": []
|
||||
}
|
||||
|
||||
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
|
||||
vm._get_image_informations = MagicMock()
|
||||
vm._get_image_informations.side_effect = informations
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM.pull_image", return_value=True) as mock_pull:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
loop.run_until_complete(asyncio.async(vm.create()))
|
||||
mock.assert_called_with("POST", "containers/create", data={
|
||||
"Tty": True,
|
||||
"OpenStdin": True,
|
||||
"StdinOnce": False,
|
||||
"HostConfig":
|
||||
{
|
||||
"CapAdd": ["ALL"],
|
||||
"Binds": [
|
||||
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||
],
|
||||
"Privileged": True
|
||||
},
|
||||
"Volumes": {},
|
||||
"NetworkDisabled": True,
|
||||
"Name": "test",
|
||||
"Hostname": "test",
|
||||
"Image": "ubuntu:latest",
|
||||
"Env": [
|
||||
"container=docker",
|
||||
"GNS3_MAX_ETHERNET=eth0",
|
||||
"GNS3_VOLUMES=/etc/network"
|
||||
],
|
||||
"Entrypoint": ["/gns3/init.sh"],
|
||||
"Cmd": ["/bin/sh"]
|
||||
})
|
||||
assert vm._cid == "e90e34656806"
|
||||
mock_pull.assert_called_with("ubuntu:latest")
|
||||
|
||||
|
||||
def test_get_container_state(loop, vm):
|
||||
response = {
|
||||
"State": {
|
||||
"Error": "",
|
||||
"ExitCode": 9,
|
||||
"FinishedAt": "2015-01-06T15:47:32.080254511Z",
|
||||
"OOMKilled": False,
|
||||
"Paused": False,
|
||||
"Pid": 0,
|
||||
"Restarting": False,
|
||||
"Running": True,
|
||||
"StartedAt": "2015-01-06T15:47:32.072697474Z"
|
||||
}
|
||||
}
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "running"
|
||||
|
||||
response["State"]["Running"] = False
|
||||
response["State"]["Paused"] = True
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "paused"
|
||||
|
||||
response["State"]["Running"] = False
|
||||
response["State"]["Paused"] = False
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "exited"
|
||||
|
||||
|
||||
def test_is_running(loop, vm):
|
||||
response = {
|
||||
"State": {
|
||||
"Running": False,
|
||||
"Paused": False
|
||||
}
|
||||
}
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
assert loop.run_until_complete(asyncio.async(vm.is_running())) is False
|
||||
|
||||
response["State"]["Running"] = True
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
|
||||
assert loop.run_until_complete(asyncio.async(vm.is_running())) is True
|
||||
|
||||
|
||||
def test_pause(loop, vm):
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
|
||||
loop.run_until_complete(asyncio.async(vm.pause()))
|
||||
|
||||
mock.assert_called_with("POST", "containers/e90e34656842/pause")
|
||||
assert vm.status == "paused"
|
||||
|
||||
|
||||
def test_unpause(loop, vm):
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
|
||||
loop.run_until_complete(asyncio.async(vm.unpause()))
|
||||
|
||||
mock.assert_called_with("POST", "containers/e90e34656842/unpause")
|
||||
|
||||
|
||||
def test_start(loop, vm, manager, free_console_port):
|
||||
|
||||
assert vm.status != "started"
|
||||
vm.adapters = 1
|
||||
|
||||
vm.allocate_aux = True
|
||||
vm._start_aux = AsyncioMagicMock()
|
||||
|
||||
vm._get_container_state = AsyncioMagicMock(return_value="stopped")
|
||||
vm._start_ubridge = AsyncioMagicMock()
|
||||
vm._get_namespace = AsyncioMagicMock(return_value=42)
|
||||
vm._add_ubridge_connection = AsyncioMagicMock()
|
||||
vm._start_console = AsyncioMagicMock()
|
||||
|
||||
nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
||||
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||
loop.run_until_complete(asyncio.async(vm.start()))
|
||||
|
||||
mock_query.assert_called_with("POST", "containers/e90e34656842/start")
|
||||
vm._add_ubridge_connection.assert_called_once_with(nio, 0, 42)
|
||||
assert vm._start_ubridge.called
|
||||
assert vm._start_console.called
|
||||
assert vm._start_aux.called
|
||||
assert vm.status == "started"
|
||||
|
||||
|
||||
def test_start_namespace_failed(loop, vm, manager, free_console_port):
|
||||
|
||||
assert vm.status != "started"
|
||||
vm.adapters = 1
|
||||
|
||||
nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
||||
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._start_ubridge") as mock_start_ubridge:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_namespace", return_value=42) as mock_namespace:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._add_ubridge_connection", side_effect=UbridgeNamespaceError()) as mock_add_ubridge_connection:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_log", return_value='Hello not available') as mock_log:
|
||||
|
||||
with pytest.raises(DockerError):
|
||||
loop.run_until_complete(asyncio.async(vm.start()))
|
||||
|
||||
mock_query.assert_any_call("POST", "containers/e90e34656842/start")
|
||||
mock_add_ubridge_connection.assert_called_once_with(nio, 0, 42)
|
||||
assert mock_start_ubridge.called
|
||||
assert vm.status == "stopped"
|
||||
|
||||
|
||||
def test_start_without_nio(loop, vm, manager, free_console_port):
|
||||
"""
|
||||
If no nio exists we will create one.
|
||||
"""
|
||||
|
||||
assert vm.status != "started"
|
||||
vm.adapters = 1
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._start_ubridge") as mock_start_ubridge:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_namespace", return_value=42) as mock_namespace:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._add_ubridge_connection") as mock_add_ubridge_connection:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._start_console") as mock_start_console:
|
||||
loop.run_until_complete(asyncio.async(vm.start()))
|
||||
|
||||
mock_query.assert_called_with("POST", "containers/e90e34656842/start")
|
||||
assert mock_add_ubridge_connection.called
|
||||
assert mock_start_ubridge.called
|
||||
assert mock_start_console.called
|
||||
assert vm.status == "started"
|
||||
|
||||
|
||||
def test_start_unpause(loop, vm, manager, free_console_port):
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="paused"):
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM.unpause", return_value="paused") as mock:
|
||||
loop.run_until_complete(asyncio.async(vm.start()))
|
||||
assert mock.called
|
||||
assert vm.status == "started"
|
||||
|
||||
|
||||
def test_restart(loop, vm):
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
|
||||
loop.run_until_complete(asyncio.async(vm.restart()))
|
||||
|
||||
mock.assert_called_with("POST", "containers/e90e34656842/restart")
|
||||
|
||||
|
||||
def test_stop(loop, vm):
|
||||
vm._ubridge_hypervisor = MagicMock()
|
||||
vm._ubridge_hypervisor.is_running.return_value = True
|
||||
vm._fix_permissions = MagicMock()
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="running"):
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||
loop.run_until_complete(asyncio.async(vm.stop()))
|
||||
mock_query.assert_called_with("POST", "containers/e90e34656842/stop", params={"t": 5})
|
||||
assert vm._ubridge_hypervisor.stop.called
|
||||
assert vm._fix_permissions.called
|
||||
|
||||
|
||||
def test_stop_paused_container(loop, vm):
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="paused"):
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM.unpause") as mock_unpause:
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
|
||||
loop.run_until_complete(asyncio.async(vm.stop()))
|
||||
mock_query.assert_called_with("POST", "containers/e90e34656842/stop", params={"t": 5})
|
||||
assert mock_unpause.called
|
||||
|
||||
|
||||
def test_update(loop, vm):
|
||||
|
||||
response = {
|
||||
"Id": "e90e34656806",
|
||||
"Warnings": []
|
||||
}
|
||||
|
||||
original_console = vm.console
|
||||
original_aux = vm.aux
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
|
||||
loop.run_until_complete(asyncio.async(vm.update()))
|
||||
|
||||
mock_query.assert_any_call("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})
|
||||
mock_query.assert_any_call("POST", "containers/create", data={
|
||||
"Tty": True,
|
||||
"OpenStdin": True,
|
||||
"StdinOnce": False,
|
||||
"HostConfig":
|
||||
{
|
||||
"CapAdd": ["ALL"],
|
||||
"Binds": [
|
||||
"{}:/gns3:ro".format(get_resource("modules/docker/resources")),
|
||||
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
|
||||
],
|
||||
"Privileged": True
|
||||
},
|
||||
"Volumes": {},
|
||||
"NetworkDisabled": True,
|
||||
"Name": "test",
|
||||
"Hostname": "test",
|
||||
"Image": "ubuntu:latest",
|
||||
"Env": [
|
||||
"container=docker",
|
||||
"GNS3_MAX_ETHERNET=eth0",
|
||||
"GNS3_VOLUMES=/etc/network"
|
||||
],
|
||||
"Entrypoint": ["/gns3/init.sh"],
|
||||
"Cmd": ["/bin/sh"]
|
||||
})
|
||||
assert vm.console == original_console
|
||||
assert vm.aux == original_aux
|
||||
|
||||
|
||||
def test_update_vnc(loop, vm):
|
||||
|
||||
response = {
|
||||
"Id": "e90e34656806",
|
||||
"Warnings": []
|
||||
}
|
||||
|
||||
vm.console_type = "vnc"
|
||||
vm.console = 5900
|
||||
vm._display = "display"
|
||||
original_console = vm.console
|
||||
original_aux = vm.aux
|
||||
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._start_vnc"):
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
|
||||
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
|
||||
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
|
||||
loop.run_until_complete(asyncio.async(vm.update()))
|
||||
|
||||
assert vm.console == original_console
|
||||
assert vm.aux == original_aux
|
||||
|
||||
|
||||
def test_update_running(loop, vm):
    """Updating a running container recreates it and restarts it afterwards."""

    response = {
        "Id": "e90e34656806",
        "Warnings": []
    }

    original_console = vm.console
    vm.start = MagicMock()

    with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu:latest"}]) as mock_list_images:
        with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="running"):
            with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
                # asyncio.async() is a syntax error on Python >= 3.7 and was
                # removed in 3.10; ensure_future() is the supported spelling.
                loop.run_until_complete(asyncio.ensure_future(vm.update()))

    # The old container is force-removed and a new one is created with the
    # same configuration.
    mock_query.assert_any_call("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})
    mock_query.assert_any_call("POST", "containers/create", data={
        "Tty": True,
        "OpenStdin": True,
        "StdinOnce": False,
        "HostConfig":
        {
            "CapAdd": ["ALL"],
            "Binds": [
                "{}:/gns3:ro".format(get_resource("modules/docker/resources")),
                "{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
            ],
            "Privileged": True
        },
        "Volumes": {},
        "NetworkDisabled": True,
        "Name": "test",
        "Hostname": "test",
        "Image": "ubuntu:latest",
        "Env": [
            "container=docker",
            "GNS3_MAX_ETHERNET=eth0",
            "GNS3_VOLUMES=/etc/network"
        ],
        "Entrypoint": ["/gns3/init.sh"],
        "Cmd": ["/bin/sh"]
    })

    assert vm.console == original_console
    # A VM that was running before the update must be started again.
    assert vm.start.called


def test_delete(loop, vm):
    """Deleting a stopped VM force-removes its container and its volumes."""

    with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
        with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
            # asyncio.async() is a syntax error on Python >= 3.7 and was
            # removed in 3.10; ensure_future() is the supported spelling.
            loop.run_until_complete(asyncio.ensure_future(vm.delete()))
        mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})


def test_close(loop, vm, port_manager):
    """Closing the VM removes the container and releases its UDP ports."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    # asyncio.async() is a syntax error on Python >= 3.7 and was removed in
    # 3.10; ensure_future() is the supported spelling.
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))

    with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
        with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
            loop.run_until_complete(asyncio.ensure_future(vm.close()))
        mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})

    assert vm._closed is True
    assert "4242" not in port_manager.udp_ports


def test_close_vnc(loop, vm, port_manager):
    """Closing a VNC-console VM also terminates the Xvfb/x11vnc helper processes."""

    vm._console_type = "vnc"
    vm._x11vnc_process = MagicMock()
    vm._xvfb_process = MagicMock()

    with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
        with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
            # asyncio.async() is a syntax error on Python >= 3.7 and was
            # removed in 3.10; ensure_future() is the supported spelling.
            loop.run_until_complete(asyncio.ensure_future(vm.close()))
        mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1, "v": 1})

    assert vm._closed is True
    assert vm._xvfb_process.terminate.called


def test_get_namespace(loop, vm):
    """The network namespace of the container is its PID as reported by inspect."""

    response = {
        "State": {
            "Pid": 42
        }
    }
    with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        assert loop.run_until_complete(asyncio.ensure_future(vm._get_namespace())) == 42
    mock_query.assert_called_with("GET", "containers/e90e34656842/json")


def test_add_ubridge_connection(loop, vm):
    """Attaching a capturing UDP NIO sends the full uBridge setup sequence."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    nio.startPacketCapture("/tmp/capture.pcap")
    vm._ubridge_hypervisor = MagicMock()

    # asyncio.async() is a syntax error on Python >= 3.7 and was removed in
    # 3.10; ensure_future() is the supported spelling.
    loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 0, 42)))

    calls = [
        call.send("docker create_veth veth-gns3-e0 veth-gns3-i0"),
        call.send('docker move_to_ns veth-gns3-i0 42 eth0'),
        call.send('bridge create bridge0'),
        call.send('bridge add_nio_linux_raw bridge0 veth-gns3-e0'),
        call.send('bridge add_nio_udp bridge0 4242 127.0.0.1 4343'),
        call.send('bridge start_capture bridge0 "/tmp/capture.pcap"'),
        call.send('bridge start bridge0')
    ]
    # We need to check any_order otherwise mock is confused by asyncio
    vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)


def test_add_ubridge_connection_none_nio(loop, vm):
    """With no NIO, only the veth pair is created and moved into the namespace."""

    nio = None
    vm._ubridge_hypervisor = MagicMock()

    # asyncio.async() is a syntax error on Python >= 3.7 and was removed in
    # 3.10; ensure_future() is the supported spelling.
    loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 0, 42)))

    calls = [
        call.send("docker create_veth veth-gns3-e0 veth-gns3-i0"),
        call.send('docker move_to_ns veth-gns3-i0 42 eth0'),
    ]
    # We need to check any_order otherwise mock is confused by asyncio
    vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)


def test_add_ubridge_connection_invalid_adapter_number(loop, vm):
    """An out-of-range adapter number raises DockerError."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    with pytest.raises(DockerError):
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 12, 42)))


def test_add_ubridge_connection_no_free_interface(loop, vm):
    """When every veth name is taken, the connection attempt raises DockerError."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    with pytest.raises(DockerError):

        # We create fake ethernet interfaces for docker
        interfaces = ["veth-gns3-e{}".format(index) for index in range(4096)]

        with patch("psutil.net_if_addrs", return_value=interfaces):
            # asyncio.async() is a syntax error on Python >= 3.7 and was
            # removed in 3.10; ensure_future() is the supported spelling.
            loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 0, 42)))


def test_delete_ubridge_connection(loop, vm):
    """Deleting a connection tears down the bridge and the veth pair."""

    vm._ubridge_hypervisor = MagicMock()
    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)

    # asyncio.async() is a syntax error on Python >= 3.7 and was removed in
    # 3.10; ensure_future() is the supported spelling.
    loop.run_until_complete(asyncio.ensure_future(vm._add_ubridge_connection(nio, 0, 42)))
    loop.run_until_complete(asyncio.ensure_future(vm._delete_ubridge_connection(0)))

    calls = [
        call.send("bridge delete bridge0"),
        call.send('docker delete_veth veth-gns3-e0')
    ]
    vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)


def test_adapter_add_nio_binding(vm, loop):
    """Binding a UDP NIO to adapter 0 stores it on the ethernet adapter."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    # asyncio.async() is a syntax error on Python >= 3.7 and was removed in
    # 3.10; ensure_future() is the supported spelling.
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))
    assert vm._ethernet_adapters[0].get_nio(0) == nio


def test_adapter_add_nio_binding_invalid_adapter(vm, loop):
    """Binding to a non-existent adapter number raises DockerError."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    with pytest.raises(DockerError):
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(12, nio)))


def test_adapter_remove_nio_binding(vm, loop):
    """Removing a binding clears the NIO and deletes the uBridge connection."""

    nio = {"type": "nio_udp",
           "lport": 4242,
           "rport": 4343,
           "rhost": "127.0.0.1"}
    nio = vm.manager.create_nio(0, nio)
    # asyncio.async() is a syntax error on Python >= 3.7 and was removed in
    # 3.10; ensure_future() is the supported spelling.
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))
    with asyncio_patch("gns3server.modules.docker.DockerVM._delete_ubridge_connection") as delete_ubridge_mock:
        loop.run_until_complete(asyncio.ensure_future(vm.adapter_remove_nio_binding(0)))
        assert vm._ethernet_adapters[0].get_nio(0) is None
        delete_ubridge_mock.assert_called_with(0)


def test_adapter_remove_nio_binding_invalid_adapter(vm, loop):
    """Unbinding a non-existent adapter number raises DockerError."""

    with pytest.raises(DockerError):
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        loop.run_until_complete(asyncio.ensure_future(vm.adapter_remove_nio_binding(12)))


def test_pull_image(loop, vm):
    """Pulling an image consumes the chunked JSON progress stream until EOF."""

    class Response:
        """
        Simulate a response split into multiple packets
        """

        def __init__(self):
            self._read = -1

        @asyncio.coroutine
        def read(self, size):
            self._read += 1
            if self._read == 0:
                return b'{"progress": "0/100",'
            elif self._read == 1:
                # NOTE(review): this chunk is str while the first is bytes —
                # presumably deliberate to exercise both paths; confirm.
                return '"id": 42}'
            else:
                # End of stream. The original had a bare `None` expression
                # here (a no-op); return it explicitly.
                return None

    mock_query = MagicMock()
    mock_query.content.return_value = Response()

    with asyncio_patch("gns3server.modules.docker.Docker.http_query", return_value=mock_query) as mock:
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        loop.run_until_complete(asyncio.ensure_future(vm.pull_image("ubuntu:latest")))
        mock.assert_called_with("POST", "images/create", params={"fromImage": "ubuntu:latest"})


def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
    """Starting a capture on a bound adapter marks its NIO as capturing."""

    output_file = str(tmpdir / "test.pcap")
    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
    # asyncio.async() is a syntax error on Python >= 3.7 and was removed in
    # 3.10; ensure_future() is the supported spelling.
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))
    loop.run_until_complete(asyncio.ensure_future(vm.start_capture(0, output_file)))
    assert vm._ethernet_adapters[0].get_nio(0).capturing


def test_stop_capture(vm, tmpdir, manager, free_console_port, loop):
    """Stopping a running capture clears the NIO's capturing flag."""

    output_file = str(tmpdir / "test.pcap")
    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
    # asyncio.async() is a syntax error on Python >= 3.7 and was removed in
    # 3.10; ensure_future() is the supported spelling (also used for
    # start_capture below, for consistency with the sibling tests).
    loop.run_until_complete(asyncio.ensure_future(vm.adapter_add_nio_binding(0, nio)))
    loop.run_until_complete(asyncio.ensure_future(vm.start_capture(0, output_file)))
    assert vm._ethernet_adapters[0].get_nio(0).capturing
    loop.run_until_complete(asyncio.ensure_future(vm.stop_capture(0)))
    assert vm._ethernet_adapters[0].get_nio(0).capturing is False


def test_get_log(loop, vm):
    """Fetching the container log issues a GET on the logs endpoint."""

    @asyncio.coroutine
    def read():
        return b'Hello\nWorld'

    mock_query = MagicMock()
    mock_query.read = read

    with asyncio_patch("gns3server.modules.docker.Docker.http_query", return_value=mock_query) as mock:
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        loop.run_until_complete(asyncio.ensure_future(vm._get_log()))
        mock.assert_called_with("GET", "containers/e90e34656842/logs", params={"stderr": 1, "stdout": 1}, data={})


def test_get_image_informations(project, manager, loop):
    """Image information is fetched with a GET on the image's json endpoint."""

    response = {
    }
    with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
        vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest")
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        loop.run_until_complete(asyncio.ensure_future(vm._get_image_informations()))
        mock.assert_called_with("GET", "images/ubuntu:latest/json")


def test_mount_binds(vm, tmpdir):
    """Image-declared volumes are added after the standard gns3 binds and
    mirrored under the VM working directory."""

    image_infos = {
        "ContainerConfig": {
            "Volumes": {
                "/test/experimental": {}
            }
        }
    }

    mirror_dir = os.path.join(vm.working_dir, "test/experimental")
    expected_binds = [
        "{}:/gns3:ro".format(get_resource("modules/docker/resources")),
        "{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
        "{}:/gns3volumes{}".format(mirror_dir, "/test/experimental"),
    ]
    assert vm._mount_binds(image_infos) == expected_binds

    assert vm._volumes == ["/etc/network", "/test/experimental"]
    # The host-side mirror directory must have been created on disk.
    assert os.path.exists(mirror_dir)


def test_start_vnc(vm, loop):
    """Starting the VNC console spawns Xvfb and x11vnc with the configured resolution."""

    vm.console_resolution = "1280x1024"
    with patch("shutil.which", return_value="/bin/x"):
        with asyncio_patch("gns3server.modules.docker.docker_vm.wait_for_file_creation") as mock_wait:
            with asyncio_patch("asyncio.create_subprocess_exec") as mock_exec:
                # asyncio.async() is a syntax error on Python >= 3.7 and was
                # removed in 3.10; ensure_future() is the supported spelling.
                loop.run_until_complete(asyncio.ensure_future(vm._start_vnc()))
    assert vm._display is not None
    mock_exec.assert_any_call("Xvfb", "-nolisten", "tcp", ":{}".format(vm._display), "-screen", "0", "1280x1024x16")
    mock_exec.assert_any_call("x11vnc", "-forever", "-nopw", "-shared", "-geometry", "1280x1024", "-display", "WAIT:{}".format(vm._display), "-rfbport", str(vm.console), "-rfbportv6", str(vm.console), "-noncache", "-listen", "127.0.0.1")
    mock_wait.assert_called_with("/tmp/.X11-unix/X{}".format(vm._display))


def test_start_vnc_xvfb_missing(vm, loop):
    """Starting VNC without Xvfb installed raises DockerError."""

    with pytest.raises(DockerError):
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        loop.run_until_complete(asyncio.ensure_future(vm._start_vnc()))


def test_start_aux(vm, loop):
    """The aux console is a busybox shell exec'd inside the container."""

    with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=MagicMock()) as mock_exec:
        # asyncio.async() is a syntax error on Python >= 3.7 and was removed
        # in 3.10; ensure_future() is the supported spelling.
        loop.run_until_complete(asyncio.ensure_future(vm._start_aux()))
    mock_exec.assert_called_with('docker', 'exec', '-i', 'e90e34656842', '/gns3/bin/busybox', 'script', '-qfc', 'while true; do /gns3/bin/busybox sh; done', '/dev/null', stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)


def test_create_network_interfaces(vm):
    """The generated network config declares exactly one stanza per adapter."""

    vm.adapters = 5
    config_dir = vm._create_network_config()
    for entry in ("interfaces", "if-up.d"):
        assert os.path.exists(os.path.join(config_dir, entry))

    with open(os.path.join(config_dir, "interfaces")) as f:
        content = f.read()

    # Adapters are numbered eth0..eth4; eth5 must not appear.
    assert "eth0" in content
    assert "eth4" in content
    assert "eth5" not in content


def test_fix_permission(vm, loop):
    """Fixing permissions runs busybox inside the container to record and
    rewrite ownership of every declared volume."""

    vm._volumes = ["/etc"]
    fake_process = MagicMock()
    with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=fake_process) as mock_exec:
        loop.run_until_complete(vm._fix_permissions())
    mock_exec.assert_called_with('docker', 'exec', 'e90e34656842', '/gns3/bin/busybox', 'sh', '-c', '(/gns3/bin/busybox find "/etc" -depth -print0 | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c \'%a:%u:%g:%n\' > "/etc/.gns3_perms") && /gns3/bin/busybox chmod -R u+rX "/etc" && /gns3/bin/busybox chown {}:{} -R "/etc"'.format(os.getuid(), os.getgid()))
    # The test must wait for the exec'd process to finish.
    assert fake_process.wait.called
@ -118,6 +118,7 @@ def test_start(loop, vm, monkeypatch):
|
||||
assert vm.is_running()
|
||||
assert vm.command_line == ' '.join(mock_exec.call_args[0])
|
||||
|
||||
|
||||
def test_start_with_iourc(loop, vm, monkeypatch, tmpdir):
|
||||
|
||||
fake_file = str(tmpdir / "iourc")
|
||||
@ -215,7 +216,7 @@ def test_path(vm, fake_iou_bin):
|
||||
def test_path_12_location(vm, fake_iou_bin):
|
||||
|
||||
# In 1.2 users uploaded images to the images roots
|
||||
# after the migration their images are inside images/IOU
|
||||
# but old topologies use old path
|
||||
with patch("gns3server.config.Config.get_section_config", return_value={"local": True}):
|
||||
vm.path = fake_iou_bin.replace("/IOU", "")
|
||||
@ -341,7 +342,7 @@ def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
|
||||
|
||||
output_file = str(tmpdir / "test.pcap")
|
||||
nio = manager.create_nio(vm.iouyap_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
||||
vm.adapter_add_nio_binding(0, 0, nio)
|
||||
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
|
||||
loop.run_until_complete(asyncio.async(vm.start_capture(0, 0, output_file)))
|
||||
assert vm._adapters[0].get_nio(0).capturing
|
||||
|
||||
@ -350,7 +351,7 @@ def test_stop_capture(vm, tmpdir, manager, free_console_port, loop):
|
||||
|
||||
output_file = str(tmpdir / "test.pcap")
|
||||
nio = manager.create_nio(vm.iouyap_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
|
||||
vm.adapter_add_nio_binding(0, 0, nio)
|
||||
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
|
||||
loop.run_until_complete(vm.start_capture(0, 0, output_file))
|
||||
assert vm._adapters[0].get_nio(0).capturing
|
||||
loop.run_until_complete(asyncio.async(vm.stop_capture(0, 0)))
|
||||
@ -412,6 +413,14 @@ def test_iourc_content(vm):
|
||||
assert f.read() == "test"
|
||||
|
||||
|
||||
def test_iourc_content_fix_carriage_return(vm):
    """Windows CRLF line endings in iourc content are normalized to LF on disk."""

    vm.iourc_content = "test\r\n12"

    iourc_path = os.path.join(vm.temporary_directory, "iourc")
    with open(iourc_path) as f:
        assert f.read() == "test\n12"


def test_extract_configs(vm):
    """With no saved NVRAM there is neither a startup nor a private config."""
    startup_config, private_config = vm.extract_configs()
    assert startup_config is None
    assert private_config is None
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user