Compare commits


96 Commits

SHA1 Message Date
c2684f056c Release 2.1.0 2017-11-09 07:29:46 +01:00
009677f019 Fix typo in vcpus on VirtualBoxVM, fixes: #1213 2017-11-07 11:15:49 +01:00
9c3ee02797 Development on v2.1.0dev10 2017-11-07 10:19:19 +01:00
50c8fe070d Release v2.1.0rc4 2017-11-07 08:47:40 +01:00
ace7b0c69f Merge pull request #1212 from GNS3/fix-vcpus-controll-on-vmware
Fix GNS3VM vCPUs control on VMware, fixes: #2324
2017-11-06 23:46:04 +08:00
255dc7ed9b Fix GNS3VM vCPUs control on VMware, fixes: #2324 2017-11-06 15:15:37 +01:00
75ceffb458 Fix typo in sample gns3_server.conf. Fixes #1210. 2017-11-03 16:15:03 +08:00
21c2429aee Merge pull request #1209 from GNS3/ignore-env
Ignore env files
2017-10-30 17:43:39 +07:00
8cc04f25e2 Merge pull request #1208 from GNS3/info-about-api
Warning for getting endpoint of compute
2017-10-30 17:41:31 +07:00
815dca101f Ignore env files 2017-10-30 11:30:06 +01:00
022d76e88a Warning for getting endpoint of compute 2017-10-30 11:26:57 +01:00
2c0804d4ee Merge pull request #1204 from GNS3/loop-speed-on-win
Disable loop debug on default
2017-10-27 16:43:54 +07:00
b2c5e175d7 Enable debug loop only on debug mode 2017-10-27 11:19:06 +02:00
2171072ae4 Enable debug mode on async loop only in dev/debug mode 2017-10-26 12:24:01 +02:00
8fcfed53ed Add warning when using IOU with a hostname length above 15 characters. 2017-10-24 15:07:32 +07:00
df468bf2af Improve VMware VMs discovery process. Ref #1201. 2017-10-22 17:33:21 +07:00
8930b4c6e5 Improve error message when IOU VM process is unexpectedly stopped. 2017-10-22 16:28:54 +07:00
b4307e6035 Improved error message when the number of network adapters is above the maximum for VirtualBox VMs.
Better support for potential future chipsets in addition of PIIX3 and ICH9.
2017-10-22 15:23:43 +07:00
6a87047c18 Added localhost and gns3.github.io CORS 2017-10-20 13:21:43 +02:00
d918330472 Back to dev. at 2.1.0dev9 2017-10-20 08:37:47 +02:00
8fde8789f1 Fix Travis deployment 2017-10-19 09:57:52 +02:00
aaa8c4895f Development on 2.1.0dev9 2017-10-19 08:58:28 +02:00
66d5cb520d Release 2.1.0 rc3 2017-10-19 08:56:06 +02:00
d2a7f724e3 Merge pull request #1198 from GNS3/vmware-config
Set vhv.enable before run for VMware. Fixes #1184
2017-10-18 17:42:56 +07:00
c7015435c9 Update vmware_gns3_vm.py 2017-10-18 15:53:37 +07:00
44efab109e Set vhv.enable before run for VMWare 14. Fixes #1184 2017-10-18 10:32:56 +02:00
98b201e9e5 Tweak how VMware VMs are found with fallback to search in the default location for VMs. 2017-10-18 15:27:03 +07:00
1f4b87ae9c Merge pull request #1197 from GNS3/fix-win-tests
fix passing tests on Windows
2017-10-16 19:44:22 +08:00
bc782b6896 fix passing tests on Windows 2017-10-16 13:02:44 +02:00
4cae6b678a Merge pull request #1194 from GNS3/direct-file-uploads
Endpoint for obtaining direct action on compute
2017-10-16 17:27:35 +08:00
3a5a771b19 Merge pull request #1195 from ddragic/qemu-log-macaddress
QEMU: fix logging of base mac address when creating a new node
2017-10-14 16:00:22 +02:00
44bb912139 QEMU: fix logging of base mac address when creating a new node 2017-10-14 13:55:16 +02:00
74e7201ea6 Sync appliance files. 2017-10-13 17:56:42 +08:00
4f13c63a52 Do not use builtin name. 2017-10-13 17:51:19 +08:00
02ad98664f Fix creation of an VMware VM failed with invalid JSON. Fixes #2282. 2017-10-13 17:50:23 +08:00
b654d78ea6 Endpoint for obtaining direct action on compute 2017-10-13 11:03:56 +02:00
244a86bcbc Merge remote-tracking branch 'origin/2.1' into 2.1 2017-10-12 23:33:00 +08:00
1524493c33 Fix IOU detection of layer 1 keepalive support. Fixes #1183. 2017-10-12 23:32:45 +08:00
e52b83be11 Merge pull request #1192 from GNS3/travis-docker-tests
Tests on TravisCI with docker compose
2017-10-12 01:52:03 -07:00
0c7a92b710 Tests on TravisCI with docker compose 2017-10-12 10:40:49 +02:00
67c78ba2e3 Merge pull request #1188 from GNS3/fix-post-file-normalization
Fixes path normalization during file upload on nodes (Fixes: #2276)
2017-10-05 02:07:02 -07:00
9619827f44 Reenable tests and back to development on 2.1.0dev8 2017-10-04 13:05:00 +02:00
5b645cd455 Temporary disable passing tests 2017-10-04 12:58:53 +02:00
7d19312d44 Development on 2.1.0dev8 2017-10-04 11:39:52 +02:00
81048f44d6 Release 2.1.0 rc2 2017-10-04 11:37:12 +02:00
be35ad6874 Fixes path normalization during file upload on nodes (Fixes: #2276) 2017-10-03 18:13:19 +02:00
897f7656da Merge pull request #1185 from GNS3/fix-delete-project
Don't create directory structure during json dump. Fixes #2270
2017-10-02 13:49:46 -07:00
3d85bba9d4 Don't create directory structure during json dump. Fixes #2270 2017-10-02 10:41:57 +02:00
dbd41e44f8 Bump version to 2.1.0dev7 2017-10-02 04:04:38 +08:00
7269285b0f Add more information when qemu-img fails. 2017-10-02 00:47:16 +08:00
2a4ed9ba89 Fix issue with multidict when upgrading GNS3 VM to use dev channel. 2017-09-23 18:09:10 +08:00
a567b394f3 Restore file permissions fails for volumes with soft links. Fixes #1180. 2017-09-21 22:45:17 +08:00
d76bcf7078 Use RAW sockets by default on Linux. 2017-09-19 12:39:55 +07:00
fcd2c7b6d4 Add missing https console keyword in JSON schema. Fixes #1179. 2017-09-19 12:38:35 +07:00
60bc386d6b Merge remote-tracking branch 'origin/2.1' into 2.1 2017-09-14 17:58:25 +07:00
e1c8df170a Allow projects to be opened even when a node port is already used. 2017-09-14 17:57:58 +07:00
cdfce26b61 Development on v2.1.0dev6 2017-09-13 09:31:30 +02:00
f415d38ccd Release v2.1.0rc1 2017-09-13 09:29:31 +02:00
6a8f220ff1 Fix NAT test. 2017-09-11 15:09:32 +07:00
9e4cfaf787 Fix NAT node not working on Windows. Fixes #1163. 2017-09-11 13:02:26 +07:00
4a2dfb0037 Do not prevent a project to be deleted. Fixes #2237. 2017-09-06 18:12:22 +07:00
b538bd0a29 Back to development on v2.1.0dev5 2017-09-05 15:22:49 +02:00
36210b10c6 Re-release 2.1.0b2 2017-09-05 11:22:43 +02:00
fd623a5ad2 Fix test_ubridge_apply_filters 2017-09-05 11:07:10 +02:00
e5a7eb74ed Fix project tests 2017-09-05 10:38:17 +02:00
daf7ed8c95 Development on 2.1.0dev5 2017-09-05 08:39:53 +02:00
1f87d8222b Release 2.1.0 beta 2 2017-09-05 08:38:42 +02:00
90e3a8d612 Round-robin nodes across all available compute resources. Fixes #1165. 2017-09-03 22:51:51 +07:00
c22229101f Try to improve error reporting when streaming a PCAP file. Ref #2235. 2017-09-01 17:10:24 +07:00
ee486b32bd Use Npcap DLL on Windows when checking for uBridge version. 2017-09-01 16:22:43 +07:00
7492899b95 Merge pull request #1173 from GNS3/console-tests
Windows console bugfix tests
2017-08-29 17:45:29 +07:00
807bea40d4 Fixes running switch console inside PyCharm terminal (Ref. #1172) 2017-08-29 12:28:53 +02:00
4e3eee33fc Windows console bugfix tests 2017-08-29 10:08:10 +02:00
a7297ffca4 Merge pull request #1169 from GNS3/checking-nio-existance
Added checking if NIO exists (Fixes #1160)
2017-08-25 16:34:44 +08:00
2806b7b286 Merge pull request #1170 from GNS3/load-meta-when-project-opens
Load meta of the project on loading time (Fixes #2225)
2017-08-25 16:30:23 +08:00
b0783eeccc Load meta of the project on loading time (Fixes #2225) 2017-08-22 09:36:52 +02:00
4fc29504f2 Merge pull request #1168 from GNS3/nat-on-windows
Fixes NAT node not working on Windows (#1163)
2017-08-21 14:24:05 +02:00
efae614af2 Added checking if NIO exists (Fixes #1160) 2017-08-21 11:03:23 +02:00
7523e9c4bc Fixes NAT node not working on Windows (#1163) 2017-08-21 10:17:56 +02:00
3dd5dbe0c1 Merge pull request #1161 from GNS3/empty-link-on-compute
Fixes loading project when link_id is not set (#1159)
2017-08-10 22:09:49 +08:00
e15d1cd350 Fixes loading project when link_id is not set (#1159) 2017-08-10 10:02:18 +02:00
fc7377e999 Merge pull request #1158 from GNS3/platform-on-appliances-list
Return platform value on appliances list (Fixes #2211)
2017-08-08 22:16:40 +08:00
3fd86f5807 Return platform value on appliances list (Fixes #2211) 2017-08-08 13:58:43 +02:00
fc9d44011b Merge pull request #1157 from GNS3/bugfix-1156
Fixes not known category in Appliances (Fixes #1156)
2017-08-07 16:46:14 +08:00
3a1fd892ac Fixes not known category in Appliances (Fixes #1156) 2017-08-07 10:00:46 +02:00
3caba334c0 Development on 2.1.0dev4 2017-08-04 11:39:31 +02:00
e2de872036 Release 2.1.0 beta 1 2017-08-04 11:38:33 +02:00
44fc87a24c Sync appliances 2017-08-04 10:59:05 +02:00
2dc83d7894 Interface starting with gns3 are not display by default in the cloud
Fix https://github.com/GNS3/gns3-gui/issues/2199
2017-08-03 16:33:42 +02:00
211f48d981 Catch error when something that is not the GNS3 server answer to virtualbox requests
Fix #1155
2017-08-03 16:31:28 +02:00
3bb8cdafb2 Catch KeyError: <aiohttp.connector._TransportPlaceholder
Fix #1152
2017-08-02 10:16:49 +02:00
e74eaa8ea8 Add a warning when you try to load and the server is not started with --local
Ref #1151
2017-08-02 09:52:31 +02:00
8a028a6dce Merge pull request #1147 from GNS3/fix_docker_permission
Fix permission on exited container
2017-08-01 00:30:56 +08:00
1cde787ffa Sync appliances 2017-07-31 15:12:06 +02:00
81d77cabf9 Development on 2.1.0dev3 2017-07-31 11:57:45 +02:00
a93f3b0576 Fix permission on exited container
If a container is exited we quickly start it to fix
the permissions.

Fix https://github.com/GNS3/gns3-gui/issues/2181
2017-07-26 14:52:16 +02:00
95 changed files with 1835 additions and 239 deletions

.gitignore vendored (2 lines changed)
View File

@ -55,5 +55,5 @@ startup.vpcs
.gns3_shell_history
# Virtualenv
env.ropeproject
env
.ropeproject

View File

@ -1,17 +1,19 @@
language: python
python:
- '3.4'
- '3.5'
- '3.6'
sudo: false
cache: pip
sudo: required
services:
- docker
install:
- pip install -U setuptools pip
- python setup.py install
- pip install -rdev-requirements.txt
- sed -e 's/${PYTHON_VERSION}/'${PYTHON_VERSION}/g DockerfileTests.tpl > /tmp/DockerfileTests
- docker-compose build
script:
- mkdir ‡
- py.test -v -s tests --basetemp=‡
- docker-compose run tests
before_deploy:
- sudo pip install twine
- sudo pip install urllib3[secure]
deploy:
provider: pypi
user: noplay
@ -20,3 +22,9 @@ deploy:
on:
tags: true
repo: GNS3/gns3-server
env:
matrix:
- PYTHON_VERSION=3.4
- PYTHON_VERSION=3.5
- PYTHON_VERSION=3.6

View File

@ -1,5 +1,70 @@
# Change Log
## 2.1.0 09/11/2017
* Fix typo in vcpus on VirtualBoxVM, fixes: #1213
## 2.1.0rc4 07/11/2017
* Fix GNS3VM vCPUs control on VMware, fixes: #2324
* Fix typo in sample gns3_server.conf. Fixes #1210.
* Warning for getting endpoint of compute
* Enable debug mode on async loop only in dev/debug mode
* Add warning when using IOU with a hostname length above 15 characters.
* Improve VMware VMs discovery process. Ref #1201.
* Improve error message when IOU VM process is unexpectedly stopped.
* Improved error message when the number of network adapters is above the maximum for VirtualBox VMs. Better support for potential future chipsets in addition of PIIX3 and ICH9.
* Added localhost and gns3.github.io CORS
## 2.1.0rc3 19/10/2017
* Set vhv.enable before run for VMWare 14. Fixes #1184
* Tweak how VMware VMs are found with fallback to search in the default location for VMs.
* QEMU: fix logging of base mac address when creating a new node
* Sync appliance files.
* Fix creation of an VMware VM failed with invalid JSON. Fixes #2282.
* Endpoint for obtaining direct action on compute
* Fix IOU detection of layer 1 keepalive support. Fixes #1183.
* Fixes path normalization during file upload on nodes (Fixes: #2276)
## 2.1.0rc2 04/10/2017
* Don't create directory structure during json dump. Fixes #2270
* Add more information when qemu-img fails.
* Fix issue with multidict when upgrading GNS3 VM to use dev channel.
* Restore file permissions fails for volumes with soft links. Fixes #1180.
* Use RAW sockets by default on Linux.
* Add missing https console keyword in JSON schema. Fixes #1179.
* Allow projects to be opened even when a node port is already used.
## 2.1.0rc1 13/09/2017
* Fix NAT node not working on Windows. Fixes #1163.
* Do not prevent a project to be deleted. Fixes #2237.
## 2.1.0b2 05/09/2017
* Round-robin nodes across all available compute resources. Fixes #1165.
* Try to improve error reporting when streaming a PCAP file. Ref #2235.
* Use Npcap DLL on Windows when checking for uBridge version.
* Fixes running switch console inside PyCharm terminal (Ref. #1172)
* Load meta of the project on loading time (Fixes #2225)
* Added checking if NIO exists (Fixes #1160)
* Fixes NAT node not working on Windows (#1163)
* Fixes loading project when link_id is not set (#1159)
* Return platform value on appliances list (Fixes #2211)
* Fixes not known category in Appliances (Fixes #1156)
## 2.1.0b1 04/08/2017
* Sync appliances
* Interface starting with gns3 are not display by default in the cloud
* Catch error when something that is not the GNS3 server answer to virtualbox requests
* Catch KeyError: <aiohttp.connector._TransportPlaceholder
* Add a warning when you try to load and the server is not started with --local
* Sync appliances
* Fix permission on exited container
## 2.1.0a2 31/07/2017
* Handle invalid appliances files

DockerfileTests.tpl (new file, 15 lines)
View File

@ -0,0 +1,15 @@
FROM python:${PYTHON_VERSION}
RUN pip install -U setuptools pip
ADD requirements.txt /server/requirements.txt
ADD dev-requirements.txt /server/dev-requirements.txt
RUN pip install -r/server/dev-requirements.txt
RUN useradd -ms /bin/bash gns3
USER gns3
ADD . /server
WORKDIR /server

View File

@ -28,7 +28,7 @@ console_end_port_range = 10000
; First port of the range allocated for inter-device communication. Two ports are allocated per link.
udp_start_port_range = 10000
; Last port of the range allocated for inter-device communication. Two ports are allocated per link
udp_start_end_range = 20000
udp_end_port_range = 20000
; uBridge executable location, default: search in PATH
;ubridge_path = ubridge
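
The hunk above renames the misspelled key udp_start_end_range to udp_end_port_range in the sample gns3_server.conf (the "Fix typo in sample gns3_server.conf. Fixes #1210." commit in the list above). For reference, the corrected port-range block reads as follows (comments and values exactly as shown in the hunk; surrounding options omitted):

    ; First port of the range allocated for inter-device communication. Two ports are allocated per link.
    udp_start_port_range = 10000
    ; Last port of the range allocated for inter-device communication. Two ports are allocated per link
    udp_end_port_range = 20000

Since two UDP ports are allocated per link, this default range of 10000 ports allows roughly 5000 concurrent links.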

docker-compose.yml (new file, 4 lines)
View File

@ -0,0 +1,4 @@
tests:
build: .
dockerfile: /tmp/DockerfileTests
command: py.test -v -s tests
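
Taken together, the .travis.yml, DockerfileTests.tpl and docker-compose.yml changes above move the test run into a Docker container, one image per Python version in the build matrix. A minimal sketch of reproducing that flow locally, assuming Docker and docker-compose are installed and reusing the exact commands from the Travis install/script steps shown above:

    # Pick one of the matrix values declared in .travis.yml (3.4, 3.5 or 3.6).
    export PYTHON_VERSION=3.6
    # Render the Dockerfile template, substituting ${PYTHON_VERSION} as the Travis script step does.
    sed -e 's/${PYTHON_VERSION}/'${PYTHON_VERSION}/g DockerfileTests.tpl > /tmp/DockerfileTests
    # Build the image defined by docker-compose.yml and run py.test inside the container.
    docker-compose build
    docker-compose run tests

The tests service builds from the rendered /tmp/DockerfileTests (FROM python:${PYTHON_VERSION}), so each matrix entry exercises the test suite against its own Python base image.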

View File

@ -26,9 +26,15 @@
"kvm": "require"
},
"images": [
{
"filename": "vThunder_410_P9.qcow2",
"version": "4.1.0.P9",
"md5sum": "6ef0f69ba7a099a7f43b5815c2abc691",
"filesize": 6311706624,
"download_url": "https://www.a10networks.com/vthunder-embed" },
{
"filename": "vThunder_410_P3.qcow2",
"version": "4.1.0",
"version": "4.1.0.P3",
"md5sum": "daacefa4e0eb1cad9b253926624be4b9",
"filesize": 6098780160,
"download_url": "https://www.a10networks.com/vthunder-embed"
@ -43,7 +49,13 @@
],
"versions": [
{
"name": "4.1.0",
"name": "4.1.0.P9",
"images": {
"hda_disk_image": "vThunder_410_P9.qcow2"
}
},
{
"name": "4.1.0.P3",
"images": {
"hda_disk_image": "vThunder_410_P3.qcow2"
}

View File

@ -63,9 +63,9 @@
{
"filename": "asav971.qcow2",
"version": "9.7.1",
"md5sum": "cb31f53e70a9e409829d2f832ff09191",
"filesize": 199688192,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286119613&flowid=&softwareid=280775065&release=9.7.1&relind=AVAILABLE&rellifecycle=&reltype=latest"
"md5sum": "07eef9b8ca489a8ad37448fadf45a673",
"filesize": 198443008,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "asav963-8.qcow2",
@ -91,36 +91,8 @@
{
"filename": "asav962.qcow2",
"version": "9.6.2",
"md5sum": "a4c892afe610776dde8a176f1049ae96",
"md5sum": "dfb8110ce38da4588e994865d5a9656a",
"filesize": 177274880,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286119613&flowid=&softwareid=280775065&release=9.6.2&relind=AVAILABLE&rellifecycle=&reltype=latest"
},
{
"filename": "asav961.qcow2",
"version": "9.6.1",
"md5sum": "c8726827cb72f4eed8cb52a64bca091c",
"filesize": 173801472,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286119613&flowid=&softwareid=280775065&release=9.6.1&relind=AVAILABLE&rellifecycle=&reltype=latest"
},
{
"filename": "asav952-207.qcow2",
"version": "9.5.2-207",
"md5sum": "75b7d55e01ebd2e79ad5cdac2c811426",
"filesize": 169345024,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "asav952-204.qcow2",
"version": "9.5.2-204",
"md5sum": "73a1126283de6b70c4cc12edfc46d547",
"filesize": 169345024,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "asav951-201.qcow2",
"version": "9.5.1-200",
"md5sum": "ca071370278ecbd5dfdb1c5a4161571a",
"filesize": 160038912,
"download_url": "https://virl.mediuscorp.com/my-account/"
}
],
@ -184,30 +156,6 @@
"images": {
"hda_disk_image": "asav962.qcow2"
}
},
{
"name": "9.6.1",
"images": {
"hda_disk_image": "asav961.qcow2"
}
},
{
"name": "9.5.2-207",
"images": {
"hda_disk_image": "asav952-207.qcow2"
}
},
{
"name": "9.5.2-204",
"images": {
"hda_disk_image": "asav952-204.qcow2"
}
},
{
"name": "9.5.1-201",
"images": {
"hda_disk_image": "asav951-201.qcow2"
}
}
]
}

View File

@ -22,6 +22,13 @@
"kvm": "require"
},
"images": [
{
"filename": "csr1000v-universalk9.16.5.1b-serial.qcow2",
"version": "16.5.1b",
"md5sum": "ac11d33041b8ff6dc3553e324d02cccb",
"filesize": 1209543680,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "csr1000v-universalk9.03.17.00.S.156-1.S-ext.qcow2",
"version": "3.17",
@ -29,13 +36,6 @@
"filesize": 1346305024,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "csr1000v-universalk9.16.2.2-ext.qcow2",
"version": "16.2.2",
"md5sum": "6c7e61b2f091ce1e9562dc3f2da43ebe",
"filesize": 1586637824,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "csr1000v-universalk9.16.3.1-build2.qcow2",
"version": "16.3.1-build2",
@ -53,29 +53,29 @@
{
"filename": "csr1000v-universalk9.16.03.02.qcow2",
"version": "16.3.2",
"md5sum": "01868950c960b1eeaa0fe9e035b25e48",
"filesize": 1327693824,
"download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&softwareid=282046477&release=Denali-16.3.2"
"md5sum": "2e5803d23cd52cba5d55fa8306be5f13",
"filesize": 1167720448,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "csr1000v-universalk9.16.04.01-serial.qcow2",
"filename": "csr1000v-universalk9.16.4.1.qcow2",
"version": "16.4.1",
"md5sum": "8f190db9fd06a34d66f0c82812e56fd2",
"filesize": 1457651712,
"download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&softwareid=282046477&release=Everest-16.4.1"
"md5sum": "3428e0dcf5132a1b11ab7696d8c61b2e",
"filesize": 1261961216,
"download_url": "https://virl.mediuscorp.com/my-account/"
}
],
"versions": [
{
"name": "3.17",
"name": "16.5.1b",
"images": {
"hda_disk_image": "csr1000v-universalk9.03.17.00.S.156-1.S-ext.qcow2"
"hda_disk_image": "csr1000v-universalk9.16.5.1b-serial.qcow2"
}
},
{
"name": "16.2.2",
"name": "3.17",
"images": {
"hda_disk_image": "csr1000v-universalk9.16.2.2-ext.qcow2"
"hda_disk_image": "csr1000v-universalk9.03.17.00.S.156-1.S-ext.qcow2"
}
},
{
@ -99,7 +99,7 @@
{
"name": "16.4.1",
"images": {
"hda_disk_image": "csr1000v-universalk9.16.04.01-serial.qcow2"
"hda_disk_image": "csr1000v-universalk9.16.4.1.qcow2"
}
}
]

View File

@ -0,0 +1,125 @@
{
"name": "Cisco FMCv",
"category": "firewall",
"description": "This is your administrative nerve center for managing critical Cisco network security solutions. It provides complete and unified management over firewalls, application control, intrusion prevention, URL filtering, and advanced malware protection. Easily go from managing a firewall to controlling applications to investigating and remediating malware outbreaks.",
"vendor_name": "Cisco Systems",
"vendor_url": "http://www.cisco.com/",
"documentation_url": "http://www.cisco.com/c/en/us/td/docs/security/firepower/quick_start/kvm/fmcv-kvm-qsg.html",
"product_name": "Cisco Firepower Management Center Virtual",
"product_url": "http://www.cisco.com/c/en/us/td/docs/security/firepower/quick_start/kvm/fmcv-kvm-qsg.html",
"registry_version": 4,
"status": "experimental",
"maintainer": "Community",
"maintainer_email":"",
"usage": "BE PATIENT\nOn first boot FMCv generates about 6GB of data. This can take 30 minutes or more. Plan on a long wait after the following line in the boot up:\n\n usbcore: registered new interface driver usb-storage\n\nInitial IP address: 192.168.45.45.\n\nDefault username/password: admin/Admin123.",
"symbol": "cisco-fmcv.svg",
"first_port_name": "eth0",
"port_name_format": "eth{port1}",
"qemu": {
"cpus": 4,
"adapter_type": "virtio-net-pci",
"adapters": 1,
"ram": 8192,
"arch": "x86_64",
"console_type": "telnet",
"hda_disk_interface": "scsi",
"kvm": "require",
"options": ""
},
"images": [
{
"filename": "Cisco_Firepower_Management_Center_Virtual-6.0.0-1005-disk1.vmdk",
"version": "6.0.0 (1005) vmdk",
"md5sum": "3fed60f1e7d6910c22d13e966acebd7f",
"filesize": 1681540608,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "Cisco_Firepower_Management_Center_Virtual-6.1.0-330.qcow2",
"version": "6.1.0 (330)",
"md5sum": "e3c64179ec46671caeb7ac3e4e58064f",
"filesize": 1909391360,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "Cisco_Firepower_Management_Center_Virtual_VMware-6.1.0-330-disk1.vmdk",
"version": "6.1.0 (330) vmdk",
"md5sum": "8bc77b317cf0007dcbb0f187c1a0c01f",
"filesize": 1938142720,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "Cisco_Firepower_Management_Center_Virtual-6.2.0-362.qcow2",
"version": "6.2.0 (362)",
"md5sum": "26e66882bf5f68adc0eca2f6bef7b613",
"filesize": 1949302784,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "Cisco_Firepower_Management_Center_Virtual_VMware-6.2.0-362-disk1.vmdk",
"version": "6.2.0 (362) vmdk",
"md5sum": "772165cbda3c183bb0e77a1923dd4d09",
"filesize": 1983376384,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "Cisco_Firepower_Management_Center_Virtual-6.2.1-342.qcow2",
"version": "6.2.1 (342)",
"md5sum": "29ebbbe71a6b766f6dea81e5ca32c275",
"filesize": 2113732608,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "Cisco_Firepower_Management_Center_Virtual_VMware-6.2.1-342-disk1.vmdk",
"version": "6.2.1 (342) vmdk",
"md5sum": "4cf5b7fd68075b6f7ee0dd41a4029ca0",
"filesize": 2150017536,
"download_url": "https://software.cisco.com/download/"
}
],
"versions": [
{
"name": "6.0.0 (1005) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual-6.0.0-1005-disk1.vmdk"
}
},
{
"name": "6.1.0 (330)",
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual-6.1.0-330.qcow2"
}
},
{
"name": "6.1.0 (330) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual_VMware-6.1.0-330-disk1.vmdk"
}
},
{
"name": "6.2.0 (362)",
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual-6.2.0-362.qcow2"
}
},
{
"name": "6.2.0 (362) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual_VMware-6.2.0-362-disk1.vmdk"
}
},
{
"name": "6.2.1 (342)",
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual-6.2.1-342.qcow2"
}
},
{
"name": "6.2.1 (342) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual_VMware-6.2.1-342-disk1.vmdk"
}
}
]
}

View File

@ -0,0 +1,98 @@
{
"name": "Cisco FTDv",
"category": "firewall",
"description": "Cisco Firepower Threat Defense Virtual NGFW appliances combine Cisco's proven network firewall with the industrys most effective next-gen IPS and advanced malware protection. All so you can get more visibility, be more flexible, save more, and protect better.",
"vendor_name": "Cisco Systems",
"vendor_url": "http://www.cisco.com/",
"documentation_url": "http://www.cisco.com/c/en/us/td/docs/security/firepower/quick_start/kvm/ftdv-kvm-qsg.html",
"product_name": "Cisco FTDv",
"product_url": "http://www.cisco.com/c/en/us/td/docs/security/firepower/quick_start/kvm/ftdv-kvm-qsg.html",
"registry_version": 4,
"status": "experimental",
"maintainer": "Community",
"maintainer_email": "",
"usage": "Default username/password: admin/Admin123.",
"symbol": ":/symbols/asa.svg",
"first_port_name": "Gigabit0/0 (Mgmt)",
"port_name_format": "Gigabit0/{port1}",
"qemu": {
"cpus": 4,
"adapter_type": "e1000",
"adapters": 10,
"ram": 8192,
"arch": "x86_64",
"console_type": "telnet",
"hda_disk_interface": "ide",
"kvm": "require"
},
"images": [
{
"filename": "Cisco_Firepower_Threat_Defense_Virtual-6.2.0-363.qcow2",
"version": "6.2.0 (363)",
"md5sum": "fafdae94ead07b23d6c8dc5f7a731e74",
"filesize": 1022885888,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286306503&catid=268438162&softwareid=286306337&release=6.2.0&relind=AVAILABLE&rellifecycle=&reltype=latest"
},
{
"filename": "Cisco_Firepower_Threat_Defense_Virtual-6.2.0-363.vmdk",
"version": "6.2.0 (363) vmdk",
"md5sum": "10297ab20526a8b1586c6ce1cd3d9cbd",
"filesize": 1042470912,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286306503&catid=268438162&softwareid=286306337&release=6.2.0&relind=AVAILABLE&rellifecycle=&reltype=latest"
},
{
"filename": "Cisco_Firepower_Threat_Defense_Virtual-6.1.0-330.qcow2",
"version": "6.1.0 (330)",
"md5sum": "386ab2b3d6d1d28fd2cd03a83df5e00f",
"filesize": 1004601344,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286306503&catid=268438162&softwareid=286306337&release=6.1.0&relind=AVAILABLE&rellifecycle=&reltype=latest"
},
{
"filename": "Cisco_Firepower_Threat_Defense_Virtual-6.1.0-330.vmdk",
"version": "6.1.0 (330) vmdk",
"md5sum": "c1fa58448841b33d5eed6854dc608816",
"filesize": 1024162816,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286306503&catid=268438162&softwareid=286306337&release=6.1.0&relind=AVAILABLE&rellifecycle=&reltype=latest"
},
{
"filename": "Cisco_Firepower_Threat_Defense_Virtual-6.0.1-1213.vmdk",
"version": "6.0.1 (1213) vmdk",
"md5sum": "bc53461e2ec344814e41a6a8d3a5f774",
"filesize": 714577408,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286306503&catid=268438162&softwareid=286306337&release=6.0.1&relind=AVAILABLE&rellifecycle=&reltype=latest"
}
],
"versions": [
{
"name": "6.2.0 (363)",
"images": {
"hda_disk_image": "Cisco_Firepower_Threat_Defense_Virtual-6.2.0-363.qcow2"
}
},
{
"name": "6.2.0 (363) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_Threat_Defense_Virtual-6.2.0-363.vmdk"
}
},
{
"name": "6.1.0 (330)",
"images": {
"hda_disk_image": "Cisco_Firepower_Threat_Defense_Virtual-6.1.0-330.qcow2"
}
},
{
"name": "6.1.0 (330) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_Threat_Defense_Virtual-6.1.0-330.vmdk"
}
},
{
"name": "6.0.1 (1213) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_Threat_Defense_Virtual-6.0.1-1213.vmdk"
}
}
]
}

View File

@ -23,6 +23,13 @@
"kvm": "require"
},
"images": [
{
"filename": "vios_l2-adventerprisek9-m.03.2017.qcow2",
"version": "15.2(20170321:233949)",
"md5sum": "8f14b50083a14688dec2fc791706bb3e",
"filesize": 41157632,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "vios_l2-adventerprisek9-m.vmdk.SSA.152-4.0.55.E",
"version": "15.2.4055",
@ -32,6 +39,12 @@
}
],
"versions": [
{
"name": "15.2(20170321:233949)",
"images": {
"hda_disk_image": "vios_l2-adventerprisek9-m.03.2017.qcow2"
}
},
{
"name": "15.2.4055",
"images": {

View File

@ -23,33 +23,33 @@
"kvm": "require"
},
"images": [
{
"filename": "iosxrv-k9-demo-6.1.3.qcow2",
"version": "6.1.3",
"md5sum": "1693b5d22a398587dd0fed2877d8dfac",
"filesize": 428588544,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "iosxrv-k9-demo-6.0.1.qcow2",
"version": "6.0.1",
"md5sum": "0831ecf43628eccb752ebb275de9a62a",
"filesize": 908132352,
"download_url": "https://virl.mediuscorp.com/my-account/"
},
{
"filename": "iosxrv-k9-demo-6.0.0.qcow2",
"version": "6.0.0",
"md5sum": "f0dccd86d64e370e22f144e681d202b6",
"filesize": 860815360,
"download_url": "https://virl.mediuscorp.com/my-account/"
}
],
"versions": [
{
"name": "6.1.3",
"images": {
"hda_disk_image": "iosxrv-k9-demo-6.1.3.qcow2"
}
},
{
"name": "6.0.1",
"images": {
"hda_disk_image": "iosxrv-k9-demo-6.0.1.qcow2"
}
},
{
"name": "6.0.0",
"images": {
"hda_disk_image": "iosxrv-k9-demo-6.0.0.qcow2"
}
}
]
}

View File

@ -0,0 +1,72 @@
{
"name": "Cisco NGIPSv",
"category": "firewall",
"description": "Cisco Firepower Next-Generation IPS (NGIPS) threat appliances combine superior visibility, embedded security intelligence, automated analysis, and industry-leading threat effectiveness.",
"vendor_name": "Cisco Systems",
"vendor_url": "http://www.cisco.com/",
"documentation_url": "http://www.cisco.com/c/en/us/support/security/ngips-virtual-appliance/tsd-products-support-series-home.html",
"product_name": "Cisco Firepower NGIPS Virtual",
"product_url": "http://www.cisco.com/c/en/us/support/security/ngips-virtual-appliance/tsd-products-support-series-home.html",
"registry_version": 4,
"status": "experimental",
"maintainer": "Community",
"maintainer_email": "",
"usage": "Default username/password: admin/Admin123.",
"symbol": ":/symbols/ids.svg",
"first_port_name": "eth0 (Mgmt)",
"port_name_format": "eth{port1}",
"qemu": {
"cpus": 4,
"adapter_type": "vmxnet3",
"adapters": 10,
"ram": 8192,
"arch": "x86_64",
"console_type": "telnet",
"hda_disk_interface": "scsi",
"kvm": "require"
},
"images": [
{
"filename": "Cisco_Firepower_NGIPSv_VMware-6.0.0-1005-disk1.vmdk",
"version": "6.0.0 (1005) vmdk",
"md5sum": "72ed34d39c58a9d5ad1c6197d1ff9a62",
"filesize": 804301312,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286259690&softwareid=286271056&release=6.0.0.0&relind=AVAILABLE&rellifecycle=&reltype=latest"
},
{
"filename": "Cisco_Firepower_NGIPSv_VMware-6.1.0-330-disk1.vmdk",
"version": "6.1.0 (330) vmdk",
"md5sum": "7a771cc8c37a0371285f24c25f9886f0",
"filesize": 860411392,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286259690&softwareid=286271056&release=6.1.0&relind=AVAILABLE&rellifecycle=&reltype=latest"
},
{
"filename": "Cisco_Firepower_NGIPSv_VMware-6.2.0-362-disk1.vmdk",
"version": "6.2.0 (362) vmdk",
"md5sum": "46f629149e11ac5c224bae0486c7e406",
"filesize": 877626368,
"download_url": "https://software.cisco.com/download/release.html?mdfid=286259690&softwareid=286271056&release=6.2.0&relind=AVAILABLE&rellifecycle=&reltype=latest"
}
],
"versions": [
{
"name": "6.2.0 (362) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_NGIPSv_VMware-6.2.0-362-disk1.vmdk"
}
},
{
"name": "6.1.0 (330) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_NGIPSv_VMware-6.1.0-330-disk1.vmdk"
}
},
{
"name": "6.0.0 (1005) vmdk",
"images": {
"hda_disk_image": "Cisco_Firepower_NGIPSv_VMware-6.0.0-1005-disk1.vmdk"
}
}
]
}

View File

@ -12,7 +12,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "The default username/password is admin/admin. A default configuration is present.",
"first_port_name": "mgmt0",
"port_name_format": "Ethernet2/{0}",
"port_name_format": "Ethernet2/{port1}",
"qemu": {
"adapter_type": "e1000",
"adapters": 16,

View File

@ -25,6 +25,13 @@
"kvm": "require"
},
"images": [
{
"filename": "nxosv-final.7.0.3.I7.1.qcow2",
"version": "7.0.3.I7.1",
"md5sum": "3c122f27d0c3684c63657207eadf4d06",
"filesize": 903151616,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "nxosv-final.7.0.3.I6.1.qcow2",
"version": "7.0.3.I6.1",
@ -57,6 +64,13 @@
}
],
"versions": [
{
"name": "7.0.3.I7.1",
"images": {
"bios_image": "OVMF-20160813.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.1.qcow2"
}
},
{
"name": "7.0.3.I6.1",
"images": {

View File

@ -21,6 +21,15 @@
"kvm": "allow"
},
"images": [
{
"filename": "coreos_production_qemu_image.1465.7.0.img",
"version": "1465.7.0",
"md5sum": "1db77d47e76d3d9082846584e0f4b4bc",
"filesize": 796590080,
"download_url": "http://stable.release.core-os.net/amd64-usr/1465.7.0/",
"direct_download_url": "http://stable.release.core-os.net/amd64-usr/1465.7.0/coreos_production_qemu_image.img.bz2",
"compression": "bzip2"
},
{
"filename": "coreos_production_qemu_image.1409.7.0.img",
"version": "1409.7.0",
@ -122,6 +131,18 @@
}
],
"versions": [
{
"name": "1465.7.0",
"images": {
"hda_disk_image": "coreos_production_qemu_image.1465.7.0.img"
}
},
{
"name": "1409.7.0",
"images": {
"hda_disk_image": "coreos_production_qemu_image.1409.7.0.img"
}
},
{
"name": "1353.8.0",
"images": {

View File

@ -13,16 +13,37 @@
"maintainer_email": "developers@gns3.net",
"usage": "Default username is cumulus and password is CumulusLinux!",
"first_port_name": "eth0",
"port_name_format": "swp{0}",
"port_name_format": "swp{port1}",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 7,
"ram": 256,
"ram": 512,
"arch": "x86_64",
"console_type": "telnet",
"kvm": "require"
},
"images": [
{
"filename": "cumulus-linux-3.4.2-vx-amd64.qcow2",
"version": "3.4.2",
"md5sum": "ca844684784ceeee893d0cd76dc44e3b",
"filesize": 1060700160,
"download_url": "https://cumulusnetworks.com/cumulus-vx/download/"
},
{
"filename": "cumulus-linux-3.4.1-vx-amd64.qcow2",
"version": "3.4.1",
"md5sum": "38319aa04533d91b1121a02f6ed99993",
"filesize": 975503360,
"download_url": "https://cumulusnetworks.com/cumulus-vx/download/"
},
{
"filename": "cumulus-linux-3.4.0-vx-amd64.qcow2",
"version": "3.4.0",
"md5sum": "d93a15072bc7f8d15268f5e43f735a5e",
"filesize": 918355968,
"download_url": "https://cumulusnetworks.com/cumulus-vx/download/"
},
{
"filename": "cumulus-linux-3.3.2-vx-amd64.qcow2",
"version": "3.3.2",
@ -95,6 +116,24 @@
}
],
"versions": [
{
"name": "3.4.2",
"images": {
"hda_disk_image": "cumulus-linux-3.4.2-vx-amd64.qcow2"
}
},
{
"name": "3.4.1",
"images": {
"hda_disk_image": "cumulus-linux-3.4.1-vx-amd64.qcow2"
}
},
{
"name": "3.4.0",
"images": {
"hda_disk_image": "cumulus-linux-3.4.0-vx-amd64.qcow2"
}
},
{
"name": "3.3.2",
"images": {

View File

@ -27,6 +27,13 @@
"options": "-smp 2 -cpu host"
},
"images": [
{
"filename": "BIGIP-13.0.0.2.0.1671.qcow2",
"version": "13.0.0 HF2",
"md5sum": "62d27f37c66118710c69c07a2ee78d67",
"filesize": 4435476480,
"download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-ip/big-ip_v13.x/13.0.0/english/virtual-edition_base-plus-hf2/&sw=BIG-IP&pro=big-ip_v13.x&ver=13.0.0&container=Virtual-Edition_Base-Plus-HF2&file=BIGIP-13.0.0.2.0.1671.LTM.qcow2.zip"
},
{
"filename": "BIGIP-13.0.0.0.0.1645.qcow2",
"version": "13.0.0",
@ -34,6 +41,13 @@
"filesize": 3833135104,
"download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-ip/big-ip_v13.x/13.0.0/english/virtual-edition/&sw=BIG-IP&pro=big-ip_v13.x&ver=13.0.0&container=Virtual-Edition&file=BIGIP-13.0.0.0.0.1645.ALL.qcow2.zip"
},
{
"filename": "BIGIP-12.1.2.1.0.271.LTM.qcow2",
"version": "12.1.2 HF1",
"md5sum": "b34301c3945b7ddb88f41195efef1104",
"filesize": 3764846592,
"download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-ip/big-ip_v12.x/12.1.2/english/virtual-edition_base-plus-hf1/&sw=BIG-IP&pro=big-ip_v12.x&ver=12.1.2&container=Virtual-Edition_Base-Plus-HF1&file=BIGIP-12.1.2.1.0.271.LTM.qcow2.zip"
},
{
"filename": "BIGIP-12.1.2.0.0.249.qcow2",
"version": "12.1.2",
@ -93,6 +107,13 @@
}
],
"versions": [
{
"name": "13.0.0 HF2",
"images": {
"hda_disk_image": "BIGIP-13.0.0.2.0.1671.qcow2",
"hdb_disk_image": "empty100G.qcow2"
}
},
{
"name": "13.0.0",
"images": {
@ -100,6 +121,13 @@
"hdb_disk_image": "empty100G.qcow2"
}
},
{
"name": "12.1.2 HF1",
"images": {
"hda_disk_image": "BIGIP-12.1.2.1.0.271.LTM.qcow2",
"hdb_disk_image": "empty100G.qcow2"
}
},
{
"name": "12.1.2",
"images": {

View File

@ -29,6 +29,13 @@
"options": "-smp 2 -cpu host"
},
"images": [
{
"filename": "BIG-IQ-5.3.0.0.0.1119.qcow2",
"version": "5.3.0",
"md5sum": "75f06ba59f858c3828d47dcf8caf3775",
"filesize": 3269263360,
"download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-iq/big-iq_cm/5.3.0/english/v5.3.0/&sw=BIG-IQ&pro=big-iq_CM&ver=5.3.0&container=v5.3.0&file=BIG-IQ-5.3.0.0.0.1119.qcow2.zip"
},
{
"filename": "BIG-IQ-5.2.0.0.0.5741.qcow2",
"version": "5.2.0",
@ -67,6 +74,13 @@
}
],
"versions": [
{
"name": "5.3.0",
"images": {
"hda_disk_image": "BIG-IQ-5.3.0.0.0.1119.qcow2",
"hdb_disk_image": "empty100G.qcow2"
}
},
{
"name": "5.2.0",
"images": {

View File

@ -14,6 +14,7 @@
"usage": "Default username is admin, no password is set. Silent boot, it might take a while.",
"symbol": "loadbalancer.svg",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 10,

View File

@ -13,6 +13,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "Default username is admin, no password is set.",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 4,

View File

@ -0,0 +1,53 @@
{
"name": "FortiAuthenticator",
"category": "guest",
"description": "FortiAuthenticator user identity management appliances strengthen enterprise security by simplifying and centralizing the management and storage of user identity information.",
"vendor_name": "Fortinet",
"vendor_url": "http://www.fortinet.com/",
"documentation_url": "http://docs.fortinet.com/fortiauthenticator/admin-guides",
"product_name": "FortiAuthenticator",
"product_url": "https://www.fortinet.com/products/identity-access-management/fortiauthenticator.html",
"registry_version": 3,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Default username is admin, no password is set. First book takes longer.",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 4,
"ram": 1024,
"hda_disk_interface": "virtio",
"hdb_disk_interface": "virtio",
"arch": "x86_64",
"console_type": "telnet",
"boot_priority": "c",
"kvm": "allow"
},
"images": [
{
"filename": "FAC_VM_KVM-v500-build0012-FORTINET.out.kvm.qcow2",
"version": "5.0.0",
"md5sum": "2af90bdad68a37f38fda39ee04cf2fba",
"filesize": 62771200,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FAC_VM_KVM-v500-DATADRIVE.qcow2",
"version": "5.x.x",
"md5sum": "09bad6cfe6301930adbc829eb8a67149",
"filesize": 258048,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
}
],
"versions": [
{
"name": "5.0.0",
"images": {
"hda_disk_image": "FAC_VM_KVM-v500-build0012-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "FAC_VM_KVM-v500-DATADRIVE.qcow2"
}
}
]
}

View File

@ -13,6 +13,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "Default username is admin, no password is set.",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 3,

View File

@ -13,6 +13,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "Default username is admin, no password is set.",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 10,
@ -25,6 +26,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "FGT_VM64_KVM-v5-build1486-FORTINET.out.kvm.qcow2",
"version": "5.6.2",
"md5sum": "afb9f237de2545db8663f4a2c5805355",
"filesize": 39231488,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v5-build1484-FORTINET.out.kvm.qcow2",
"version": "5.6.1",
@ -133,6 +141,13 @@
}
],
"versions": [
{
"name": "5.6.2",
"images": {
"hda_disk_image": "FGT_VM64_KVM-v5-build1486-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.6.1",
"images": {

View File

@ -13,6 +13,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "First boot takes a few minutes. Admin URL is https://x.x.x.x/admin, default username is admin, no password is set.",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 4,
@ -32,6 +33,13 @@
"filesize": 101253120,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FML_VMKV-64-v53-build0643-FORTINET.out.kvm.qcow2",
"version": "5.3.10",
"md5sum": "08f3258533ac2b4f15e86ca3973be17e",
"filesize": 88801280,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FML_VMKV-64-v53-build0634-FORTINET.out.kvm.qcow2",
"version": "5.3.9",
@ -105,6 +113,13 @@
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.3.10",
"images": {
"hda_disk_image": "FML_VMKV-64-v53-build0643-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.3.9",
"images": {

View File

@ -13,7 +13,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "Default username is admin, no password is set.",
"port_name_format": "Port{port1}",
"symbol": "mgmt_station.svg",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 4,
@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "FMG_VM64_KVM-v5-build1557-FORTINET.out.kvm.qcow2",
"version": "5.6.0",
"md5sum": "f8bd600796f894f4ca1ea2d6b4066d3d",
"filesize": 108363776,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FMG_VM64_KVM-v5-build1187-FORTINET.out.kvm.qcow2",
"version": "5.4.3",
@ -92,6 +99,13 @@
}
],
"versions": [
{
"name": "5.6.0",
"images": {
"hda_disk_image": "FMG_VM64_KVM-v5-build1557-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.4.3",
"images": {

View File

@ -13,6 +13,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "First boot will take some time without console output. Default username is admin, no password is set.",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 3,

View File

@ -13,6 +13,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "This is the Super/Worker component. Default credentials:\n- admin / admin*1\n - root / ProspectHills\n\nIf you get a 503 error on the WebUI, run /opt/phoenix/deployment/jumpbox/phinitsuper as root.",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 2,
@ -27,6 +28,27 @@
"options": "-smp 4"
},
"images": [
{
"filename": "FortiSIEM-VA-KVM-4.10.0.1102.qcow2",
"version": "4.10.0",
"md5sum": "636d94c78ea02e5a39eadb9d44210dfb",
"filesize": 8622505984,
"download_url": "https://www.fortinet.com/offers/fortisiem-free-trial.html"
},
{
"filename": "FortiSIEM-VA-KVM-4.10.0.1102-1.qcow2",
"version": "4.10.0",
"md5sum": "52fee02e94fd220275b613a4ec5b46eb",
"filesize": 46858240,
"download_url": "https://www.fortinet.com/offers/fortisiem-free-trial.html"
},
{
"filename": "FortiSIEM-VA-KVM-4.10.0.1102-2.qcow2",
"version": "4.10.0",
"md5sum": "088a34864e30abdb95385b089574baba",
"filesize": 46858240,
"download_url": "https://www.fortinet.com/offers/fortisiem-free-trial.html"
},
{
"filename": "FortiSIEM-VA-KVM-4.9.0.1041.qcow2",
"version": "4.9.0",
@ -50,6 +72,14 @@
}
],
"versions": [
{
"name": "4.10.0",
"images": {
"hda_disk_image": "FortiSIEM-VA-KVM-4.10.0.1102.qcow2",
"hdb_disk_image": "FortiSIEM-VA-KVM-4.10.0.1102-1.qcow2",
"hdc_disk_image": "FortiSIEM-VA-KVM-4.10.0.1102-2.qcow2"
}
},
{
"name": "4.9.0",
"images": {

View File

@ -13,6 +13,7 @@
"maintainer_email": "developers@gns3.net",
"usage": "Default username is admin, no password is set. Console keeps sending 'access uuid file failed, error number 2' messages; ignore it.",
"port_name_format": "Port{port1}",
"symbol": "fortinet.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 4,

View File

@ -22,13 +22,22 @@
"kvm": "require"
},
"images": [
{
"filename": "FreeBSD-11.1-RELEASE-amd64.qcow2",
"version": "11.1",
"md5sum": "d78b2a7d05ec62f799e14ded4817ea69",
"filesize": 1533345792,
"download_url": "https://www.freebsd.org/where.html",
"direct_download_url": "https://download.freebsd.org/ftp/releases/VM-IMAGES/11.1-RELEASE/amd64/Latest/FreeBSD-11.1-RELEASE-amd64.qcow2.xz",
"compression": "xz"
},
{
"filename": "FreeBSD-11.0-RELEASE-amd64.qcow2",
"version": "11.0",
"md5sum": "1b04999198f492afd6dc4935b8c7cc22",
"filesize": 1384382464,
"download_url": "https://www.freebsd.org/where.html",
"direct_download_url": "ftp://ftp.freebsd.org/pub/FreeBSD/releases/VM-IMAGES/11.0-RELEASE/amd64/Latest/FreeBSD-11.0-RELEASE-amd64.qcow2.xz",
"direct_download_url": "https://download.freebsd.org/ftp/releases/VM-IMAGES/11.0-RELEASE/amd64/Latest/FreeBSD-11.0-RELEASE-amd64.qcow2.xz",
"compression": "xz"
},
{
@ -37,11 +46,17 @@
"md5sum": "1a00cebef520dfac8d2bda10ea16a951",
"filesize": 974651392,
"download_url": "https://www.freebsd.org/where.html",
"direct_download_url": "ftp://ftp.freebsd.org/pub/FreeBSD/releases/VM-IMAGES/10.3-RELEASE/amd64/Latest/FreeBSD-10.3-RELEASE-amd64.qcow2.xz",
"direct_download_url": "https://download.freebsd.org/ftp/releases/VM-IMAGES/10.3-RELEASE/amd64/Latest/FreeBSD-10.3-RELEASE-amd64.qcow2.xz",
"compression": "xz"
}
],
"versions": [
{
"name": "11.1",
"images": {
"hda_disk_image": "FreeBSD-11.1-RELEASE-amd64.qcow2"
}
},
{
"name": "11.0",
"images": {

View File

@ -23,6 +23,20 @@
"options": "-smp 2"
},
"images": [
{
"filename": "media-vsrx-vmdisk-17.3R1.10.qcow2",
"version": "17.3R1",
"md5sum": "49b276e9ccdd8588f9e2ff38cccc884a",
"filesize": 3782541312,
"download_url": "https://www.juniper.net/us/en/dm/free-vsrx-trial/"
},
{
"filename": "media-vsrx-vmdisk-15.1X49-D110.4.qcow2",
"version": "15.1X49-D110",
"md5sum": "8d74641594eb036b2e2c6b462d541156",
"filesize": 3280011264,
"download_url": "https://www.juniper.net/us/en/dm/free-vsrx-trial/"
},
{
"filename": "media-vsrx-vmdisk-15.1X49-D100.6.qcow2",
"version": "15.1X49-D100",
@ -95,6 +109,18 @@
}
],
"versions": [
{
"name": "17.3R1",
"images": {
"hda_disk_image": "media-vsrx-vmdisk-17.3R1.10.qcow2"
}
},
{
"name": "15.1X49-D110",
"images": {
"hda_disk_image": "media-vsrx-vmdisk-15.1X49-D110.4.qcow2"
}
},
{
"name": "15.1X49-D100",
"images": {

View File

@ -20,6 +20,14 @@
"kvm": "require"
},
"images": [
{
"filename": "kali-linux-2017.2-amd64.iso",
"version": "2017.2",
"md5sum": "541654f8f818450dc0db866a0a0f6eec",
"filesize": 3020619776,
"download_url": "http://cdimage.kali.org/kali-2017.2/",
"direct_download_url": "http://cdimage.kali.org/kali-2017.2/kali-linux-2017.2-amd64.iso"
},
{
"filename": "kali-linux-2017.1-amd64.iso",
"version": "2017.1",
@ -54,6 +62,12 @@
}
],
"versions": [
{
"name": "2017.2",
"images": {
"cdrom_image": "kali-linux-2017.2-amd64.iso"
}
},
{
"name": "2017.1",
"images": {

View File

@ -23,13 +23,22 @@
"kvm": "require"
},
"images": [
{
"filename": "kerio-control-appliance-9.2.3-2219-vmware-disk1.vmdk",
"version": "9.2.3",
"md5sum": "767d5b25bdca2b45c2ba269189ea9bd0",
"filesize": 191716352,
"download_url": "http://www.kerio.com/support/kerio-control",
"direct_download_url": "http://cdn.kerio.com/dwn/control/control-9.2.3-2219/kerio-control-appliance-9.2.3-2219-vmware-disk1.vmdk"
},
{
"filename": "kerio-control-appliance-9.2.2-2172-vmware-disk1.vmdk",
"version": "9.2.2",
"md5sum": "4efeacbc39db1b3e53ef96af1338cf52",
"filesize": 190841856,
"download_url": "http://www.kerio.com/support/kerio-control",
"direct_download_url": "http://cdn.kerio.com/dwn/control/control-9.2.2-2172/kerio-control-appliance-9.2.2-2172-vmware-disk1.vmdk" },
"direct_download_url": "http://cdn.kerio.com/dwn/control/control-9.2.2-2172/kerio-control-appliance-9.2.2-2172-vmware-disk1.vmdk"
},
{
"filename": "kerio-control-appliance-9.2.1-2019-vmware-disk1.vmdk",
"version": "9.2.1",
@ -46,6 +55,12 @@
"direct_download_url": "http://cdn.kerio.com/dwn/control/control-9.1.4-1535/kerio-control-appliance-9.1.4-1535-vmware.vmdk" }
],
"versions": [
{
"name": "9.2.3",
"images": {
"hda_disk_image": "kerio-control-appliance-9.2.3-2219-vmware-disk1.vmdk"
}
},
{
"name": "9.2.2",
"images": {

View File

@ -24,6 +24,14 @@
"kvm": "require"
},
"images": [
{
"filename": "kerio-operator-appliance-2.6.0-8413-vmware-disk1.vmdk",
"version": "2.6.0",
"md5sum": "3eddbb73d685ac4666841f5df2c6bec9",
"filesize": 291227136,
"download_url": "http://www.kerio.com/support/kerio-operator",
"direct_download_url": "http://cdn.kerio.com/dwn/operator/operator-2.6.0-8413/kerio-operator-appliance-2.6.0-8413-vmware-disk1.vmdk"
},
{
"filename": "kerio-operator-appliance-2.5.5-8309-p2-vmware-disk1.vmdk",
"version": "2.5.5p2",
@ -58,6 +66,12 @@
}
],
"versions": [
{
"name": "2.6.0",
"images": {
"hda_disk_image": "kerio-operator-appliance-2.6.0-8413-vmware-disk1.vmdk"
}
},
{
"name": "2.5.5p2",
"images": {

View File

@ -26,6 +26,15 @@
"options": "-nographic"
},
"images": [
{
"filename": "chr-6.40.3.img",
"version": "6.40.3",
"md5sum": "1861df67e9bbf17433f11f33f7dedd1e",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.40.3/chr-6.40.3.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.39.2.img",
"version": "6.39.2",
@ -53,6 +62,15 @@
"direct_download_url": "https://download2.mikrotik.com/routeros/6.39/chr-6.39.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.38.7.img",
"version": "6.38.7",
"md5sum": "69a51c96b1247bbaf1253d2873617122",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.38.7/chr-6.38.7.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.38.5.img",
"version": "6.38.5",
@ -208,6 +226,12 @@
}
],
"versions": [
{
"name": "6.40.3",
"images": {
"hda_disk_image": "chr-6.40.3.img"
}
},
{
"name": "6.39.2",
"images": {
@ -226,6 +250,12 @@
"hda_disk_image": "chr-6.39.img"
}
},
{
"name": "6.38.7",
"images": {
"hda_disk_image": "chr-6.38.7.img"
}
},
{
"name": "6.38.5",
"images": {

View File

@ -0,0 +1,44 @@
{
"name": "OP5 Monitor",
"category": "guest",
"description": "Over 200,000 IT staff across medium to large enterprises worldwide are currently using OP5 Monitor as their preferred network monitoring software.\nOP5 Monitor allows you to take control of your IT, enabling your network to be more responsive, more reliable and even faster than ever before. With unparalleled scalability, OP5 Monitor grows as your company grows, so youll understand why we say this is the last network monitor youll ever need to purchase.",
"vendor_name": "OP5",
"vendor_url": "https://www.op5.com/",
"documentation_url": "https://kb.op5.com/display/MAN/Documentation+Home#sthash.pohb5bis.dpbs",
"product_name": "OP5 Monitor",
"product_url": "https://www.op5.com/op5-monitor/",
"registry_version": 3,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Interface eth0 is set to DHCP. Default credentials:\n- CLI: root / monitor\n- Web access: admin / monitor\n- Logserver Extension: admin / admin",
"port_name_format": "eth{0}",
"symbol": "mgmt_station.svg",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 2,
"ram": 1024,
"hda_disk_interface": "virtio",
"arch": "x86_64",
"console_type": "vnc",
"boot_priority": "c",
"kvm": "require"
},
"images": [
{
"filename": "op5-Monitor-Virtual-Appliance-7.3.15.x86_64.vmdk",
"version": "7.3.15",
"md5sum": "634acc6266237d99bf1bfbcf9284beca",
"filesize": 779687424,
"download_url": "https://www.op5.com/download/"
}
],
"versions": [
{
"name": "7.3.15",
"images": {
"hda_disk_image": "op5-Monitor-Virtual-Appliance-7.3.15.x86_64.vmdk"
}
}
]
}

View File

@ -0,0 +1,44 @@
{
"name": "openSUSE",
"category": "guest",
"description": "openSUSE is a free and Linux-based operating system for PC, Laptop or Server. The openSUSE project is a community program sponsored by Novell. It is a general purpose operating system built on top of the Linux kernel, developed by the community-supported openSUSE Project and sponsored by SUSE and a number of other companies.",
"vendor_name": "SUSE LLC.",
"vendor_url": "https://www.opensuse.org/",
"documentation_url": "https://en.opensuse.org/Main_Page",
"product_name": "openSUSE",
"product_url": "https://www.opensuse.org/#Leap",
"registry_version": 4,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Username: osboxes\nPassword: osboxes.org\n\nroot password: osboxes.org",
"port_name_format": "eth{0}",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 1,
"ram": 1024,
"hda_disk_interface": "sata",
"arch": "x86_64",
"console_type": "vnc",
"boot_priority": "c",
"kvm": "require",
"options": "-vga virtio"
},
"images": [
{
"filename": "openSUSE_42.3-Leap-VM-64bit.vmdk",
"version": "Leap 42.3",
"md5sum": "ab777cf90557460ff35aedfbf2befc5d",
"filesize": 5891293184,
"download_url": "http://www.osboxes.org/opensuse/"
}
],
"versions": [
{
"name": "Leap 42.3",
"images": {
"hda_disk_image": "openSUSE_42.3-Leap-VM-64bit.vmdk"
}
}
]
}

View File

@ -0,0 +1,43 @@
{
"name": "OPNsense",
"category": "firewall",
"description": "OPNsense is an open source, easy-to-use and easy-to-build FreeBSD based firewall and routing platform. OPNsense includes most of the features available in expensive commercial firewalls, and more in many cases. It brings the rich feature set of commercial offerings with the benefits of open and verifiable sources.\n\nOPNsense started as a fork of pfSense® and m0n0wall in 2014, with its first official release in January 2015. The project has evolved very quickly while still retaining familiar aspects of both m0n0wall and pfSense. A strong focus on security and code quality drives the development of the project.",
"vendor_name": "Deciso B.V.",
"vendor_url": "https://opnsense.org/",
"documentation_url": "https://wiki.opnsense.org/",
"product_name": "OPNsense",
"product_url": "https://opnsense.org/about/about-opnsense/",
"registry_version": 3,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Default credentials: root / opnsense\nDefault IP address: 192.168.1.1",
"port_name_format": "em{0}",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 4,
"ram": 1024,
"hda_disk_interface": "virtio",
"arch": "x86_64",
"console_type": "telnet",
"boot_priority": "c",
"kvm": "require"
},
"images": [
{
"filename": "OPNsense-17.7-OpenSSL-nano-amd64.img",
"version": "17.7",
"md5sum": "14cde5c7a15b2298a242238ad3c3b65a",
"filesize": 3221233664,
"download_url": "https://opnsense.org/download/"
}
],
"versions": [
{
"name": "17.7",
"images": {
"hda_disk_image": "OPNsense-17.7-OpenSSL-nano-amd64.img"
}
}
]
}

View File

@ -46,7 +46,15 @@
"md5sum": "e044dc649b7146ee4f619edb0e5f6675",
"filesize": 1871149056,
"download_url": "https://support.paloaltonetworks.com/Updates/SoftwareUpdates/"
},
{
"filename": "PA-VM-KVM-8.0.0.qcow2",
"version": "8.0.0",
"md5sum": "b6a1ddc8552aff87f05f9c0d4cb54dc3",
"filesize": 1987444736,
"download_url": "https://support.paloaltonetworks.com/Updates/SoftwareUpdates/"
}
],
"versions": [
{
@ -66,6 +74,12 @@
"images": {
"hda_disk_image": "PA-VM-ESX-7.1.0-disk1.vmdk"
}
},
{
"name": "8.0.0",
"images": {
"hda_disk_image": "PA-VM-KVM-8.0.0.qcow2"
}
}
]
}

View File

@ -0,0 +1,70 @@
{
"name": "Ubuntu",
"category": "guest",
"description": "Ubuntu is a full-featured Linux operating system which is based on Debian distribution and freely available with both community and professional support, it comes with Unity as its default desktop environment. There are other flavors of Ubuntu available with other desktops as default like Ubuntu Gnome, Lubuntu, Xubuntu, and so on. A tightly-integrated selection of excellent applications is included, and an incredible variety of add-on software is just a few clicks away. A default installation of Ubuntu contains a wide range of software that includes LibreOffice, Firefox, Empathy, Transmission, etc.",
"vendor_name": "Canonical Inc.",
"vendor_url": "https://www.ubuntu.com",
"documentation_url": "https://help.ubuntu.com",
"product_name": "Ubuntu",
"product_url": "https://www.ubuntu.com/desktop",
"registry_version": 3,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Username: osboxes\nPassword: osboxes.org",
"port_name_format": "eth{0}",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 1,
"ram": 1024,
"hda_disk_interface": "virtio",
"arch": "x86_64",
"console_type": "vnc",
"boot_priority": "c",
"kvm": "require",
"options": "-vga virtio"
},
"images": [
{
"filename": "Ubuntu_17.04-VM-64bit.vmdk",
"version": "17.04",
"md5sum": "5c82d69c49ba08179e9a94901f67da1f",
"filesize": 4792123392,
"download_url": "http://www.osboxes.org/ubuntu/"
},
{
"filename": "Ubuntu_16.10_Yakkety-VM-64bit.vmdk",
"version": "16.10",
"md5sum": "c835f24dbb86f5f61c78d992ed38b6b1",
"filesize": 9133293568,
"download_url": "http://www.osboxes.org/ubuntu/"
},
{
"filename": "Ubuntu_16.04.3-VM-64bit.vmdk",
"version": "16.04",
"md5sum": "45bccf63f2777e492f022dbf025f67d0",
"filesize": 4302110720,
"download_url": "http://www.osboxes.org/ubuntu/"
}
],
"versions": [
{
"name": "17.04",
"images": {
"hda_disk_image": "Ubuntu_17.04-VM-64bit.vmdk"
}
},
{
"name": "16.10",
"images": {
"hda_disk_image": "Ubuntu_16.10_Yakkety-VM-64bit.vmdk"
}
},
{
"name": "16.04",
"images": {
"hda_disk_image": "Ubuntu_16.04.3-VM-64bit.vmdk"
}
}
]
}

View File

@ -24,6 +24,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "untangle_1310_x64.iso",
"version": "13.1.0",
"md5sum": "dc35aa96e954992e53a8cb244a932ae6",
"filesize": 588251136,
"download_url": "https://www.untangle.com/get-untangle/"
},
{
"filename": "untangle_1300_x64.iso",
"version": "13.0.0",
@ -83,6 +90,13 @@
}
],
"versions": [
{
"name": "13.1.0",
"images": {
"hda_disk_image": "empty30G.qcow2",
"cdrom_image": "untangle_1310_x64.iso"
}
},
{
"name": "13.0.0",
"images": {

View File

@ -0,0 +1,109 @@
{
"name": "Windows",
"category": "guest",
"description": "Microsoft Windows, or simply Windows, is a metafamily of graphical operating systems developed, marketed, and sold by Microsoft. It consists of several families of operating systems, each of which cater to a certain sector of the computing industry with the OS typically associated with IBM PC compatible architecture.",
"vendor_name": "Microsoft",
"vendor_url": "http://www.microsoft.com/",
"documentation_url": "https://technet.microsoft.com/en-us/library/cc498727.aspx",
"product_name": "Windows",
"product_url": "https://www.microsoft.com/en-us/windows",
"registry_version": 4,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "These virtual machines expire after 90 days; i.e. you have to re-create them in your project after this time but you don't have to re-import the appliance.\n\nDefault credentials: IEUser / Passw0rd!",
"symbol": "microsoft.svg",
"port_name_format": "NIC{port1}",
"qemu": {
"adapter_type": "e1000",
"adapters": 1,
"ram": 1024,
"hda_disk_interface": "sata",
"arch": "x86_64",
"console_type": "vnc",
"boot_priority": "c",
"kvm": "require"
},
"images": [
{
"filename": "MSEdge_-_Win10_preview.vmdk",
"version": "10 w/ Edge",
"md5sum": "e06d97b871581d91b7363bf72a81553d",
"filesize": 10907287552,
"download_url": "https://developer.microsoft.com/en-us/microsoft-edge/tools/vms/"
},
{
"filename": "IE11_-_Win8.1-disk1.vmdk",
"version": "8.1 w/ IE11",
"md5sum": "6c8691c7d58bf2c33f6ca242ace6b9bd",
"filesize": 5704344064,
"download_url": "https://developer.microsoft.com/en-us/microsoft-edge/tools/vms/"
},
{
"filename": "IE11_-_Win7-disk1.vmdk",
"version": "7 w/ IE11",
"md5sum": "5733cc93a6ed756c2358f0a383b411a8",
"filesize": 4101495296,
"download_url": "https://developer.microsoft.com/en-us/microsoft-edge/tools/vms/"
},
{
"filename": "IE10_-_Win7-disk1.vmdk",
"version": "7 w/ IE10",
"md5sum": "ed18b5903fb7d778b847c8d1cef807c4",
"filesize": 4062174208,
"download_url": "https://developer.microsoft.com/en-us/microsoft-edge/tools/vms/"
},
{
"filename": "IE9_-_Win7-disk1.vmdk",
"version": "7 w/ IE9",
"md5sum": "82370cfa215002a49651b773a3a569f2",
"filesize": 4040829440,
"download_url": "https://developer.microsoft.com/en-us/microsoft-edge/tools/vms/"
},
{
"filename": "IE8_-_Win7-disk1.vmdk",
"version": "7 w/ IE8",
"md5sum": "63456b42eb8e184b3e7c675645a3c32c",
"filesize": 4228026368,
"download_url": "https://developer.microsoft.com/en-us/microsoft-edge/tools/vms/"
}
],
"versions": [
{
"name": "10 w/ Edge",
"images": {
"hda_disk_image": "MSEdge_-_Win10_preview.vmdk"
}
},
{
"name": "8.1 w/ IE11",
"images": {
"hda_disk_image": "IE11_-_Win8.1-disk1.vmdk"
}
},
{
"name": "7 w/ IE11",
"images": {
"hda_disk_image": "IE11_-_Win7-disk1.vmdk"
}
},
{
"name": "7 w/ IE10",
"images": {
"hda_disk_image": "IE10_-_Win7-disk1.vmdk"
}
},
{
"name": "7 w/ IE9",
"images": {
"hda_disk_image": "IE9_-_Win7-disk1.vmdk"
}
},
{
"name": "7 w/ IE8",
"images": {
"hda_disk_image": "IE8_-_Win7-disk1.vmdk"
}
}
]
}

View File

@ -0,0 +1,66 @@
{
"name": "Windows Server",
"category": "guest",
"description": "Microsoft Windows, or simply Windows, is a metafamily of graphical operating systems developed, marketed, and sold by Microsoft. It consists of several families of operating systems, each of which cater to a certain sector of the computing industry with the OS typically associated with IBM PC compatible architecture.",
"vendor_name": "Microsoft",
"vendor_url": "http://www.microsoft.com/",
"documentation_url": "https://technet.microsoft.com/en-us/library/cc498727.aspx",
"product_name": "Windows Server",
"product_url": "https://www.microsoft.com/en-us/windows",
"registry_version": 4,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"symbol": "microsoft.svg",
"port_name_format": "NIC{port1}",
"qemu": {
"adapter_type": "e1000",
"adapters": 1,
"ram": 2048,
"hda_disk_interface": "sata",
"arch": "x86_64",
"console_type": "vnc",
"boot_priority": "c",
"kvm": "require"
},
"images": [
{
"filename": "Win2k16_14393.0.161119-1705.RS1_REFRESH_SERVER_EVAL_X64FRE_EN-US.ISO",
"version": "2016",
"md5sum": "70721288bbcdfe3239d8f8c0fae55f1f",
"filesize": 6972221440,
"download_url": "https://www.microsoft.com/en-us/evalcenter/evaluate-windows-server-2016"
},
{
"filename": "Win2k12_9600.16415.amd64fre.winblue_refresh.130928-2229_server_serverdatacentereval_en-us.vhd",
"version": "2012 R2",
"md5sum": "b0a988a2e1f401c99c7c18a00391c4cc",
"filesize": 8024756224,
"download_url": "https://www.microsoft.com/en-us/evalcenter/evaluate-windows-server-2012-r2"
},
{
"filename": "empty100G.qcow2",
"version": "1.0",
"md5sum": "1e6409a4523ada212dea2ebc50e50a65",
"filesize": 198656,
"download_url": "https://sourceforge.net/projects/gns-3/files/Empty%20Qemu%20disk/",
"direct_download_url": "http://sourceforge.net/projects/gns-3/files/Empty%20Qemu%20disk/empty100G.qcow2/download"
}
],
"versions": [
{
"name": "2016",
"images": {
"hda_disk_image": "empty100G.qcow2",
"cdrom_image": "Win2k16_14393.0.161119-1705.RS1_REFRESH_SERVER_EVAL_X64FRE_EN-US.ISO"
}
},
{
"name": "2012 R2",
"images": {
"hda_disk_image": "Win2k12_9600.16415.amd64fre.winblue_refresh.130928-2229_server_serverdatacentereval_en-us.vhd"
}
}
]
}

View File

@ -233,6 +233,14 @@ class BaseNode:
return self._project.node_working_directory(self)
@property
def working_path(self):
"""
Return the node working path. It does not create the directory structure if it is not present.
"""
return self._project.node_working_path(self)
@property
def temporary_directory(self):
if self._temporary_directory is None:
@ -622,7 +630,7 @@ class BaseNode:
i += 1
@asyncio.coroutine
def _add_ubridge_ethernet_connection(self, bridge_name, ethernet_interface, block_host_traffic=True):
def _add_ubridge_ethernet_connection(self, bridge_name, ethernet_interface, block_host_traffic=False):
"""
Creates a connection with an Ethernet interface in uBridge.

View File

@ -86,7 +86,7 @@ class Cloud(BaseNode):
"ports_mapping": self._ports_mapping,
"interfaces": host_interfaces,
"status": self.status,
"node_directory": self.working_dir
"node_directory": self.working_path
}
@property
@ -210,7 +210,7 @@ class Cloud(BaseNode):
if port_info["type"] in ("ethernet", "tap"):
if sys.platform.startswith("win"):
yield from self._add_ubridge_ethernet_connection(bridge_name, port_info["interface"], block_host_traffic=False)
yield from self._add_ubridge_ethernet_connection(bridge_name, port_info["interface"])
else:

View File

@ -36,9 +36,11 @@ class Nat(Cloud):
raise NodeError("virbr0 is missing. You need to install libvirt")
interface = "virbr0"
else:
if "vmnet8" not in [interface["name"] for interface in gns3server.utils.interfaces.interfaces()]:
interfaces = list(filter(lambda x: 'vmnet8' in x.lower(),
[interface["name"] for interface in gns3server.utils.interfaces.interfaces()]))
if not len(interfaces):
raise NodeError("vmnet8 is missing. You need to install VMware or use the NAT node on GNS3 VM")
interface = "vmnet8"
interface = interfaces[0] # take the first available interface containing the vmnet8 name
ports = [
{

View File

@ -114,7 +114,7 @@ class DockerVM(BaseNode):
"start_command": self.start_command,
"status": self.status,
"environment": self.environment,
"node_directory": self.working_dir
"node_directory": self.working_path
}
def _get_free_display_port(self):
@ -416,16 +416,30 @@ class DockerVM(BaseNode):
Because Docker runs as root, we need to fix permissions and ownership to allow the user to interact
with the files from their filesystem and do operations like deleting a file
"""
state = yield from self._get_container_state()
if state == "stopped" or state == "exited":
# We need to restart it to fix permissions
yield from self.manager.query("POST", "containers/{}/start".format(self._cid))
for volume in self._volumes:
log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(
name=self._name, image=self._image, path=volume))
process = yield from asyncio.subprocess.create_subprocess_exec("docker",
"exec",
self._cid,
"/gns3/bin/busybox",
"sh",
"-c",
"(/gns3/bin/busybox find \"{path}\" -depth -print0 | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n' > \"{path}/.gns3_perms\") && /gns3/bin/busybox chmod -R u+rX \"{path}\" && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\"".format(uid=os.getuid(), gid=os.getgid(), path=volume))
process = yield from asyncio.subprocess.create_subprocess_exec(
"docker",
"exec",
self._cid,
"/gns3/bin/busybox",
"sh",
"-c",
"("
"/gns3/bin/busybox find \"{path}\" -depth -print0"
" | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n' > \"{path}/.gns3_perms\""
")"
" && /gns3/bin/busybox chmod -R u+rX \"{path}\""
" && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\""
.format(uid=os.getuid(), gid=os.getgid(), path=volume),
)
yield from process.wait()
@asyncio.coroutine
@ -564,13 +578,15 @@ class DockerVM(BaseNode):
try:
state = yield from self._get_container_state()
except DockerHttp404Error:
state = "stopped"
self.status = "stopped"
return
if state == "paused":
yield from self.unpause()
if state != "stopped":
yield from self._fix_permissions()
yield from self._fix_permissions()
state = yield from self._get_container_state()
if state != "stopped" or state != "exited":
# t=5 number of seconds to wait before killing the container
try:
yield from self.manager.query("POST", "containers/{}/stop".format(self._cid), params={"t": 5})

View File

@ -42,8 +42,8 @@ do
then
while IFS=: read PERMS OWNER GROUP FILE
do
chmod "$PERMS" "$FILE"
chown "${OWNER}:${GROUP}" "$FILE"
[ -L "$FILE" ] || chmod "$PERMS" "$FILE"
chown -h "${OWNER}:${GROUP}" "$FILE"
done < "$i/.gns3_perms"
fi
done

View File
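
For reference, each line of the .gns3_perms file read by this loop uses the mode:uid:gid:path format produced by the busybox stat -c '%a:%u:%g:%n' call shown earlier. A minimal dry-run sketch of the same restore logic in Python, with hypothetical entries and paths:

import os

# Hypothetical entries from a .gns3_perms file: "mode:uid:gid:path",
# the format written by: busybox stat -c '%a:%u:%g:%n'
entries = [
    "644:1000:1000:/gns3volume/etc/hosts",
    "777:1000:1000:/gns3volume/etc/resolv.conf",  # imagine this one is a symlink
]

for entry in entries:
    perms, owner, group, path = entry.split(":", 3)
    if not os.path.islink(path):
        # mirrors `[ -L "$FILE" ] || chmod ...`: chmod would dereference a
        # symlink and change the target's mode, so symlinks are skipped
        print("chmod {} {}".format(perms, path))
    # mirrors `chown -h`: change ownership of the link itself, not its target
    print("chown -h {}:{} {}".format(owner, group, path))
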

@ -194,7 +194,7 @@ class IOUVM(BaseNode):
iou_vm_info = {"name": self.name,
"node_id": self.id,
"node_directory": self.working_dir,
"node_directory": self.working_path,
"console": self._console,
"console_type": "telnet",
"status": self.status,
@ -380,6 +380,8 @@ class IOUVM(BaseNode):
if "license" not in config:
raise IOUError("License section not found in iourc file {}".format(self.iourc_path))
hostname = socket.gethostname()
if len(hostname) > 15:
log.warning("Older IOU images may not boot because hostname '{}' length is above 15 characters".format(hostname))
if hostname not in config["license"]:
raise IOUError("Hostname \"{}\" not found in iourc file {}".format(hostname, self.iourc_path))
user_ioukey = config["license"][hostname]
@ -564,13 +566,14 @@ class IOUVM(BaseNode):
"""
self._terminate_process_iou()
if returncode != 0:
if returncode == -11:
message = "{} process has stopped, return code: {}. This could be an issue with the image using a different image can fix the issue.\n{}".format(process_name, returncode, self.read_iou_stdout())
message = 'IOU VM "{}" process has stopped with return code: {} (segfault). This could be an issue with the IOU image, using a different image may fix this.\n{}'.format(self.name,
returncode,
self.read_iou_stdout())
else:
message = "{} process has stopped, return code: {}\n{}".format(process_name, returncode, self.read_iou_stdout())
log.warn(message)
message = 'IOU VM "{}" process has stopped with return code: {}\n{}'.format(self.name, returncode, self.read_iou_stdout())
log.warning(message)
self.project.emit("log.error", {"message": message})
if self._telnet_server:
self._telnet_server.close()
@ -610,7 +613,7 @@ class IOUVM(BaseNode):
yield from gns3server.utils.asyncio.wait_for_process_termination(self._iou_process, timeout=3)
except asyncio.TimeoutError:
if self._iou_process.returncode is None:
log.warn("IOU process {} is still running... killing it".format(self._iou_process.pid))
log.warning("IOU process {} is still running... killing it".format(self._iou_process.pid))
try:
self._iou_process.kill()
except ProcessLookupError:
@ -945,13 +948,13 @@ class IOUVM(BaseNode):
if "IOURC" not in os.environ:
env["IOURC"] = self.iourc_path
try:
output = yield from gns3server.utils.asyncio.subprocess_check_output(self._path, "-h", cwd=self.working_dir, env=env)
output = yield from gns3server.utils.asyncio.subprocess_check_output(self._path, "-h", cwd=self.working_dir, env=env, stderr=True)
if re.search("-l\s+Enable Layer 1 keepalive messages", output):
command.extend(["-l"])
else:
raise IOUError("layer 1 keepalive messages are not supported by {}".format(os.path.basename(self._path)))
except (OSError, subprocess.SubprocessError) as e:
log.warn("could not determine if layer 1 keepalive messages are supported by {}: {}".format(os.path.basename(self._path), e))
log.warning("could not determine if layer 1 keepalive messages are supported by {}: {}".format(os.path.basename(self._path), e))
@property
def startup_config_content(self):

View File

@ -207,7 +207,7 @@ class Project:
:returns: Node working directory
"""
workdir = os.path.join(self._path, "project-files", node.manager.module_name.lower(), node.id)
workdir = self.node_working_path(node)
if not self._deleted:
try:
os.makedirs(workdir, exist_ok=True)
@ -215,6 +215,15 @@ class Project:
raise aiohttp.web.HTTPInternalServerError(text="Could not create the node working directory: {}".format(e))
return workdir
def node_working_path(self, node):
"""
Returns the working path for a node. It does not create the directory structure if it is not present on the system.
:param node: Node instance
:return: Node working path
"""
return os.path.join(self._path, "project-files", node.manager.module_name.lower(), node.id)
def tmp_working_directory(self):
"""
A temporary directory. Will be clean at project open and close

View File

@ -537,7 +537,7 @@ class QemuVM(BaseNode):
log.info('QEMU VM "{name}" [{id}]: MAC address changed to {mac_addr}'.format(name=self._name,
id=self._id,
mac_addr=mac_address))
mac_addr=self._mac_address))
@property
def legacy_networking(self):
@ -1386,15 +1386,17 @@ class QemuVM(BaseNode):
if not os.path.exists(disk):
# create the disk
try:
process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
"backing_file={}".format(disk_image),
"-f", "qcow2", disk)
command = [qemu_img_path, "create", "-o", "backing_file={}".format(disk_image), "-f", "qcow2", disk]
command_string = " ".join(shlex.quote(s) for s in command)
log.info("Executing qemu-img with: {}".format(command_string))
process = yield from asyncio.create_subprocess_exec(*command)
retcode = yield from process.wait()
if retcode is not None and retcode != 0:
raise QemuError("Could not create {} disk image".format(disk_name))
raise QemuError("Could not create {} disk image: qemu-img returned with {}".format(disk_name,
retcode))
log.info("{} returned with {}".format(qemu_img_path, retcode))
except (OSError, subprocess.SubprocessError) as e:
raise QemuError("Could not create {} disk image {}".format(disk_name, e))
raise QemuError("Could not create {} disk image: {}".format(disk_name, e))
else:
# The disk exists we check if the clone work
try:
@ -1626,7 +1628,7 @@ class QemuVM(BaseNode):
answer = {
"project_id": self.project.id,
"node_id": self.id,
"node_directory": self.working_dir
"node_directory": self.working_path
}
# Qemu has a long list of options. The JSON schema is the single source of information
for field in QEMU_OBJECT_SCHEMA["required"]:

View File

@ -89,7 +89,7 @@ class VirtualBoxVM(BaseNode):
"use_any_adapter": self.use_any_adapter,
"linked_clone": self.linked_clone}
if self.linked_clone:
json["node_directory"] = self.working_dir
json["node_directory"] = self.working_path
else:
json["node_directory"] = None
return json
@ -665,9 +665,27 @@ class VirtualBoxVM(BaseNode):
"""
# check for the maximum adapters supported by the VM
self._maximum_adapters = yield from self._get_maximum_supported_adapters()
vm_info = yield from self._get_vm_info()
chipset = "piix3" # default chipset for VirtualBox VMs
self._maximum_adapters = 8 # default maximum network adapter count for PIIX3 chipset
if "chipset" in vm_info:
chipset = vm_info["chipset"]
max_adapter_string = "Maximum {} Network Adapter count".format(chipset.upper())
if max_adapter_string in self._system_properties:
try:
self._maximum_adapters = int(self._system_properties[max_adapter_string])
except ValueError:
log.error("Could not convert system property to integer: {} = {}".format(max_adapter_string, self._system_properties[max_adapter_string]))
else:
log.warning("Could not find system property '{}' for chipset {}".format(max_adapter_string, chipset))
log.info("VirtualBox VM '{name}' [{id}] can have a maximum of {max} network adapters for chipset {chipset}".format(name=self.name,
id=self.id,
max=self._maximum_adapters,
chipset=chipset.upper()))
if adapters > self._maximum_adapters:
raise VirtualBoxError("Number of adapters above the maximum supported of {}".format(self._maximum_adapters))
raise VirtualBoxError("The configured {} chipset limits the VM to {} network adapters. The chipset can be changed outside GNS3 in the VirtualBox VM settings.".format(chipset.upper(),
self._maximum_adapters))
self._ethernet_adapters.clear()
for adapter_number in range(0, adapters):
@ -743,23 +761,6 @@ class VirtualBoxVM(BaseNode):
vm_info[name.strip('"')] = value.strip('"')
return vm_info
@asyncio.coroutine
def _get_maximum_supported_adapters(self):
"""
Returns the maximum adapters supported by this VM.
:returns: maximum number of supported adapters (int)
"""
# check the maximum number of adapters supported by the VM
vm_info = yield from self._get_vm_info()
maximum_adapters = 8
if "chipset" in vm_info:
chipset = vm_info["chipset"]
if chipset == "ich9":
maximum_adapters = int(self._system_properties["Maximum ICH9 Network Adapter count"])
return maximum_adapters
def _get_pipe_name(self):
"""
Returns the pipe name to create a serial connection.

View File
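
The reworked check above reads the per-chipset limit from the parsed output of VBoxManage list systemproperties (kept in self._system_properties) and falls back to 8 adapters when the property is missing or malformed. A small standalone sketch of that lookup; the property values below are hypothetical examples, the real numbers come from the installed VirtualBox version:

# Hypothetical excerpt of parsed `VBoxManage list systemproperties` output;
# the real values depend on the VirtualBox version installed.
system_properties = {
    "Maximum PIIX3 Network Adapter count": "8",
    "Maximum ICH9 Network Adapter count": "36",
}

def maximum_adapters(chipset, properties, default=8):
    """Return the adapter limit for a chipset, falling back to a safe default."""
    key = "Maximum {} Network Adapter count".format(chipset.upper())
    try:
        return int(properties[key])
    except (KeyError, ValueError):
        return default

print(maximum_adapters("piix3", system_properties))  # 8
print(maximum_adapters("ich9", system_properties))   # 36
print(maximum_adapters("ich9", {}))                  # 8, property missing
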

@ -571,8 +571,8 @@ class VMware(BaseManager):
vm_entries = {}
vmware_vms = []
log.info('Searching for VMware VMs in inventory file "{}"'.format(inventory_path))
try:
log.debug('Reading VMware inventory file "{}"'.format(inventory_path))
pairs = self.parse_vmware_file(inventory_path)
for key, value in pairs.items():
if key.startswith("vmlist"):
@ -603,6 +603,7 @@ class VMware(BaseManager):
"""
vmware_vms = []
log.info('Searching for VMware VMs in directory "{}"'.format(directory))
for path, _, filenames in os.walk(directory):
for filename in filenames:
if os.path.splitext(filename)[1] == ".vmx":
@ -649,9 +650,9 @@ class VMware(BaseManager):
return os.path.expanduser("~/.vmware/preferences")
@staticmethod
def get_vmware_default_vm_path():
def get_vmware_default_vm_paths():
"""
Returns VMware default VM directory path.
Returns VMware default VM directory paths.
:returns: path to the default VM directory
"""
@ -662,15 +663,11 @@ class VMware(BaseManager):
path = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(None, 5, None, 0, path)
documents_folder = path.value
windows_type = sys.getwindowsversion().product_type
if windows_type == 2 or windows_type == 3:
return '{}\My Virtual Machines'.format(documents_folder)
else:
return '{}\Virtual Machines'.format(documents_folder)
return ['{}\My Virtual Machines'.format(documents_folder), '{}\Virtual Machines'.format(documents_folder)]
elif sys.platform.startswith("darwin"):
return os.path.expanduser("~/Documents/Virtual Machines.localized")
return [os.path.expanduser("~/Documents/Virtual Machines.localized")]
else:
return os.path.expanduser("~/vmware")
return [os.path.expanduser("~/vmware")]
@asyncio.coroutine
def list_vms(self):
@ -680,15 +677,14 @@ class VMware(BaseManager):
# check for the right VMware version
yield from self.check_vmware_version()
vmware_vms = []
inventory_path = self.get_vmware_inventory_path()
if os.path.exists(inventory_path) and self.host_type != "player":
# inventory may exist for VMware player if VMware workstation has been previously installed
return self._get_vms_from_inventory(inventory_path)
else:
# VMware player has no inventory file, let's search the default location for VMs
vmware_vms = self._get_vms_from_inventory(inventory_path)
if not vmware_vms:
# backup methods when no VMware inventory file exists or for VMware player which has no inventory file
vmware_preferences_path = self.get_vmware_preferences_path()
default_vm_path = self.get_vmware_default_vm_path()
pairs = {}
if os.path.exists(vmware_preferences_path):
# the default vm path may be present in VMware preferences file.
@ -698,11 +694,22 @@ class VMware(BaseManager):
log.warning('Could not read VMware preferences file "{}": {}'.format(vmware_preferences_path, e))
if "prefvmx.defaultvmpath" in pairs:
default_vm_path = pairs["prefvmx.defaultvmpath"]
if not os.path.isdir(default_vm_path):
raise VMwareError('Could not find the default VM directory: "{}". Due to limitation of the free version of VMware Workstation you need to import the GNS3 VM in the default location.'.format(default_vm_path))
vmware_vms = self._get_vms_from_directory(default_vm_path)
if not os.path.isdir(default_vm_path):
raise VMwareError('Could not find or access the default VM directory: "{default_vm_path}". Please change "prefvmx.defaultvmpath={default_vm_path}" in "{vmware_preferences_path}"'.format(default_vm_path=default_vm_path,
vmware_preferences_path=vmware_preferences_path))
vmware_vms = self._get_vms_from_directory(default_vm_path)
# looks for VMX paths in the preferences file in case not all VMs are in the default directory
if not vmware_vms:
# the default vm path is not in the VMware preferences file or that directory is empty
# let's search the default locations for VMs
for default_vm_path in self.get_vmware_default_vm_paths():
if os.path.isdir(default_vm_path):
vmware_vms.extend(self._get_vms_from_directory(default_vm_path))
if not vmware_vms:
log.warning("Could not find any VMware VM in default locations")
# look for VMX paths in the preferences file in case not all VMs are in a default directory
for key, value in pairs.items():
m = re.match(r'pref.mruVM(\d+)\.filename', key)
if m:
@ -714,7 +721,7 @@ class VMware(BaseManager):
found = True
if found is False:
vmware_vms.append({"vmname": pairs[display_name], "vmx_path": value})
return vmware_vms
return vmware_vms
@staticmethod
def _get_linux_vmware_binary():

View File

@ -86,7 +86,7 @@ class VMwareVM(BaseNode):
"adapter_type": self.adapter_type,
"use_any_adapter": self.use_any_adapter,
"status": self.status,
"node_directory": self.working_dir,
"node_directory": self.working_path,
"linked_clone": self.linked_clone}
return json

View File

@ -127,7 +127,7 @@ class VPCSVM(BaseNode):
return {"name": self.name,
"node_id": self.id,
"node_directory": self.working_dir,
"node_directory": self.working_path,
"status": self.status,
"console": self._console,
"console_type": "telnet",

View File

@ -118,6 +118,10 @@ class Controller:
vms.append(vm)
for vm in vms:
# remove deprecated properties
for prop in vm.copy():
if prop in ["enable_remote_console", "use_ubridge"]:
del vm[prop]
vm.setdefault("appliance_id", str(uuid.uuid4()))
appliance = Appliance(vm["appliance_id"], vm)
self._appliances[appliance.id] = appliance

View File

@ -73,13 +73,19 @@ class Appliance:
"""
Appliance data (a hash)
"""
try:
category = ID_TO_CATEGORY[self._data["category"]]
except KeyError:
category = self._data["category"]
return {
"appliance_id": self._id,
"node_type": self._data["node_type"],
"name": self._data["name"],
"default_name_format": self._data.get("default_name_format", "{name}-{0}"),
"category": ID_TO_CATEGORY[self._data["category"]],
"category": category,
"symbol": self._data.get("symbol", ":/symbols/computer.svg"),
"compute_id": self.compute_id,
"builtin": self._builtin
"builtin": self._builtin,
"platform": self._data.get("platform", None)
}

View File

@ -348,6 +348,12 @@ class Compute:
response = yield from self._session().request("GET", url, auth=self._auth, timeout=None)
if response.status == 404:
raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path))
elif response.status == 403:
raise aiohttp.web.HTTPForbidden(text="forbidden to open {} on compute".format(path))
elif response.status != 200:
raise aiohttp.web.HTTPInternalServerError(text="Unexpected error {}: {}: while opening {} on compute".format(response.status,
response.reason,
path))
return StreamResponse(response)
@asyncio.coroutine
@ -471,6 +477,10 @@ class Compute:
host = "127.0.0.1"
return "{}://{}:{}/v2/compute{}".format(self._protocol, host, self._port, path)
def get_url(self, path):
""" Returns URL for specific path at Compute"""
return self._getUrl(path)
@asyncio.coroutine
def _run_http_query(self, method, path, data=None, timeout=20, raw=False):
with Timeout(timeout):
@ -499,7 +509,7 @@ class Compute:
response = yield from self._session().request(method, url, headers=headers, data=data, auth=self._auth, chunked=chunked, timeout=timeout)
except asyncio.TimeoutError as e:
raise ComputeError("Timeout error when connecting to {}".format(url))
except (aiohttp.ClientError, aiohttp.ServerDisconnectedError, ValueError) as e:
except (aiohttp.ClientError, aiohttp.ServerDisconnectedError, ValueError, KeyError) as e:
raise ComputeError(str(e))
body = yield from response.read()
if body and not raw:

View File

@ -272,11 +272,10 @@ class GNS3VM:
if not engine.running:
if self._settings["vmname"] is None:
return
log.info("Start the GNS3 VM")
engine.vmname = self._settings["vmname"]
engine.ram = self._settings["ram"]
engine.vpcus = self._settings["vcpus"]
engine.vcpus = self._settings["vcpus"]
engine.headless = self._settings["headless"]
compute = yield from self._controller.add_compute(compute_id="vm",
name="GNS3 VM is starting ({})".format(engine.vmname),

View File

@ -15,7 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json.decoder
import aiohttp
import logging
import asyncio
@ -176,7 +175,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
log.info('"{}" state is {}'.format(self._vmname, vm_state))
if vm_state == "poweroff":
yield from self.set_vcpus(self.vpcus)
yield from self.set_vcpus(self.vcpus)
yield from self.set_ram(self.ram)
if vm_state in ("poweroff", "saved"):
@ -232,10 +231,11 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
pass
if resp:
try:
json_data = yield from resp.json()
except ValueError:
pass
if resp.status < 300:
try:
json_data = yield from resp.json()
except ValueError:
pass
resp.close()
session.close()

View File

@ -85,6 +85,30 @@ class VMwareGNS3VM(BaseGNS3VM):
except OSError as e:
raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e))
@asyncio.coroutine
def _set_extra_options(self):
try:
"""
Due to a bug/change in VMware 14 we're not able to pass Hardware Virtualization through to the GNS3 VM.
We only enable it when it's not already present in the current configuration and the user hasn't deactivated it.
"""
extra_config = (
("vhv.enable", "TRUE"),
)
pairs = VMware.parse_vmware_file(self._vmx_path)
updated = False
for key, value in extra_config:
if key not in pairs.keys():
pairs[key] = value
updated = True
log.info("GNS3 VM VMX `{}` set to `{}`".format(key, value))
if updated:
VMware.write_vmx_file(self._vmx_path, pairs)
log.info("GNS3 VM VMX has been updated.")
except OSError as e:
raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e))
@asyncio.coroutine
def list(self):
"""
@ -126,6 +150,7 @@ class VMwareGNS3VM(BaseGNS3VM):
log.info("Update GNS3 VM settings")
# set the number of vCPUs and amount of RAM
yield from self._set_vcpus_ram(self.vcpus, self.ram)
yield from self._set_extra_options()
# start the VM
args = [self._vmx_path]

View File

@ -23,6 +23,7 @@ import shutil
import asyncio
import zipfile
import aiohttp
import itertools
from .topology import load_topology
@ -116,8 +117,10 @@ def import_project(controller, project_id, stream, location=None, name=None, kee
if node["node_type"] in ("docker", "qemu", "iou", "nat"):
node["compute_id"] = "vm"
else:
# Round-robin through available compute resources.
compute_nodes = itertools.cycle(controller.computes)
for node in topology["topology"]["nodes"]:
node["compute_id"] = "local"
node["compute_id"] = next(compute_nodes)
compute_created = set()
for node in topology["topology"]["nodes"]:

View File
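
With this change, an imported project that is not bound to the GNS3 VM no longer pins every node to the "local" compute; nodes are spread across all registered computes with itertools.cycle. A minimal sketch of the resulting assignment, using hypothetical compute IDs and node names:

import itertools

# Hypothetical compute IDs known to the controller and nodes from a topology
computes = ["local", "remote-1", "remote-2"]
nodes = [{"name": "PC-{}".format(i)} for i in range(1, 6)]

# Round-robin assignment, as done in import_project()
compute_nodes = itertools.cycle(computes)
for node in nodes:
    node["compute_id"] = next(compute_nodes)

print([(node["name"], node["compute_id"]) for node in nodes])
# [('PC-1', 'local'), ('PC-2', 'remote-1'), ('PC-3', 'remote-2'),
#  ('PC-4', 'local'), ('PC-5', 'remote-1')]
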

@ -132,6 +132,13 @@ class Link:
"""
return self._filters
@property
def nodes(self):
"""
Get the current nodes attached to this link
"""
return self._nodes
def get_active_filters(self):
"""
Return the active filters.
@ -290,7 +297,12 @@ class Link:
Dump a pcap file on disk
"""
stream_content = yield from self.read_pcap_from_source()
try:
stream_content = yield from self.read_pcap_from_source()
except aiohttp.web.HTTPException as e:
log.error("Could not stream pcap file: error {}: {}".format(e.status, e.text))
self._capturing = False
self._project.controller.notification.emit("link.updated", self.__json__())
with stream_content as stream:
with open(self.capture_file_path, "wb+") as f:
while self._capturing:

View File

@ -680,8 +680,13 @@ class Project:
@asyncio.coroutine
def delete(self):
if self._status != "opened":
yield from self.open()
try:
yield from self.open()
except aiohttp.web.HTTPConflict as e:
# ignore missing images or other conflicts when deleting a project
log.warning("Conflict while deleting project: {}".format(e.text))
yield from self.delete_on_computes()
yield from self.close()
try:
@ -739,7 +744,28 @@ class Project:
except OSError:
pass
try:
topology = load_topology(path)["topology"]
project_data = load_topology(path)
# load the project meta properties
keys_to_load = [
"auto_start",
"auto_close",
"auto_open",
"scene_height",
"scene_width",
"zoom",
"show_layers",
"snap_to_grid",
"show_grid",
"show_interface_labels"
]
for key in keys_to_load:
val = project_data.get(key, None)
if val is not None:
setattr(self, key, val)
topology = project_data["topology"]
for compute in topology.get("computes", []):
yield from self.controller.add_compute(**compute)
for node in topology.get("nodes", []):
@ -748,12 +774,22 @@ class Project:
node_id = node.pop("node_id", str(uuid.uuid4()))
yield from self.add_node(compute, name, node_id, dump=False, **node)
for link_data in topology.get("links", []):
if 'link_id' not in link_data.keys():
# skip the link
continue
link = yield from self.add_link(link_id=link_data["link_id"])
if "filters" in link_data:
yield from link.update_filters(link_data["filters"])
for node_link in link_data["nodes"]:
node = self.get_node(node_link["node_id"])
port = node.get_port(node_link["adapter_number"], node_link["port_number"])
if port.link is not None:
# the node port is already attached to another link
continue
yield from link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False)
if len(link.nodes) != 2:
# a link should have 2 attached nodes; anything else can happen with corrupted projects
yield from self.delete_link(link.id)
for drawing_data in topology.get("drawings", []):
yield from self.add_drawing(dump=False, **drawing_data)

View File

@ -57,7 +57,7 @@ class CrashReport:
Report crash to a third party service
"""
DSN = "sync+https://a387df116ea64abb9561c6b1df84afd3:2c420d1690e242ada5307fc9e5f01d06@sentry.io/38482"
DSN = "sync+https://6ea2fd77178749dea96d725eb4b1b4d1:307d5441e2d7405fa0bd668042392b02@sentry.io/38482"
if hasattr(sys, "frozen"):
cacert = get_resource("cacert.pem")
if cacert is not None and os.path.isfile(cacert):

View File

@ -16,6 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from aiohttp.web import HTTPConflict
from gns3server.web.route import Route
from gns3server.schemas.node import NODE_CAPTURE_SCHEMA
@ -219,7 +220,13 @@ class CloudHandler:
builtin_manager = Builtin.instance()
node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
nio = node.nios[int(request.match_info["adapter_number"])]
adapter_number = int(request.match_info["adapter_number"])
try:
nio = node.nios[adapter_number]
except KeyError:
raise HTTPConflict(text="NIO `{}` doesn't exist".format(adapter_number))
if "filters" in request.json and nio:
nio.filters = request.json["filters"]
yield from node.update_nio(int(request.match_info["port_number"]), nio)

View File

@ -21,7 +21,8 @@ from gns3server.controller import Controller
from gns3server.schemas.compute import (
COMPUTE_CREATE_SCHEMA,
COMPUTE_OBJECT_SCHEMA,
COMPUTE_UPDATE_SCHEMA
COMPUTE_UPDATE_SCHEMA,
COMPUTE_ENDPOINT_OUTPUT_OBJECT_SCHEMA
)
import logging
@ -93,6 +94,34 @@ class ComputeHandler:
res = yield from compute.images(request.match_info["emulator"])
response.json(res)
@Route.get(
r"/computes/endpoint/{compute_id}/{emulator}/{action:.+}",
parameters={
"compute_id": "Compute UUID"
},
status_codes={
200: "OK",
404: "Instance doesn't exist"
},
raw=True,
output=COMPUTE_ENDPOINT_OUTPUT_OBJECT_SCHEMA,
description="Returns the endpoint for particular `compute` to specific action. "
"WARNING: This is experimental feature and may change anytime. Please don't rely on this endpoint.")
def endpoint(request, response):
controller = Controller.instance()
compute = controller.get_compute(request.match_info["compute_id"])
path = '/{emulator}/{action}'.format(
emulator=request.match_info['emulator'],
action=request.match_info['action'])
endpoint = compute.get_url(path)
response.set_status(200)
response.json(dict(
endpoint=endpoint
))
@Route.get(
r"/computes/{compute_id}/{emulator}/{action:.+}",
parameters={

View File

@ -367,6 +367,7 @@ class NodeHandler:
path = request.match_info["path"]
path = force_unix_path(path)
# Raise an error if the user tries to escape
if path[0] == ".":
raise aiohttp.web.HTTPForbidden
@ -401,7 +402,7 @@ class NodeHandler:
project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"])
node = project.get_node(request.match_info["node_id"])
path = request.match_info["path"]
path = os.path.normpath(path)
path = force_unix_path(path)
# Raise an error if the user tries to escape
if path[0] == ".":

View File

@ -167,6 +167,7 @@ class ProjectHandler:
controller = Controller.instance()
config = Config.instance()
if config.get_section_config("Server").getboolean("local", False) is False:
log.error("Can't load the project the server is not started with --local")
response.set_status(403)
return
project = yield from controller.load_project(request.json.get("path"),)

View File

@ -109,3 +109,17 @@ COMPUTE_OBJECT_SCHEMA = {
"additionalProperties": False,
"required": ["compute_id", "protocol", "host", "port", "name"]
}
COMPUTE_ENDPOINT_OUTPUT_OBJECT_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Output schema for obtaining endpoint on compute",
"type": "object",
"properties": {
"endpoint": {
"description": "URL to endpoint on specific compute and to particular action",
"type": "string"
},
},
"additionalProperties": False,
}

View File
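
The route and schema above describe the experimental /computes/endpoint/... API: the controller does not proxy the request, it only returns the direct URL of the action on the compute. A hedged client sketch, assuming a controller listening on 127.0.0.1:3080 with a compute registered as "my_compute" (both hypothetical, mirroring the handler test further below) and ignoring authentication:

import requests

CONTROLLER = "http://127.0.0.1:3080"  # hypothetical controller address

# Ask the controller for the direct URL of an action on the compute
r = requests.get(CONTROLLER + "/v2/computes/endpoint/my_compute/virtualbox/images")
r.raise_for_status()
endpoint = r.json()["endpoint"]
print(endpoint)  # e.g. "http://localhost:84/v2/compute/virtualbox/images"

# The returned URL points straight at the compute, bypassing the controller,
# so a follow-up request talks to the compute directly
images = requests.get(endpoint).json()
print(len(images))
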

@ -42,7 +42,7 @@ GNS3VM_SETTINGS_SCHEMA = {
"enum": ["vmware", "virtualbox", None]
},
"vcpus": {
"description": "Number of VPCUS affected to the VM",
"description": "Number of vCPUS affected to the VM",
"type": "integer"
},
"ram": {

View File

@ -144,7 +144,7 @@ NODE_OBJECT_SCHEMA = {
},
"console_type": {
"description": "Console type",
"enum": ["vnc", "telnet", "http", "spice", None]
"enum": ["vnc", "telnet", "http", "https", "spice", None]
},
"properties": {
"description": "Properties specific to an emulator",

View File

@ -129,12 +129,12 @@ class Hypervisor(UBridgeHypervisor):
return self._version
@asyncio.coroutine
def _check_ubridge_version(self):
def _check_ubridge_version(self, env=None):
"""
Checks the uBridge executable version
"""
try:
output = yield from subprocess_check_output(self._path, "-v", cwd=self._working_dir)
output = yield from subprocess_check_output(self._path, "-v", cwd=self._working_dir, env=env)
match = re.search("ubridge version ([0-9a-z\.]+)", output)
if match:
self._version = match.group(1)
@ -151,13 +151,13 @@ class Hypervisor(UBridgeHypervisor):
Starts the uBridge hypervisor process.
"""
yield from self._check_ubridge_version()
env = os.environ.copy()
if sys.platform.startswith("win"):
# add the Npcap directory to $PATH to force uBridge to use npcap DLL instead of Winpcap (if installed)
system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap")
if os.path.isdir(system_root):
env["PATH"] = system_root + ';' + env["PATH"]
yield from self._check_ubridge_version(env)
try:
command = self._build_command()
log.info("starting ubridge: {}".format(command))

View File

@ -41,18 +41,23 @@ def wait_run_in_executor(func, *args, **kwargs):
@asyncio.coroutine
def subprocess_check_output(*args, cwd=None, env=None):
def subprocess_check_output(*args, cwd=None, env=None, stderr=False):
"""
Run a command and capture output
:param *args: List of command arguments
:param cwd: Current working directory
:param env: Command environment
:param stderr: Read on stderr
:returns: Command output
"""
proc = yield from asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE, cwd=cwd, env=env)
output = yield from proc.stdout.read()
if stderr:
proc = yield from asyncio.create_subprocess_exec(*args, stderr=asyncio.subprocess.PIPE, cwd=cwd, env=env)
output = yield from proc.stderr.read()
else:
proc = yield from asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE, cwd=cwd, env=env)
output = yield from proc.stdout.read()
if output is None:
return ""
# If we received garbage we ignore invalid characters
@ -60,7 +65,6 @@ def subprocess_check_output(*args, cwd=None, env=None):
# and the code of VPCS, Dynamips, etc. will detect it's not the correct binary
return output.decode("utf-8", errors="ignore")
@asyncio.coroutine
def wait_for_process_termination(process, timeout=10):
"""

View File

@ -19,6 +19,7 @@
import sys
import asyncio
import inspect
import io
from prompt_toolkit import prompt
from prompt_toolkit.history import InMemoryHistory
@ -29,6 +30,7 @@ from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.shortcuts import create_prompt_application, create_asyncio_eventloop
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from prompt_toolkit.input import StdinInput
from .telnet_server import AsyncioTelnetServer, TelnetConnection
from .input_stream import InputStream
@ -151,6 +153,24 @@ class EmbedShell:
return commands
class PatchedStdinInput(StdinInput):
"""
`prompt_toolkit.input.StdinInput` checks whether stdin is a tty or not; we don't need to do that.
Fixes an issue when PyCharm runs its own terminal without emulation.
https://github.com/GNS3/gns3-server/issues/1172
"""
def __init__(self, stdin=None):
self.stdin = stdin or sys.stdin
try:
self.stdin.fileno()
except io.UnsupportedOperation:
if 'idlelib.run' in sys.modules:
raise io.UnsupportedOperation(
'Stdin is not a terminal. Running from Idle is not supported.')
else:
raise io.UnsupportedOperation('Stdin is not a terminal.')
class UnstoppableEventLoop(EventLoop):
"""
Partially fake event loop which cannot be stopped by CommandLineInterface
@ -190,12 +210,18 @@ class ShellConnection(TelnetConnection):
@asyncio.coroutine
def connected(self):
# prompt_toolkit internally checks if it's on Windows during output rendering but
# we need to force the use of Vt100_Output instead of Win32_Output
from prompt_toolkit import renderer
renderer.is_windows = lambda: False
def get_size():
return self._size
self._cli = CommandLineInterface(
application=create_prompt_application(self._shell.prompt),
eventloop=UnstoppableEventLoop(create_asyncio_eventloop(self._loop)),
input=PatchedStdinInput(sys.stdin),
output=Vt100_Output(self, get_size))
self._cb = self._cli.create_eventloop_callbacks()

View File

@ -240,7 +240,9 @@ def interfaces():
# This interface has special behavior
for result in results:
result["special"] = False
for special_interface in ("lo", "vmnet", "vboxnet", "docker", "lxcbr", "virbr", "ovs-system", "veth", "fw", "p2p", "bridge", "vmware", "virtualbox"):
for special_interface in ("lo", "vmnet", "vboxnet", "docker", "lxcbr",
"virbr", "ovs-system", "veth", "fw", "p2p",
"bridge", "vmware", "virtualbox", "gns3"):
if result["name"].lower().startswith(special_interface):
result["special"] = True
for special_interface in ("-nic"):

View File

@ -23,8 +23,10 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
__version__ = "2.1.0a2"
__version_info__ = (2, 1, 0, -99)
# If it's a git checkout try to add the commit
__version__ = "2.1.0"
__version_info__ = (2, 1, 0, 0)
# If it's a git checkout try to add the commit
if "dev" in __version__:
try:
import os

View File

@ -211,12 +211,12 @@ class Route(object):
response = Response(request=request, route=route)
response.set_status(409)
response.json({"message": str(e), "status": 409, "exception": e.__class__.__name__})
except (ImageMissingError) as e:
except ImageMissingError as e:
log.error("Image missing error detected: {}".format(e.image))
response = Response(request=request, route=route)
response.set_status(409)
response.json({"message": str(e), "status": 409, "image": e.image, "exception": e.__class__.__name__})
except asyncio.futures.CancelledError as e:
except asyncio.futures.CancelledError:
response = Response(request=request, route=route)
response.set_status(408)
response.json({"message": "Request canceled", "status": 408})

View File

@ -36,7 +36,6 @@ from ..compute.port_manager import PortManager
from ..compute.qemu import Qemu
from ..controller import Controller
# do not delete this import
import gns3server.handlers
@ -79,7 +78,7 @@ class WebServer:
try:
srv = self._loop.create_server(handler, self._host, self._port, ssl=ssl_context)
self._server, startup_res = self._loop.run_until_complete(asyncio.gather(srv, self._app.startup(), loop=self._loop))
except (OSError, asyncio.CancelledError) as e:
except (RuntimeError, OSError, asyncio.CancelledError) as e:
log.critical("Could not start the server: {}".format(e))
return False
return True
@ -234,8 +233,11 @@ class WebServer:
ssl_context = self._create_ssl_context(server_config)
self._loop = asyncio.get_event_loop()
# Asyncio will raise an error if a coroutine is not called
self._loop.set_debug(True)
if log.getEffectiveLevel() == logging.DEBUG:
# In debug mode we enable warnings about coroutines
# that are not awaited with await/yield from
self._loop.set_debug(True)
for key, val in os.environ.items():
log.debug("ENV %s=%s", key, val)
@ -249,7 +251,10 @@ class WebServer:
# Default web server for web gui dev
"http://127.0.0.1:8080": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://localhost:8080": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://gns3.github.io": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*")
"http://127.0.0.1:4200": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://localhost:4200": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"http://gns3.github.io": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*"),
"https://gns3.github.io": aiohttp_cors.ResourceOptions(expose_headers="*", allow_headers="*")
})
PortManager.instance().console_host = self._host

View File

@ -6,5 +6,6 @@ Jinja2>=2.7.3
raven>=5.23.0
psutil>=3.0.0
zipstream>=1.1.4
typing>=3.5.3.0 # Otherwise yarl fail with python 3.4
typing>=3.5.3.0 # Otherwise yarl fails with python 3.4
multidict<3.2.0 # Otherwise fails when upgraded to v3.2.0
prompt-toolkit

View File

@ -59,3 +59,23 @@ def test_json_darwin(darwin_platform, project):
}
]
}
def test_json_windows_with_full_name_of_interface(windows_platform, project):
with patch("gns3server.utils.interfaces.interfaces", return_value=[
{"name": "VMware Network Adapter VMnet8", "special": True, "type": "ethernet"}]):
nat = Nat("nat1", str(uuid.uuid4()), project, MagicMock())
assert nat.__json__() == {
"name": "nat1",
"node_id": nat.id,
"project_id": project.id,
"status": "started",
"ports_mapping": [
{
"interface": "VMware Network Adapter VMnet8",
"name": "nat0",
"port_number": 0,
"type": "ethernet"
}
]
}

View File

@ -934,6 +934,7 @@ def test_create_network_interfaces(vm):
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
def test_fix_permission(vm, loop):
vm._volumes = ["/etc"]
vm._get_container_state = AsyncioMagicMock(return_value="running")
process = MagicMock()
with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=process) as mock_exec:
loop.run_until_complete(vm._fix_permissions())
@ -941,6 +942,19 @@ def test_fix_permission(vm, loop):
assert process.wait.called
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
def test_fix_permission_not_running(vm, loop):
vm._volumes = ["/etc"]
vm._get_container_state = AsyncioMagicMock(return_value="stopped")
process = MagicMock()
with asyncio_patch("gns3server.compute.docker.Docker.query") as mock_start:
with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=process) as mock_exec:
loop.run_until_complete(vm._fix_permissions())
mock_exec.assert_called_with('docker', 'exec', 'e90e34656842', '/gns3/bin/busybox', 'sh', '-c', '(/gns3/bin/busybox find "/etc" -depth -print0 | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c \'%a:%u:%g:%n\' > "/etc/.gns3_perms") && /gns3/bin/busybox chmod -R u+rX "/etc" && /gns3/bin/busybox chown {}:{} -R "/etc"'.format(os.getuid(), os.getgid()))
assert mock_start.called
assert process.wait.called
def test_read_console_output_with_binary_mode(vm, loop):
class InputStreamMock(object):
def __init__(self):

View File

@ -14,6 +14,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import pytest
import aiohttp
@ -137,10 +138,10 @@ def test_update_ubridge_udp_connection(node, async_run):
def test_ubridge_apply_filters(node, async_run):
filters = {
"latency": [10],
"bpf": ["icmp[icmptype] == 8\ntcp src port 53"]
}
filters = OrderedDict((
('latency', [10]),
('bpf', ["icmp[icmptype] == 8\ntcp src port 53"])
))
node._ubridge_send = AsyncioMagicMock()
async_run(node._ubridge_apply_filters("VPCS-10", filters))
node._ubridge_send.assert_any_call("bridge reset_packet_filters VPCS-10")

View File

@ -286,7 +286,8 @@ def test_duplicate_vpcs(async_run, vpcs, project):
destination_node = async_run(vpcs.create_node("PC-2", project.id, destination_node_id, console=2223))
async_run(vpcs.duplicate_node(source_node_id, destination_node_id))
with open(os.path.join(destination_node.working_dir, "startup.vpc")) as f:
assert f.read() == "set pcname PC-2\nip dhcp\n"
startup = f.read().strip()
assert startup == "set pcname PC-2\nip dhcp\n".strip()
def test_duplicate_ethernet_switch(async_run, project):

View File

@ -106,6 +106,16 @@ def test_node_working_directory(tmpdir, node):
assert os.path.exists(p.node_working_directory(node))
def test_node_working_path(tmpdir, node):
directory = Config.instance().get_section_config("Server").get("projects_path")
with patch("gns3server.compute.project.Project.is_local", return_value=True):
p = Project(project_id=str(uuid4()))
assert p.node_working_path(node) == os.path.join(directory, p.id, 'project-files', node.module_name, node.id)
# after this call the directory structure should still not have been created
assert not os.path.exists(p.node_working_path(node))
def test_project_delete(loop):
project = Project(project_id=str(uuid4()))
directory = project.path

View File

@ -83,7 +83,7 @@ def http_server(request, loop, port_manager, monkeypatch, controller):
for method, route, handler in Route.get_routes():
app.router.add_route(method, route, handler)
host = "localhost"
host = "127.0.0.1"
# We try multiple times because on Travis the test can fail when the port is taken by someone else
for i in range(0, 5):
@ -290,7 +290,7 @@ def darwin_platform():
@pytest.yield_fixture
def windows_platform():
"""
Change sys.plaform to Windows
Change sys.platform to Windows
"""
old_platform = sys.platform
sys.platform = "win10"
@ -301,7 +301,7 @@ def windows_platform():
@pytest.yield_fixture
def linux_platform():
"""
Change sys.plaform to Linux
Change sys.platform to Linux
"""
old_platform = sys.platform
sys.platform = "linuxdebian"

View File

@ -0,0 +1,57 @@
#!/usr/bin/env python
#
# Copyright (C) 2017 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from tests.utils import asyncio_patch
from gns3server.controller.gns3vm.vmware_gns3_vm import VMwareGNS3VM
@pytest.fixture
def gns3vm(controller):
vm = VMwareGNS3VM(controller)
vm.vmname = "GNS3 VM"
return vm
@pytest.fixture
def tmx_path(tmpdir):
return str(tmpdir / "vmware.tmx")
def test_set_extra_options(gns3vm, async_run, tmx_path):
gns3vm._vmx_path = tmx_path
# when the entry is not present, we add it
with open(tmx_path, 'w') as f:
f.write("")
async_run(gns3vm._set_extra_options())
with open(tmx_path, 'r') as f:
assert f.read() == 'vhv.enable = "TRUE"\n'
# when there is an entry, we don't modify it
with open(tmx_path, 'w') as f:
f.write('vhv.enable = "FALSE"\n')
async_run(gns3vm._set_extra_options())
with open(tmx_path, 'r') as f:
assert f.read() == 'vhv.enable = "FALSE"\n'

View File

@ -25,7 +25,8 @@ def test_appliance_json():
"default_name_format": "{name}-{0}",
"category": 0,
"symbol": "qemu.svg",
"server": "local"
"server": "local",
"platform": None
})
assert a.__json__() == {
"appliance_id": a.id,
@ -35,7 +36,54 @@ def test_appliance_json():
"default_name_format": "{name}-{0}",
"category": "router",
"symbol": "qemu.svg",
"compute_id": "local"
"compute_id": "local",
"platform": None
}
def test_appliance_json_with_not_known_category():
a = Appliance(None, {
"node_type": "qemu",
"name": "Test",
"default_name_format": "{name}-{0}",
"category": 'Not known',
"symbol": "qemu.svg",
"server": "local",
"platform": None
})
assert a.__json__() == {
"appliance_id": a.id,
"node_type": "qemu",
"builtin": False,
"name": "Test",
"default_name_format": "{name}-{0}",
"category": "Not known",
"symbol": "qemu.svg",
"compute_id": "local",
"platform": None
}
def test_appliance_json_with_platform():
a = Appliance(None, {
"node_type": "dynamips",
"name": "Test",
"default_name_format": "{name}-{0}",
"category": 0,
"symbol": "dynamips.svg",
"server": "local",
"platform": "c3725"
})
assert a.__json__() == {
"appliance_id": a.id,
"node_type": "dynamips",
"builtin": False,
"name": "Test",
"default_name_format": "{name}-{0}",
"category": "router",
"symbol": "dynamips.svg",
"compute_id": "local",
"platform": "c3725"
}

View File

@ -55,6 +55,13 @@ def test_getUrl(controller):
assert compute._getUrl("/test") == "https://[::1]:84/v2/compute/test"
def test_get_url(controller):
compute = Compute("my_compute_id", protocol="https", host="localhost", port=84, controller=controller)
with patch('gns3server.controller.compute.Compute._getUrl', return_value="returned") as getURL:
assert compute.get_url("/test") == 'returned'
getURL.assert_called_once_with('/test')
def test_host_ip(controller):
compute = Compute("my_compute_id", protocol="https", host="localhost", port=84, controller=controller)
assert compute.host_ip == "127.0.0.1"

View File

@ -176,6 +176,8 @@ def test_import_iou_linux_no_vm(linux_platform, async_run, tmpdir, controller):
"""
project_id = str(uuid.uuid4())
controller._computes["local"] = AsyncioMagicMock()
topology = {
"project_id": str(uuid.uuid4()),
"name": "test",
@ -361,6 +363,8 @@ def test_import_node_id(linux_platform, async_run, tmpdir, controller):
"""
project_id = str(uuid.uuid4())
controller._computes["local"] = AsyncioMagicMock()
topology = {
"project_id": str(uuid.uuid4()),
"name": "test",

View File

@ -425,8 +425,7 @@ def test_open_close(async_run, controller):
def test_open_auto_start(async_run, controller):
project = Project(controller=controller, status="closed", name="Test")
project.auto_start = True
project = Project(controller=controller, status="closed", name="Test", auto_start=True)
project.start_all = AsyncioMagicMock()
async_run(project.open())
assert project.start_all.called

View File

@ -23,6 +23,7 @@ import aiohttp
from tests.utils import asyncio_patch, AsyncioMagicMock
from gns3server.controller.compute import Compute
from gns3server.controller.project import Project
@pytest.fixture
@ -34,6 +35,8 @@ def demo_topology():
"auto_close": True,
"auto_open": False,
"auto_start": False,
"scene_height": 500,
"scene_width": 700,
"name": "demo",
"project_id": "3c1be6f9-b4ba-4737-b209-63c47c23359f",
"revision": 5,
@ -142,7 +145,7 @@ def demo_topology():
}
def test_open(controller, tmpdir, demo_topology, async_run, http_server):
def test_load_project(controller, tmpdir, demo_topology, async_run, http_server):
with open(str(tmpdir / "demo.gns3"), "w+") as f:
json.dump(demo_topology, f)
@ -160,6 +163,45 @@ def test_open(controller, tmpdir, demo_topology, async_run, http_server):
assert len(project.drawings) == 1
assert project.name == "demo"
assert project.scene_height == 500
assert project.scene_width == 700
def test_open(controller, tmpdir, demo_topology, async_run, http_server):
simple_topology = {
"auto_close": True,
"auto_open": False,
"auto_start": False,
"scene_height": 500,
"scene_width": 700,
"name": "demo",
"project_id": "3c1be6f9-b4ba-4737-b209-63c47c23359f",
"revision": 5,
"topology": {
"computes": [],
"drawings": [],
"links": [],
"nodes": []
},
"type": "topology",
"version": "2.0.0"
}
with open(str(tmpdir / "demo.gns3"), "w+") as f:
json.dump(simple_topology, f)
project = Project(
name="demo",
project_id="64ba8408-afbf-4b66-9cdd-1fd854427478",
path=str(tmpdir), controller=controller, filename="demo.gns3", status="closed")
async_run(project.open())
assert project.status == "opened"
assert project.name == "demo"
assert project.scene_height == 500
assert project.scene_width == 700
def test_open_missing_compute(controller, tmpdir, demo_topology, async_run, http_server):

View File

@ -240,4 +240,19 @@ def test_compute_autoidlepc(http_controller, controller):
assert response.status == 200
def test_compute_endpoint(http_controller, controller):
params = {
"compute_id": "my_compute",
"protocol": "http",
"host": "localhost",
"port": 84,
"user": "julien",
"password": "secure"
}
response = http_controller.post("/computes", params)
assert response.status == 201
response = http_controller.get("/computes/endpoint/my_compute/virtualbox/images")
assert response.status == 200
assert response.json['endpoint'] == 'http://localhost:84/v2/compute/virtualbox/images'

View File

@ -258,3 +258,20 @@ def test_post_file(http_controller, tmpdir, project, node, compute):
response = http_controller.get("/projects/{project_id}/nodes/{node_id}/files/../hello".format(project_id=project.id, node_id=node.id), raw=True)
assert response.status == 404
def test_get_and_post_with_nested_paths_normalization(http_controller, tmpdir, project, node, compute):
response = MagicMock()
response.body = b"world"
compute.http_query = AsyncioMagicMock(return_value=response)
response = http_controller.get("/projects/{project_id}/nodes/{node_id}/files/hello\\nested".format(project_id=project.id, node_id=node.id), raw=True)
assert response.status == 200
assert response.body == b'world'
compute.http_query.assert_called_with("GET", "/projects/{project_id}/files/project-files/vpcs/{node_id}/hello/nested".format(project_id=project.id, node_id=node.id), timeout=None, raw=True)
compute.http_query = AsyncioMagicMock()
response = http_controller.post("/projects/{project_id}/nodes/{node_id}/files/hello\\nested".format(project_id=project.id, node_id=node.id), body=b"hello", raw=True)
assert response.status == 201
compute.http_query.assert_called_with("POST", "/projects/{project_id}/files/project-files/vpcs/{node_id}/hello/nested".format(project_id=project.id, node_id=node.id), data=b'hello', timeout=None, raw=True)