Mirror of https://github.com/GNS3/gns3-server.git, synced 2024-12-23 14:42:28 +00:00
Merge pull request #1125 from GNS3/docker_filters
Filter support for Docker
This change is contained in commit 3487b657a7.
@@ -373,7 +373,7 @@ class BaseManager:
                sock.connect(sa)
            except OSError as e:
                raise aiohttp.web.HTTPInternalServerError(text="Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
-           filters = nio_settings.get("filters", [])
+           filters = nio_settings.get("filters", {})
            nio = NIOUDP(lport, rhost, rport, filters)
        elif nio_settings["type"] == "nio_tap":
            tap_device = nio_settings["tap_device"]
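For context, the "filters" value carried by a NIO maps each filter type to its parameter list, which is why the default changes from a list to a dict. A minimal sketch of the NIO settings a client might send; the filter names and parameter values below are illustrative assumptions, not taken from this diff:

# Hypothetical NIO settings payload; "filters" maps filter type -> parameters.
nio_settings = {
    "type": "nio_udp",
    "lport": 4242,
    "rport": 4343,
    "rhost": "127.0.0.1",
    "filters": {
        "frequency_drop": [5],   # assumed filter: drop every 5th packet
        "delay": [50, 10],       # assumed filter: 50 ms latency, 10 ms jitter
    },
}

filters = nio_settings.get("filters", {})  # same default as the new code above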
@@ -1,248 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
Docker server module.
"""

import sys
import json
import asyncio
import logging
import aiohttp
from gns3server.utils import parse_version
from gns3server.utils.asyncio import locked_coroutine
from gns3server.compute.base_manager import BaseManager
from gns3server.compute.docker.docker_vm import DockerVM
from gns3server.compute.docker.docker_error import DockerError, DockerHttp304Error, DockerHttp404Error

log = logging.getLogger(__name__)


# Be carefull to keep it consistent
DOCKER_MINIMUM_API_VERSION = "1.25"
DOCKER_MINIMUM_VERSION = "1.13"
DOCKER_PREFERRED_API_VERSION = "1.30"


class Docker(BaseManager):

    _NODE_CLASS = DockerVM

    def __init__(self):
        super().__init__()
        self._server_url = '/var/run/docker.sock'
        self._connected = False
        # Allow locking during ubridge operations
        self.ubridge_lock = asyncio.Lock()
        self._connector = None
        self._session = None
        self._api_version = DOCKER_MINIMUM_API_VERSION

    @asyncio.coroutine
    def _check_connection(self):
        if not self._connected:
            try:
                self._connected = True
                connector = self.connector()
                version = yield from self.query("GET", "version")
            except (aiohttp.ClientOSError, FileNotFoundError):
                self._connected = False
                raise DockerError("Can't connect to docker daemon")

            docker_version = parse_version(version['ApiVersion'])

            if docker_version < parse_version(DOCKER_MINIMUM_API_VERSION):
                raise DockerError(
                    "Docker version is {}. GNS3 requires a minimum version of {}".format(
                        version["Version"], DOCKER_MINIMUM_VERSION))

            preferred_api_version = parse_version(DOCKER_PREFERRED_API_VERSION)
            if docker_version >= preferred_api_version:
                self._api_version = DOCKER_PREFERRED_API_VERSION

    def connector(self):
        if self._connector is None or self._connector.closed:
            if not sys.platform.startswith("linux"):
                raise DockerError("Docker is supported only on Linux")
            try:
                self._connector = aiohttp.connector.UnixConnector(self._server_url, limit=None)
            except (aiohttp.ClientOSError, FileNotFoundError):
                raise DockerError("Can't connect to docker daemon")
        return self._connector

    @asyncio.coroutine
    def unload(self):
        yield from super().unload()
        if self._connected:
            if self._connector and not self._connector.closed:
                self._connector.close()

    @asyncio.coroutine
    def query(self, method, path, data={}, params={}):
        """
        Make a query to the docker daemon and decode the request

        :param method: HTTP method
        :param path: Endpoint in API
        :param data: Dictionnary with the body. Will be transformed to a JSON
        :param params: Parameters added as a query arg
        """

        response = yield from self.http_query(method, path, data=data, params=params)
        body = yield from response.read()
        if body and len(body):
            if response.headers['CONTENT-TYPE'] == 'application/json':
                body = json.loads(body.decode("utf-8"))
            else:
                body = body.decode("utf-8")
        log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
        return body

    @asyncio.coroutine
    def http_query(self, method, path, data={}, params={}, timeout=300):
        """
        Make a query to the docker daemon

        :param method: HTTP method
        :param path: Endpoint in API
        :param data: Dictionnary with the body. Will be transformed to a JSON
        :param params: Parameters added as a query arg
        :param timeout: Timeout
        :returns: HTTP response
        """
        data = json.dumps(data)
        if timeout is None:
            timeout = 60 * 60 * 24 * 31  # One month timeout

        if path == 'version':
            url = "http://docker/v1.12/" + path  # API of docker v1.0
        else:
            url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
        try:
            if path != "version":  # version is use by check connection
                yield from self._check_connection()
            if self._session is None or self._session.closed:
                connector = self.connector()
                self._session = aiohttp.ClientSession(connector=connector)
            response = yield from self._session.request(
                method,
                url,
                params=params,
                data=data,
                headers={"content-type": "application/json", },
                timeout=timeout
            )
        except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
            raise DockerError("Docker has returned an error: {}".format(str(e)))
        except (asyncio.TimeoutError):
            raise DockerError("Docker timeout " + method + " " + path)
        if response.status >= 300:
            body = yield from response.read()
            try:
                body = json.loads(body.decode("utf-8"))["message"]
            except ValueError:
                pass
            log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
            if response.status == 304:
                raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
            elif response.status == 404:
                raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
            else:
                raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
        return response

    @asyncio.coroutine
    def websocket_query(self, path, params={}):
        """
        Open a websocket connection

        :param path: Endpoint in API
        :param params: Parameters added as a query arg
        :returns: Websocket
        """

<<<<<<< HEAD
        url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
        connection = yield from self._session.ws_connect(url,
                                                         origin="http://docker",
                                                         autoping=True)
=======
        url = "http://docker/v" + self._api_version + "/" + path
        connection = yield from aiohttp.ws_connect(url,
                                                   connector=self.connector(),
                                                   origin="http://docker",
                                                   autoping=True)
>>>>>>> master
        return connection

    @locked_coroutine
    def pull_image(self, image, progress_callback=None):
        """
        Pull image from docker repository

        :params image: Image name
        :params progress_callback: A function that receive a log message about image download progress
        """

        try:
            yield from self.query("GET", "images/{}/json".format(image))
            return  # We already have the image skip the download
        except DockerHttp404Error:
            pass

        if progress_callback:
            progress_callback("Pull {} from docker hub".format(image))
        response = yield from self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
        # The pull api will stream status via an HTTP JSON stream
        content = ""
        while True:
            try:
                chunk = yield from response.content.read(1024)
            except aiohttp.ServerDisconnectedError:
                break
            if not chunk:
                break
            content += chunk.decode("utf-8")

            try:
                while True:
                    content = content.lstrip(" \r\n\t")
                    answer, index = json.JSONDecoder().raw_decode(content)
                    if "progress" in answer and progress_callback:
                        progress_callback("Pulling image {}:{}: {}".format(image, answer["id"], answer["progress"]))
                    content = content[index:]
            except ValueError:  # Partial JSON
                pass
        response.close()
        if progress_callback:
            progress_callback("Success pulling image {}".format(image))

    @asyncio.coroutine
    def list_images(self):
        """Gets Docker image list.

        :returns: list of dicts
        :rtype: list
        """
        images = []
        for image in (yield from self.query("GET", "images/json", params={"all": 0})):
            if image['RepoTags']:
                for tag in image['RepoTags']:
                    if tag != "<none>:<none>":
                        images.append({'image': tag})
        return sorted(images, key=lambda i: i['image'])
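One detail worth noting in the removed module: pull_image() parses Docker's pull progress, which arrives as a stream of concatenated JSON objects, by repeatedly calling json.JSONDecoder().raw_decode() and keeping only the unparsed remainder. A self-contained sketch of that technique on made-up sample data:

import json

# Made-up sample of a concatenated JSON stream, as Docker's pull API emits it.
stream = ('{"status": "Downloading", "id": "abc123", "progress": "[=>   ] 10%"}'
          '{"status": "Downloading", "id": "abc123", "progress": "[===> ] 40%"}')

decoder = json.JSONDecoder()
content = stream
try:
    while True:
        content = content.lstrip(" \r\n\t")
        answer, index = decoder.raw_decode(content)   # one object plus how much was consumed
        print(answer["status"], answer.get("progress", ""))
        content = content[index:]                     # keep the unparsed remainder
except ValueError:  # partial or empty remainder, same handling as the code above
    pass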
@@ -85,6 +85,8 @@ class DockerVM(BaseNode):
        self._console_http_port = console_http_port
        self._console_websocket = None
        self._volumes = []
+       # Keep a list of created bridge
+       self._bridges = set()

        if adapters is None:
            self.adapters = 1
@@ -127,6 +129,10 @@ class DockerVM(BaseNode):
                return display
            display += 1

+   @property
+   def ethernet_adapters(self):
+       return self._ethernet_adapters
+
    @property
    def start_command(self):
        return self._start_command
@@ -657,8 +663,9 @@ class DockerVM(BaseNode):
        if adapter.host_ifc is None:
            raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(name=self.name,
                                                                                                                                                                     adapter_number=adapter_number))
-       yield from self._ubridge_send('bridge create bridge{}'.format(adapter_number))
+       bridge_name = 'bridge{}'.format(adapter_number)
+       yield from self._ubridge_send('bridge create {}'.format(bridge_name))
+       self._bridges.add(bridge_name)
        yield from self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number,
                                                                                                  hostif=adapter.host_ifc))
        log.debug("Move container %s adapter %s to namespace %s", self.name, adapter.host_ifc, self._namespace)
@@ -679,20 +686,23 @@ class DockerVM(BaseNode):

    @asyncio.coroutine
    def _connect_nio(self, adapter_number, nio):
-       yield from self._ubridge_send('bridge add_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number,
-                                                                                                          lport=nio.lport,
-                                                                                                          rhost=nio.rhost,
-                                                                                                          rport=nio.rport))
+       bridge_name = 'bridge{}'.format(adapter_number)
+       yield from self._ubridge_send('bridge add_nio_udp {bridge_name} {lport} {rhost} {rport}'.format(bridge_name=bridge_name,
+                                                                                                        lport=nio.lport,
+                                                                                                        rhost=nio.rhost,
+                                                                                                        rport=nio.rport))

        if nio.capturing:
-           yield from self._ubridge_send('bridge start_capture bridge{adapter} "{pcap_file}"'.format(adapter=adapter_number,
-                                                                                                      pcap_file=nio.pcap_output_file))
-       yield from self._ubridge_send('bridge start bridge{adapter}'.format(adapter=adapter_number))
+           yield from self._ubridge_send('bridge start_capture {bridge_name} "{pcap_file}"'.format(bridge_name=bridge_name,
+                                                                                                   pcap_file=nio.pcap_output_file))
+       yield from self._ubridge_send('bridge start {bridge_name}'.format(bridge_name=bridge_name))
+       yield from self._ubridge_apply_filters(bridge_name, nio.filters)

    @asyncio.coroutine
    def adapter_add_nio_binding(self, adapter_number, nio):
        """Adds an adapter NIO binding.

        :param adapter_number: adapter number
        :param nio: NIO instance to add to the slot/port
        """
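To make the new flow concrete, for adapter 0 with a UDP NIO (lport 4242, rhost 127.0.0.1, rport 4343) _connect_nio() now sends roughly the following strings to _ubridge_send(); the capture line is sent only while a capture is active, and the packet-filter commands themselves are issued by _ubridge_apply_filters(), which lives in BaseNode and is not shown in this diff:

# Expected _ubridge_send() payloads for adapter 0 (values match the tests below):
expected = [
    'bridge add_nio_udp bridge0 4242 127.0.0.1 4343',
    'bridge start_capture bridge0 "/tmp/capture.pcap"',  # only while a capture is active
    'bridge start bridge0',
    # ...followed by whatever filter commands _ubridge_apply_filters() emits for nio.filters
]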
@@ -711,6 +721,19 @@ class DockerVM(BaseNode):
                                                                                                     nio=nio,
                                                                                                     adapter_number=adapter_number))

+   @asyncio.coroutine
+   def adapter_update_nio_binding(self, adapter_number, nio):
+       """
+       Update a port NIO binding.
+
+       :param adapter_number: adapter number
+       :param nio: NIO instance to add to the adapter
+       """
+
+       bridge_name = 'bridge{}'.format(adapter_number)
+       if bridge_name in self._bridges:
+           yield from self._ubridge_apply_filters(bridge_name, nio.filters)
+
    @asyncio.coroutine
    def adapter_remove_nio_binding(self, adapter_number):
        """
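In-process, the new method is driven the same way the unit tests below drive it: bind the NIO first, then call adapter_update_nio_binding() whenever the filters change; the update only takes effect once the adapter's bridge (bridge0) has been created and recorded in _bridges. A hedged sketch, assuming vm is a running DockerVM instance and the filter payload is purely illustrative:

import asyncio

@asyncio.coroutine
def update_filters(vm):
    # "vm" is assumed to be a started DockerVM instance (as in the test fixtures below).
    nio = vm.manager.create_nio({"type": "nio_udp",
                                 "lport": 4242,
                                 "rport": 4343,
                                 "rhost": "127.0.0.1"})
    yield from vm.adapter_add_nio_binding(0, nio)      # attach the NIO to adapter 0
    nio.filters = {"frequency_drop": [5]}              # illustrative filter mapping
    yield from vm.adapter_update_nio_binding(0, nio)   # no-op unless bridge0 is in vm._bridges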
@@ -728,7 +751,8 @@ class DockerVM(BaseNode):

        if self.ubridge:
            nio = adapter.get_nio(0)
-           yield from self._ubridge_send("bridge stop bridge{name}".format(name=adapter_number))
+           bridge_name = 'bridge{}'.format(adapter_number)
+           yield from self._ubridge_send("bridge stop {}".format(bridge_name))
            yield from self._ubridge_send('bridge remove_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number,
                                                                                                                 lport=nio.lport,
                                                                                                                 rhost=nio.rhost,
@@ -365,7 +365,7 @@ class Link:
        :returns: None if no node support filtering else the node
        """
        for node in self._nodes:
-           if node["node"].node_type in ('vpcs', 'dynamips', 'qemu'):
+           if node["node"].node_type in ('vpcs', 'dynamips', 'qemu', 'docker'):
                return node["node"]
        return None

@@ -204,6 +204,33 @@ class DockerHandler:
        response.set_status(201)
        response.json(nio)

+   @Route.put(
+       r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
+       parameters={
+           "project_id": "Project UUID",
+           "node_id": "Node UUID",
+           "adapter_number": "Network adapter where the nio is located",
+           "port_number": "Port from where the nio should be updated"
+       },
+       status_codes={
+           201: "NIO updated",
+           400: "Invalid request",
+           404: "Instance doesn't exist"
+       },
+       input=NIO_SCHEMA,
+       output=NIO_SCHEMA,
+       description="Update a NIO from a Docker instance")
+   def update_nio(request, response):
+
+       docker_manager = Docker.instance()
+       container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
+       nio = container.ethernet_adapters[int(request.match_info["adapter_number"])].get_nio(0)
+       if "filters" in request.json and nio:
+           nio.filters = request.json["filters"]
+       yield from container.adapter_update_nio_binding(int(request.match_info["port_number"]), nio)
+       response.set_status(201)
+       response.json(request.json)
+
    @Route.delete(
        r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
        parameters={
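From the outside, a client updates the filters on an existing Docker NIO by PUTting the NIO body back with a "filters" mapping. A hypothetical call against a local compute, assuming the default GNS3 server on port 3080 and its /v2/compute API prefix (authentication, if enabled, is omitted); the UUIDs and filter values are placeholders:

import requests  # illustrative client, not part of this change

url = ("http://localhost:3080/v2/compute/projects/<project-uuid>"
       "/docker/nodes/<node-uuid>/adapters/0/ports/0/nio")
payload = {
    "type": "nio_udp",
    "lport": 4242,
    "rport": 4343,
    "rhost": "127.0.0.1",
    "filters": {"packet_loss": [10]},  # assumed filter name/parameters, for illustration
}
r = requests.put(url, json=payload)
assert r.status_code == 201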
@@ -704,6 +704,7 @@ def test_add_ubridge_connection(loop, vm):
        call.send('bridge start_capture bridge0 "/tmp/capture.pcap"'),
        call.send('bridge start bridge0')
    ]
+   assert 'bridge0' in vm._bridges
    # We need to check any_order ortherwise mock is confused by asyncio
    vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)

@@ -722,6 +723,7 @@ def test_add_ubridge_connection_none_nio(loop, vm):
        call.send('docker move_to_ns tap-gns3-e0 42 eth0'),

    ]
+   assert 'bridge0' in vm._bridges
    # We need to check any_order ortherwise mock is confused by asyncio
    vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)

@@ -763,6 +765,35 @@ def test_adapter_add_nio_binding(vm, loop):
    assert vm._ethernet_adapters[0].get_nio(0) == nio


+def test_adapter_udpate_nio_binding(vm, loop):
+   vm._ubridge_apply_filters = AsyncioMagicMock()
+   vm._bridges = set(('bridge0', ))
+   nio = {"type": "nio_udp",
+          "lport": 4242,
+          "rport": 4343,
+          "rhost": "127.0.0.1"}
+   nio = vm.manager.create_nio(nio)
+   with asyncio_patch("gns3server.compute.docker.DockerVM._get_container_state", return_value="running"):
+       loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
+
+   loop.run_until_complete(asyncio.async(vm.adapter_update_nio_binding(0, nio)))
+   assert vm._ubridge_apply_filters.called
+
+
+def test_adapter_udpate_nio_binding_bridge_not_started(vm, loop):
+   vm._ubridge_apply_filters = AsyncioMagicMock()
+   nio = {"type": "nio_udp",
+          "lport": 4242,
+          "rport": 4343,
+          "rhost": "127.0.0.1"}
+   nio = vm.manager.create_nio(nio)
+   with asyncio_patch("gns3server.compute.docker.DockerVM._get_container_state", return_value="running"):
+       loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
+
+   loop.run_until_complete(asyncio.async(vm.adapter_update_nio_binding(0, nio)))
+   assert vm._ubridge_apply_filters.called is False
+
+
def test_adapter_add_nio_binding_invalid_adapter(vm, loop):
    nio = {"type": "nio_udp",
           "lport": 4242,
@@ -124,6 +124,20 @@ def test_docker_nio_create_udp(http_compute, vm):
    assert response.json["type"] == "nio_udp"


+def test_docker_update_nio(http_compute, vm):
+   with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.adapter_update_nio_binding") as mock:
+       response = http_compute.put("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]),
+                                   {
+                                       "type": "nio_udp",
+                                       "lport": 4242,
+                                       "rport": 4343,
+                                       "rhost": "127.0.0.1"
+                                   },
+                                   example=True)
+   assert response.status == 201, response.body.decode()
+   assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+
+
def test_docker_delete_nio(http_compute, vm):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.adapter_remove_nio_binding") as mock:
        response = http_compute.delete("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)