Support multiple clients connected to the same VPCS console

Ref #770
Julien Duponchelle 2016-11-08 19:44:12 +01:00
parent 3754a49cc0
commit db8296f548
3 changed files with 105 additions and 79 deletions
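In short, the change hides the node's real console behind a small telnet proxy: the emulator listens on a private internal port, and an AsyncioTelnetServer bound to the public console port fans traffic out to every attached client. Below is a minimal, self-contained asyncio sketch of that general idea; it does not use the gns3server classes, the port numbers and the broadcast logic are illustrative assumptions, and it uses modern async/await rather than the yield from style of this commit.

import asyncio

# Illustrative sketch only: one connection to the real console (the backend),
# any number of client connections fanned out from a public port.
clients = set()

async def pump_backend(backend_reader):
    # Copy everything the console prints to every connected client.
    while True:
        data = await backend_reader.read(1024)
        if not data:
            break
        for writer in list(clients):
            writer.write(data)
            await writer.drain()

async def handle_client(reader, writer, backend_writer):
    clients.add(writer)
    try:
        while True:
            data = await reader.read(1024)
            if not data:
                break
            backend_writer.write(data)  # forward keystrokes to the console
            await backend_writer.drain()
    finally:
        clients.discard(writer)
        writer.close()

async def main(internal_port=20000, public_port=5000):
    backend_reader, backend_writer = await asyncio.open_connection("127.0.0.1", internal_port)
    asyncio.ensure_future(pump_backend(backend_reader))
    server = await asyncio.start_server(
        lambda r, w: handle_client(r, w, backend_writer), "127.0.0.1", public_port)
    await server.serve_forever()

Running asyncio.run(main()) with something already listening on the internal port lets any number of telnet clients share the same console.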

@@ -27,6 +27,7 @@ import platform
from ..compute.port_manager import PortManager
from ..utils.asyncio import wait_run_in_executor
from ..utils.asyncio.telnet_server import AsyncioTelnetServer
from ..ubridge.hypervisor import Hypervisor
from ..ubridge.ubridge_error import UbridgeError
from .nios.nio_udp import NIOUDP
@@ -50,9 +51,10 @@ class BaseNode:
:param aux: TCP aux console port
:param allocate_aux: Boolean if true will allocate an aux console port
:param linked_clone: The node base image is duplicate/overlay (Each node data are independent)
:param wrap_console: The console is wrapped using AsyncioTelnetServer
"""
def __init__(self, name, node_id, project, manager, console=None, console_type="telnet", aux=None, allocate_aux=False, linked_clone=True):
def __init__(self, name, node_id, project, manager, console=None, console_type="telnet", aux=None, allocate_aux=False, linked_clone=True, wrap_console=False):
self._name = name
self._usage = ""
@@ -70,6 +72,8 @@ class BaseNode:
self._node_status = "stopped"
self._command_line = ""
self._allocate_aux = allocate_aux
self._wrap_console = wrap_console
self._wrapper_telnet_server = None
# check if the node will use uBridge or not
server_config = Config.instance().get_section_config("Server")
@@ -92,6 +96,12 @@ class BaseNode:
else:
self._console = self._manager.port_manager.get_free_tcp_port(self._project)
if self._wrap_console:
if console_type == "vnc":
self._wrap_console = False # We don't support multiple clients connected to the same VNC console
else:
self._internal_console_port = self._manager.port_manager.get_free_tcp_port(self._project)
if self._aux is None and allocate_aux:
self._aux = self._manager.port_manager.get_free_tcp_port(self._project)
@@ -269,12 +279,15 @@ class BaseNode:
raise NotImplementedError
@asyncio.coroutine
def stop(self):
"""
Starts the node process.
Stop the node process.
"""
raise NotImplementedError
if self._wrapper_telnet_server:
self._wrapper_telnet_server.close()
yield from self._wrapper_telnet_server.wait_closed()
self.status = "stopped"
def suspend(self):
"""
@@ -299,6 +312,9 @@ class BaseNode:
if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
self._console = None
if self._wrap_console:
self._manager.port_manager.release_tcp_port(self._internal_console_port, self._project)
self._internal_console_port = None
if self._aux:
self._manager.port_manager.release_tcp_port(self._aux, self._project)
@@ -307,6 +323,18 @@ class BaseNode:
self._closed = True
return True
@asyncio.coroutine
def start_wrap_console(self):
"""
Start a telnet proxy for the console, allowing multiple clients
to connect at the same time
"""
if not self._wrap_console:
return
(reader, writer) = yield from asyncio.open_connection(host="127.0.0.1", port=self._internal_console_port)
server = AsyncioTelnetServer(reader=reader, writer=writer, binary=True, echo=True)
self._wrapper_telnet_server = yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console)
@property
def allocate_aux(self):
"""

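As the VPCS changes below illustrate, a node type opts into the wrapper by passing wrap_console=True to BaseNode.__init__, pointing its process at self._internal_console_port instead of self._console, and calling start_wrap_console() once the process is up; BaseNode.stop() then shuts the wrapper down. A condensed sketch of that pattern, assuming the asyncio and BaseNode imports shown above (MyNodeVM and _build_command are hypothetical, error handling is elided):

class MyNodeVM(BaseNode):

    def __init__(self, name, node_id, project, manager, console=None):
        # wrap_console=True makes BaseNode allocate an internal TCP port and
        # later expose self.console through the telnet proxy.
        super().__init__(name, node_id, project, manager,
                         console=console, wrap_console=True)

    @asyncio.coroutine
    def start(self):
        # The emulator must listen on the internal port, not the public one.
        command = self._build_command(self._internal_console_port)
        self._process = yield from asyncio.create_subprocess_exec(*command)
        # Start the proxy only once the process accepts connections.
        yield from self.start_wrap_console()
        self.status = "started"

    @asyncio.coroutine
    def stop(self):
        self._process.terminate()
        # BaseNode.stop() closes the wrapper telnet server and sets the status.
        yield from super().stop()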
@@ -61,7 +61,7 @@ class VPCSVM(BaseNode):
def __init__(self, name, node_id, project, manager, console=None, startup_script=None):
super().__init__(name, node_id, project, manager, console=console)
super().__init__(name, node_id, project, manager, console=console, wrap_console=True)
self._process = None
self._vpcs_stdout_file = ""
self._vpcs_version = None
@@ -263,6 +263,8 @@ class VPCSVM(BaseNode):
if nio:
yield from self._add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
yield from self.start_wrap_console()
log.info("VPCS instance {} started PID={}".format(self.name, self._process.pid))
self._started = True
self.status = "started"
@@ -308,7 +310,7 @@ class VPCSVM(BaseNode):
self._process = None
self._started = False
self.status = "stopped"
yield from super().stop()
@asyncio.coroutine
def reload(self):
@@ -513,7 +515,7 @@ class VPCSVM(BaseNode):
"""
command = [self.vpcs_path]
command.extend(["-p", str(self._console)]) # listen to console port
command.extend(["-p", str(self._internal_console_port)]) # listen to console port
command.extend(["-m", str(self._manager.get_mac_id(self.id))]) # the unique ID is used to set the MAC address offset
command.extend(["-i", "1"]) # option to start only one VPC instance
command.extend(["-F"]) # option to avoid the daemonization of VPCS

@@ -93,23 +93,24 @@ def test_start(loop, vm, async_run):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec:
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
loop.run_until_complete(asyncio.async(vm.start()))
assert mock_exec.call_args[0] == (vm.vpcs_path,
'-p',
str(vm.console),
'-m', '1',
'-i',
'1',
'-F',
'-R',
'-s',
ANY,
'-c',
ANY,
'-t',
'127.0.0.1')
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
loop.run_until_complete(asyncio.async(vm.start()))
assert mock_exec.call_args[0] == (vm.vpcs_path,
'-p',
str(vm._internal_console_port),
'-m', '1',
'-i',
'1',
'-F',
'-R',
'-s',
ANY,
'-c',
ANY,
'-t',
'127.0.0.1')
assert vm.is_running()
assert vm.command_line == ' '.join(mock_exec.call_args[0])
(action, event, kwargs) = async_run(queue.get(0))
@@ -127,24 +128,25 @@ def test_start_0_6_1(loop, vm, async_run):
vm._vpcs_version = parse_version("0.6.1")
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec:
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
async_run(vm.start())
assert mock_exec.call_args[0] == (vm.vpcs_path,
'-p',
str(vm.console),
'-m', '1',
'-i',
'1',
'-F',
'-s',
ANY,
'-c',
ANY,
'-t',
'127.0.0.1')
assert vm.is_running()
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec:
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
async_run(vm.start())
assert mock_exec.call_args[0] == (vm.vpcs_path,
'-p',
str(vm._internal_console_port),
'-m', '1',
'-i',
'1',
'-F',
'-s',
ANY,
'-c',
ANY,
'-t',
'127.0.0.1')
assert vm.is_running()
def test_stop(loop, vm, async_run):
@@ -158,28 +160,29 @@ def test_stop(loop, vm, async_run):
with NotificationManager.instance().queue() as queue:
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
async_run(vm.start())
assert vm.is_running()
async_run(vm.start())
assert vm.is_running()
with asyncio_patch("gns3server.utils.asyncio.wait_for_process_termination"):
loop.run_until_complete(asyncio.async(vm.stop()))
assert vm.is_running() is False
with asyncio_patch("gns3server.utils.asyncio.wait_for_process_termination"):
loop.run_until_complete(asyncio.async(vm.stop()))
assert vm.is_running() is False
if sys.platform.startswith("win"):
process.send_signal.assert_called_with(1)
else:
process.terminate.assert_called_with()
if sys.platform.startswith("win"):
process.send_signal.assert_called_with(1)
else:
process.terminate.assert_called_with()
async_run(queue.get(0)) #  Ping
async_run(queue.get(0)) #  Started
async_run(queue.get(0)) #  Ping
async_run(queue.get(0)) #  Started
(action, event, kwargs) = async_run(queue.get(0))
assert action == "node.updated"
assert event == vm
(action, event, kwargs) = async_run(queue.get(0))
assert action == "node.updated"
assert event == vm
def test_reload(loop, vm, async_run):
@@ -192,20 +195,21 @@ def test_reload(loop, vm, async_run):
process.returncode = None
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
async_run(vm.start())
assert vm.is_running()
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
async_run(vm.start())
assert vm.is_running()
with asyncio_patch("gns3server.utils.asyncio.wait_for_process_termination"):
async_run(vm.reload())
assert vm.is_running() is True
with asyncio_patch("gns3server.utils.asyncio.wait_for_process_termination"):
async_run(vm.reload())
assert vm.is_running() is True
if sys.platform.startswith("win"):
process.send_signal.assert_called_with(1)
else:
process.terminate.assert_called_with()
if sys.platform.startswith("win"):
process.send_signal.assert_called_with(1)
else:
process.terminate.assert_called_with()
def test_add_nio_binding_udp(vm, async_run):
@@ -222,14 +226,6 @@ def test_add_nio_binding_tap(vm, ethernet_device):
assert nio.tap_device == ethernet_device
# def test_add_nio_binding_tap_no_privileged_access(vm):
# with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=False):
# with pytest.raises(aiohttp.web.HTTPForbidden):
# nio = VPCS.instance().create_nio({"type": "nio_tap", "tap_device": "test"})
# vm.port_add_nio_binding(0, nio)
# assert vm._ethernet_adapter.ports[0] is None
#
def test_port_remove_nio_binding(vm):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
vm.port_add_nio_binding(0, nio)