Remove use_ubridge flag

Ref #1115
Julien Duponchelle 2017-07-11 13:42:47 +02:00
parent fcd7dfbde6
commit d112da400e
11 changed files with 127 additions and 204 deletions

View File

@@ -77,10 +77,6 @@ class BaseNode:
self._wrap_console = wrap_console
self._wrapper_telnet_server = None
# check if the node will use uBridge or not
server_config = Config.instance().get_section_config("Server")
self._use_ubridge = server_config.getboolean("use_ubridge")
if self._console is not None:
if console_type == "vnc":
self._console = self._manager.port_manager.reserve_tcp_port(self._console, self._project, port_range_start=5900, port_range_end=6000)
@@ -460,16 +456,6 @@ class BaseNode:
id=self.id,
console_type=console_type))
@property
def use_ubridge(self):
"""
Returns if uBridge is used for this node or not
:returns: boolean
"""
return self._use_ubridge
@property
def ubridge(self):
"""
@@ -592,6 +578,7 @@ class BaseNode:
@asyncio.coroutine
def _update_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio):
if destination_nio:
yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters)
@asyncio.coroutine

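With the flag gone, BaseNode subclasses can assume uBridge is always available, so every node type converges on the same wiring: start uBridge, create a local UDP tunnel per adapter, and bridge that tunnel to the peer NIO. A minimal sketch of the resulting pattern, reusing the helper names visible in the hunks (_start_ubridge, _create_local_udp_tunnel, add_ubridge_udp_connection, _ubridge_send); the subclass itself is hypothetical:

import asyncio

from gns3server.compute.base_node import BaseNode  # module path assumed from this repository's layout


class ExampleNode(BaseNode):
    """Hypothetical node type; only the unconditional uBridge wiring is the point."""

    @asyncio.coroutine
    def adapter_add_nio_binding(self, adapter_number, nio):
        # uBridge is always used now: reserve one local UDP tunnel per adapter
        if adapter_number not in self._local_udp_tunnels:
            self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
        # bridge the server-side end of the tunnel to the peer NIO
        yield from self.add_ubridge_udp_connection(
            "NODE-{}-{}".format(self._id, adapter_number),
            self._local_udp_tunnels[adapter_number][1],
            nio)

    @asyncio.coroutine
    def adapter_remove_nio_binding(self, adapter_number):
        # tearing a link down is just deleting the per-adapter bridge
        yield from self._ubridge_send(
            "bridge delete {name}".format(name="NODE-{}-{}".format(self._id, adapter_number)))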
View File

@@ -907,7 +907,6 @@ class QemuVM(BaseNode):
stderr=subprocess.STDOUT,
cwd=self.working_dir)
if self.use_ubridge:
yield from self._start_ubridge()
for adapter_number, adapter in enumerate(self._ethernet_adapters):
nio = adapter.get_nio(0)
@@ -1131,7 +1130,8 @@ class QemuVM(BaseNode):
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
if self.ubridge:
if adapter_number not in self._local_udp_tunnels:
self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
try:
yield from self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
@@ -1139,8 +1139,6 @@ class QemuVM(BaseNode):
except IndexError:
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
elif self.is_running():
raise QemuError("Sorry, adding a link to a started Qemu VM is not supported without using uBridge.")
adapter.add_nio(0, nio)
log.info('QEMU VM "{name}" [{id}]: {nio} added to adapter {adapter_number}'.format(name=self._name,
@@ -1164,10 +1162,7 @@ class QemuVM(BaseNode):
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
if self.ubridge:
yield from self._ubridge_send("bridge delete {name}".format(name="QEMU-{}-{}".format(self._id, adapter_number)))
elif self.is_running():
raise QemuError("Sorry, removing a link to a started Qemu VM is not supported without using uBridge.")
nio = adapter.get_nio(0)
if isinstance(nio, NIOUDP):
@@ -1195,9 +1190,6 @@ class QemuVM(BaseNode):
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
if not self.use_ubridge:
raise QemuError("uBridge must be enabled in order to start packet capture")
nio = adapter.get_nio(0)
if not nio:
@@ -1476,13 +1468,10 @@ class QemuVM(BaseNode):
for adapter_number, adapter in enumerate(self._ethernet_adapters):
mac = int_to_macaddress(macaddress_to_int(self._mac_address) + adapter_number)
if self.use_ubridge:
# use a local UDP tunnel to connect to uBridge instead
if adapter_number not in self._local_udp_tunnels:
self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
nio = self._local_udp_tunnels[adapter_number][0]
else:
nio = adapter.get_nio(0)
if self._legacy_networking:
# legacy QEMU networking syntax (-net)
if nio:

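The command-line side of the same change: _build_command() now always points each emulated NIC at the local UDP tunnel, emitting a -device/-netdev socket pair per adapter (the gns3-<n> netdev ids and the udp=.../localaddr=... syntax match the updated test expectations further down). A rough sketch of that argument construction; build_network_args is an illustrative helper, not the actual method:

from gns3server.utils import int_to_macaddress, macaddress_to_int  # import path assumed


def build_network_args(base_mac, tunnels):
    # tunnels maps adapter_number -> NIO exposing lport/rport on 127.0.0.1,
    # i.e. the server-facing ends stored in _local_udp_tunnels
    args = []
    for adapter_number, nio in sorted(tunnels.items()):
        mac = int_to_macaddress(macaddress_to_int(base_mac) + adapter_number)
        netdev_id = "gns3-{}".format(adapter_number)
        args.extend(["-device", "e1000,mac={},netdev={}".format(mac, netdev_id),
                     "-netdev", "socket,id={},udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(
                         netdev_id, nio.rport, nio.lport)])
    return args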
View File

@@ -280,7 +280,6 @@ class VirtualBoxVM(BaseNode):
# add a guest property to let the VM know about the GNS3 project directory
yield from self.manager.execute("guestproperty", ["set", self._vmname, "ProjectDirInGNS3", self.working_dir])
if self.use_ubridge:
yield from self._start_ubridge()
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
@@ -837,13 +836,10 @@ class VirtualBoxVM(BaseNode):
yield from self._modify_vm("--nic{} null".format(adapter_number + 1))
yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
if self.use_ubridge:
# use a local UDP tunnel to connect to uBridge instead
if adapter_number not in self._local_udp_tunnels:
self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
nio = self._local_udp_tunnels[adapter_number][0]
else:
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
if not self._use_any_adapter and attachment not in ("none", "null", "generic"):
@@ -881,7 +877,7 @@ class VirtualBoxVM(BaseNode):
yield from self._modify_vm("--nictrace{} on".format(adapter_number + 1))
yield from self._modify_vm('--nictracefile{} "{}"'.format(adapter_number + 1, nio.pcap_output_file))
if self.use_ubridge and not self._ethernet_adapters[adapter_number].get_nio(0):
if not self._ethernet_adapters[adapter_number].get_nio(0):
yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
for adapter_number in range(self._adapters, self._maximum_adapters):
@@ -972,7 +968,6 @@ class VirtualBoxVM(BaseNode):
raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
adapter_number=adapter_number))
if self.ubridge:
try:
yield from self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
@@ -981,23 +976,6 @@ class VirtualBoxVM(BaseNode):
raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
adapter_number=adapter_number))
yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
else:
vm_state = yield from self._get_vm_state()
if vm_state == "running":
if isinstance(nio, NIOUDP):
# dynamically configure an UDP tunnel on the VirtualBox adapter
yield from self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1))
yield from self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, nio.lport))
yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
# check if the UDP tunnel has been correctly set
vm_info = yield from self._get_vm_info()
generic_driver_number = "generic{}".format(adapter_number + 1)
if generic_driver_number not in vm_info and vm_info[generic_driver_number] != "UDPTunnel":
log.warning("UDP tunnel has not been set on nic: {}".format(adapter_number + 1))
self.project.emit("log.warning", {"message": "UDP tunnel has not been set on nic: {}".format(adapter_number + 1)})
adapter.add_nio(0, nio)
log.info("VirtualBox VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name,
@@ -1021,17 +999,10 @@ class VirtualBoxVM(BaseNode):
raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
adapter_number=adapter_number))
if self.ubridge:
yield from self._ubridge_send("bridge delete {name}".format(name="VBOX-{}-{}".format(self._id, adapter_number)))
vm_state = yield from self._get_vm_state()
if vm_state == "running":
yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1))
else:
vm_state = yield from self._get_vm_state()
if vm_state == "running":
# dynamically disable the VirtualBox adapter
yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1))
yield from self._control_vm("nic{} null".format(adapter_number + 1))
nio = adapter.get_nio(0)
if isinstance(nio, NIOUDP):
@@ -1059,11 +1030,6 @@ class VirtualBoxVM(BaseNode):
raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
adapter_number=adapter_number))
if not self.use_ubridge:
vm_state = yield from self._get_vm_state()
if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
raise VirtualBoxError("Sorry, packet capturing on a started VirtualBox VM is not supported without using uBridge")
nio = adapter.get_nio(0)
if not nio:

View File

@@ -241,9 +241,6 @@ class VPCSVM(BaseNode):
yield from self._check_requirements()
if not self.is_running():
nio = self._ethernet_adapter.get_nio(0)
if not self.use_ubridge and not nio:
raise VPCSError("This VPCS instance must be connected in order to start")
command = self._build_command()
try:
log.info("Starting VPCS: {}".format(command))
@@ -261,7 +258,6 @@ class VPCSVM(BaseNode):
creationflags=flags)
monitor_process(self._process, self._termination_callback)
if self.use_ubridge:
yield from self._start_ubridge()
if nio:
yield from self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
@@ -378,10 +374,10 @@ class VPCSVM(BaseNode):
raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
port_number=port_number))
if self.ubridge:
if not self._local_udp_tunnel:
self._local_udp_tunnel = self._create_local_udp_tunnel()
yield from self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
elif self.is_running():
raise VPCSError("Sorry, updating a link to a started VPCS instance is not supported without using uBridge.")
self._ethernet_adapter.add_nio(port_number, nio)
log.info('VPCS "{name}" [{id}]: {nio} added to port {port_number}'.format(name=self._name,
@@ -396,10 +392,9 @@ class VPCSVM(BaseNode):
if not self._ethernet_adapter.port_exists(port_number):
raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
port_number=port_number))
if self.ubridge:
if not self._local_udp_tunnel:
self._local_udp_tunnel = self._create_local_udp_tunnel()
yield from self._update_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
elif self.is_running():
raise VPCSError("Sorry, adding a link to a started VPCS instance is not supported without using uBridge.")
@asyncio.coroutine
def port_remove_nio_binding(self, port_number):
@@ -415,10 +410,7 @@ class VPCSVM(BaseNode):
raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
port_number=port_number))
if self.ubridge:
yield from self._ubridge_send("bridge delete {name}".format(name="VPCS-{}".format(self._id)))
elif self.is_running():
raise VPCSError("Sorry, adding a link to a started VPCS instance is not supported without using uBridge.")
nio = self._ethernet_adapter.get_nio(port_number)
if isinstance(nio, NIOUDP):
@@ -444,9 +436,6 @@ class VPCSVM(BaseNode):
raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
port_number=port_number))
if not self.use_ubridge:
raise VPCSError("uBridge must be enabled in order to start packet capture")
nio = self._ethernet_adapter.get_nio(0)
if not nio:
@@ -537,13 +526,10 @@ class VPCSVM(BaseNode):
else:
log.warn("The VPCS relay feature could not be disabled because the VPCS version is below 0.8b")
if self.use_ubridge:
# use the local UDP tunnel to uBridge instead
if not self._local_udp_tunnel:
self._local_udp_tunnel = self._create_local_udp_tunnel()
nio = self._local_udp_tunnel[0]
else:
nio = self._ethernet_adapter.get_nio(0)
if nio:
if isinstance(nio, NIOUDP):

View File

@@ -121,7 +121,6 @@ def parse_arguments(argv):
"certkey": config.get("certkey", ""),
"record": config.get("record", ""),
"local": config.getboolean("local", False),
"use_ubridge": config.getboolean("use_ubridge", True), # this enables uBridge globally
"allow": config.getboolean("allow_remote_console", False),
"quiet": config.getboolean("quiet", False),
"debug": config.getboolean("debug", False),
@@ -137,7 +136,6 @@ def set_config(args):
config = Config.instance()
server_config = config.get_section_config("Server")
server_config["local"] = str(args.local)
server_config["use_ubridge"] = str(args.no_ubridge)
server_config["allow_remote_console"] = str(args.allow)
server_config["host"] = args.host
server_config["port"] = str(args.port)
@@ -225,11 +223,6 @@ def run():
if server_config.getboolean("local"):
log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem")
if server_config.getboolean("use_ubridge"):
log.info("uBridge will be used to handle node connections")
else:
log.warning("uBridge will NOT be used to handle node connections")
# we only support Python 3 version >= 3.4
if sys.version_info < (3, 4):
raise SystemExit("Python 3.4 or higher is required")

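On the configuration side, nothing reads use_ubridge from the [Server] section anymore, so a leftover entry in gns3_server.conf is simply ignored. A throwaway check for the obsolete key, using only the standard library (the file name is assumed, not part of the server code):

import configparser


def warn_obsolete_use_ubridge(path="gns3_server.conf"):
    config = configparser.ConfigParser()
    config.read(path)
    # the value no longer has any effect after this commit; just flag it
    if config.has_option("Server", "use_ubridge"):
        print("Note: [Server] use_ubridge is obsolete and is ignored")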
View File

@@ -70,6 +70,9 @@ def vm(project, manager, fake_qemu_binary, fake_qemu_img_binary):
manager.port_manager.console_host = "127.0.0.1"
vm = QemuVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager, qemu_path=fake_qemu_binary)
vm._process_priority = "normal" # Avoid complexity for Windows tests
vm._start_ubridge = AsyncioMagicMock()
vm._ubridge_hypervisor = MagicMock()
vm._ubridge_hypervisor.is_running.return_value = True
return vm
@@ -195,21 +198,15 @@ def test_suspend(loop, vm):
def test_add_nio_binding_udp(vm, loop):
nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
assert nio.lport == 4242
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
assert nio.lport == 4242
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
def test_add_nio_binding_ethernet(vm, loop, ethernet_device):
with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=True):
nio = Qemu.instance().create_nio({"type": "nio_ethernet", "ethernet_device": ethernet_device})
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
assert nio.ethernet_device == ethernet_device
def test_port_remove_nio_binding(vm, loop):
nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0)))
assert vm._ethernet_adapters[0].ports[0] is None
@@ -460,6 +457,7 @@ def test_build_command(vm, loop, fake_qemu_binary, port_manager):
os.environ["DISPLAY"] = "0:0"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
nio = vm._local_udp_tunnels[0][0]
assert cmd == [
fake_qemu_binary,
"-name",
@@ -477,7 +475,9 @@ def test_build_command(vm, loop, fake_qemu_binary, port_manager):
"-net",
"none",
"-device",
"e1000,mac={}".format(vm._mac_address)
"e1000,mac={},netdev=gns3-0".format(vm._mac_address),
"-netdev",
"socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport)
]
@@ -503,6 +503,7 @@ def test_build_command_kvm(linux_platform, vm, loop, fake_qemu_binary, port_manager):
os.environ["DISPLAY"] = "0:0"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
nio = vm._local_udp_tunnels[0][0]
assert cmd == [
fake_qemu_binary,
"-name",
@@ -521,7 +522,9 @@ def test_build_command_kvm(linux_platform, vm, loop, fake_qemu_binary, port_manager):
"-net",
"none",
"-device",
"e1000,mac={}".format(vm._mac_address)
"e1000,mac={},netdev=gns3-0".format(vm._mac_address),
"-netdev",
"socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport)
]
@@ -534,6 +537,7 @@ def test_build_command_kvm_2_4(linux_platform, vm, loop, fake_qemu_binary, port_manager):
os.environ["DISPLAY"] = "0:0"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
nio = vm._local_udp_tunnels[0][0]
assert cmd == [
fake_qemu_binary,
"-name",
@@ -554,7 +558,9 @@ def test_build_command_kvm_2_4(linux_platform, vm, loop, fake_qemu_binary, port_manager):
"-net",
"none",
"-device",
"e1000,mac={}".format(vm._mac_address)
"e1000,mac={},netdev=gns3-0".format(vm._mac_address),
"-netdev",
"socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport)
]
@@ -573,6 +579,8 @@ def test_build_command_two_adapters(vm, loop, fake_qemu_binary, port_manager):
vm.adapters = 2
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
nio1 = vm._local_udp_tunnels[0][0]
nio2 = vm._local_udp_tunnels[1][0]
assert cmd == [
fake_qemu_binary,
"-name",
@@ -590,9 +598,13 @@ def test_build_command_two_adapters(vm, loop, fake_qemu_binary, port_manager):
"-net",
"none",
"-device",
"e1000,mac={}".format(vm.mac_address),
"e1000,mac={},netdev=gns3-0".format(vm._mac_address),
"-netdev",
"socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio1.rport, nio1.lport),
"-device",
"e1000,mac={}".format(int_to_macaddress(macaddress_to_int(vm._mac_address) + 1))
"e1000,mac={},netdev=gns3-1".format(int_to_macaddress(macaddress_to_int(vm._mac_address) + 1)),
"-netdev",
"socket,id=gns3-1,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio2.rport, nio2.lport)
]
@@ -608,17 +620,17 @@ def test_build_command_two_adapters_mac_address(vm, loop, fake_qemu_binary, port_manager):
assert mac_0[:8] == "00:00:ab"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
assert "e1000,mac={}".format(mac_0) in cmd
assert "e1000,mac={}".format(mac_1) in cmd
assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd
vm.mac_address = "00:42:ab:0e:0f:0a"
mac_0 = vm._mac_address
mac_1 = int_to_macaddress(macaddress_to_int(vm._mac_address))
mac_1 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 1)
assert mac_0[:8] == "00:42:ab"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
assert "e1000,mac={}".format(mac_0) in cmd
assert "e1000,mac={}".format(mac_1) in cmd
assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd
def test_build_command_large_number_of_adapters(vm, loop, fake_qemu_binary, port_manager):
@@ -638,19 +650,23 @@ def test_build_command_large_number_of_adapters(vm, loop, fake_qemu_binary, port_manager):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
assert "e1000,mac={}".format(mac_0) in cmd
assert "e1000,mac={}".format(mac_1) in cmd
# Count if we have 100 e1000 adapters in the command
assert len([l for l in cmd if "e1000" in l ]) == 100
assert len(vm._ethernet_adapters) == 100
assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd
assert "pci-bridge,id=pci-bridge0,bus=dmi_pci_bridge0,chassis_nr=0x1,addr=0x0,shpc=off" not in cmd
assert "pci-bridge,id=pci-bridge1,bus=dmi_pci_bridge1,chassis_nr=0x1,addr=0x1,shpc=off" in cmd
assert "pci-bridge,id=pci-bridge2,bus=dmi_pci_bridge2,chassis_nr=0x1,addr=0x2,shpc=off" in cmd
assert "i82801b11-bridge,id=dmi_pci_bridge1" in cmd
mac_29 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 29)
assert "e1000,mac={},bus=pci-bridge1,addr=0x04".format(mac_29) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x04,netdev=gns3-29".format(mac_29) in cmd
mac_30 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 30)
assert "e1000,mac={},bus=pci-bridge1,addr=0x05".format(mac_30) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x05,netdev=gns3-30".format(mac_30) in cmd
mac_74 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 74)
assert "e1000,mac={},bus=pci-bridge2,addr=0x11".format(mac_74) in cmd
assert "e1000,mac={},bus=pci-bridge2,addr=0x11,netdev=gns3-74".format(mac_74) in cmd
# Qemu < 2.4 doesn't support large number of adapters
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.0.0")

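Since uBridge can no longer be disabled, unit tests that start a node have to stub it out; the vm fixtures above and below do exactly that. A self-contained version of the same idea, with the fixture name invented here and the AsyncioMagicMock import location assumed from the test suite:

import pytest
from unittest.mock import MagicMock

from tests.utils import AsyncioMagicMock  # helper location assumed


@pytest.fixture
def vm_with_stubbed_ubridge(vm):
    # pretend uBridge has been started and is running, so start()/stop() code
    # paths do not try to spawn a real hypervisor process
    vm._start_ubridge = AsyncioMagicMock()
    vm._ubridge_hypervisor = MagicMock()
    vm._ubridge_hypervisor.is_running.return_value = True
    return vm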
View File

@@ -43,6 +43,8 @@ def vm(project, manager, ubridge_path):
vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
vm._vpcs_version = parse_version("0.9")
vm._start_ubridge = AsyncioMagicMock()
vm._ubridge_hypervisor = MagicMock()
vm._ubridge_hypervisor.is_running.return_value = True
return vm
@@ -67,7 +69,7 @@ def test_vm_check_vpcs_version_0_6_1(loop, vm, manager):
def test_vm_invalid_vpcs_version(loop, manager, vm):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.subprocess_check_output", return_value="Welcome to Virtual PC Simulator, version 0.1"):
with pytest.raises(VPCSError):
nio = manager.create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
nio = manager.create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
vm.port_add_nio_binding(0, nio)
loop.run_until_complete(asyncio.async(vm._check_vpcs_version()))
assert vm.name == "test"
@@ -94,8 +96,6 @@ def test_start(loop, vm, async_run):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec:
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
async_run(vm.port_add_nio_binding(0, nio))
loop.run_until_complete(asyncio.async(vm.start()))
assert mock_exec.call_args[0] == (vm._vpcs_path(),
'-p',
@@ -130,7 +130,7 @@ def test_start_0_6_1(loop, vm, async_run):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec:
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
async_run(vm.port_add_nio_binding(0, nio))
async_run(vm.start())
assert mock_exec.call_args[0] == (vm._vpcs_path(),
@@ -162,7 +162,7 @@ def test_stop(loop, vm, async_run):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
async_run(vm.port_add_nio_binding(0, nio))
async_run(vm.start())
@@ -197,11 +197,12 @@ def test_reload(loop, vm, async_run):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
async_run(vm.port_add_nio_binding(0, nio))
async_run(vm.start())
assert vm.is_running()
vm._ubridge_send = AsyncioMagicMock()
with asyncio_patch("gns3server.utils.asyncio.wait_for_process_termination"):
async_run(vm.reload())
assert vm.is_running() is True
@@ -213,7 +214,7 @@ def test_reload(loop, vm, async_run):
def test_add_nio_binding_udp(vm, async_run):
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
async_run(vm.port_add_nio_binding(0, nio))
assert nio.lport == 4242

View File

@@ -212,6 +212,7 @@ def run_around_tests(monkeypatch, port_manager, controller, config):
config.set("VirtualBox", "vboxmanage_path", tmppath)
config.set("VPCS", "vpcs_path", tmppath)
config.set("VMware", "vmrun_path", tmppath)
config.set("Dynamips", "dynamips_path", tmppath)
# Force turn off KVM because it's not available on CI
config.set("Qemu", "enable_kvm", False)

View File

@@ -105,7 +105,6 @@ def demo_topology():
"node_id": "64ba8408-afbf-4b66-9cdd-1fd854427478",
"node_type": "vpcs",
"properties": {
"startup_script": "",
},
"symbol": ":/symbols/computer.svg",
"width": 65,
@@ -129,7 +128,6 @@ def demo_topology():
"node_id": "748bcd89-624a-40eb-a8d3-1d2e85c99b51",
"node_type": "vpcs",
"properties": {
"startup_script": "",
},
"symbol": ":/symbols/computer.svg",
"width": 65,
@@ -151,6 +149,7 @@ def test_open(controller, tmpdir, demo_topology, async_run, http_server):
controller._computes["local"] = Compute("local", controller=controller, host=http_server[0], port=http_server[1])
controller._computes["vm"] = controller._computes["local"]
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.add_ubridge_udp_connection"):
project = async_run(controller.load_project(str(tmpdir / "demo.gns3")))
assert project.status == "opened"
assert len(project.computes) == 1

View File

@@ -168,6 +168,7 @@ def test_qemu_update(http_compute, vm, free_console_port, project, fake_qemu_vm):
def test_qemu_nio_create_udp(http_compute, vm):
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.add_ubridge_udp_connection"):
http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2})
response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
"lport": 4242,
@@ -179,16 +180,8 @@ def test_qemu_nio_create_udp(http_compute, vm):
assert response.json["type"] == "nio_udp"
def test_qemu_nio_create_ethernet(http_compute, vm):
http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2})
response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_ethernet",
"ethernet_device": "eth0",
},
example=True)
assert response.status == 409
def test_qemu_delete_nio(http_compute, vm):
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM._ubridge_send"):
http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2})
http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
"lport": 4242,

View File

@@ -64,6 +64,7 @@ def test_vpcs_create_port(http_compute, project, free_console_port):
def test_vpcs_nio_create_udp(http_compute, vm):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.add_ubridge_udp_connection"):
response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
@@ -83,28 +84,19 @@ def test_vpcs_nio_update_udp(http_compute, vm):
"rhost": "127.0.0.1",
"filters": {}},
example=True)
assert response.status == 201
assert response.status == 201, response.body.decode("utf-8")
assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
def test_vpcs_nio_create_tap(http_compute, vm, ethernet_device):
with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=True):
response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_tap",
"tap_device": ethernet_device})
assert response.status == 201
assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_tap"
def test_vpcs_delete_nio(http_compute, vm):
with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._ubridge_send"):
http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"})
response = http_compute.delete("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
assert response.status == 204
assert response.status == 204, response.body.decode()
assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"