Fixes Qemu networking.

This commit is contained in:
parent d87ebb3ed2
commit ee578d3c12
@@ -236,7 +236,7 @@ class QEMUHandler:
             "project_id": "UUID for the project",
             "vm_id": "UUID for the instance",
             "adapter_number": "Network adapter where the nio is located",
-            "port_number": "Port where the nio should be added"
+            "port_number": "Port on the adapter (always 0)"
         },
         status_codes={
             201: "NIO created",
@@ -251,7 +251,7 @@ class QEMUHandler:
         qemu_manager = Qemu.instance()
         vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
         nio = qemu_manager.create_nio(vm.qemu_path, request.json)
-        yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio)
+        yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
         response.set_status(201)
         response.json(nio)
 
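The call-site change above reflects that a Qemu network adapter exposes exactly one port, so the port_number argument is dropped from the Python API. A minimal sketch of the new binding flow, using hypothetical stand-in classes (only the two-argument adapter_add_nio_binding signature comes from this commit) and modern async/await in place of the yield from style the code base uses:

    # Hypothetical stand-ins for the real Qemu VM/adapter classes; only the
    # two-argument adapter_add_nio_binding signature is taken from the commit.
    import asyncio

    class FakeAdapter:
        def __init__(self):
            self.ports = {0: None}          # a Qemu adapter has a single port: 0

        def add_nio(self, port_number, nio):
            self.ports[port_number] = nio

    class FakeQemuVM:
        def __init__(self):
            self._ethernet_adapters = [FakeAdapter()]

        async def adapter_add_nio_binding(self, adapter_id, nio):
            # no port_id argument anymore: the NIO always lands on port 0
            self._ethernet_adapters[adapter_id].add_nio(0, nio)

    async def main():
        vm = FakeQemuVM()
        await vm.adapter_add_nio_binding(0, {"type": "nio_udp", "lport": 4242})
        print(vm._ethernet_adapters[0].ports[0])

    asyncio.run(main())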
@@ -262,7 +262,7 @@ class QEMUHandler:
             "project_id": "UUID for the project",
             "vm_id": "UUID for the instance",
             "adapter_number": "Network adapter where the nio is located",
-            "port_number": "Port from where the nio should be removed"
+            "port_number": "Port on the adapter (always 0)"
         },
         status_codes={
             204: "NIO deleted",
@@ -274,7 +274,7 @@ class QEMUHandler:
 
         qemu_manager = Qemu.instance()
         vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
-        yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]))
+        yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]))
         response.set_status(204)
 
     @classmethod
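For orientation, a hedged client-side sketch of the two NIO routes these handlers serve. The URL shape is an assumption based on the documented parameter names (the route string is not shown in this diff), the server address and UUIDs are placeholders, and note that port_number stays in the URL even though it is now always 0:

    # Hypothetical client for the POST/DELETE nio routes; the URL layout is an
    # assumption based on the parameter names, and all IDs are placeholders.
    import json
    import urllib.request

    SERVER = "http://127.0.0.1:8000"                     # placeholder address
    PROJECT_ID = "11111111-1111-1111-1111-111111111111"  # placeholder UUID
    VM_ID = "22222222-2222-2222-2222-222222222222"       # placeholder UUID

    url = "{}/projects/{}/qemu/vms/{}/adapters/0/ports/0/nio".format(
        SERVER, PROJECT_ID, VM_ID)
    payload = json.dumps({"type": "nio_udp", "lport": 4242,
                          "rport": 4243, "rhost": "127.0.0.1"}).encode()

    create = urllib.request.Request(url, data=payload, method="POST",
                                    headers={"Content-Type": "application/json"})
    delete = urllib.request.Request(url, method="DELETE")
    # urllib.request.urlopen(create)   # expect 201 "NIO created"
    # urllib.request.urlopen(delete)   # expect 204 "NIO deleted"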
@@ -290,7 +290,7 @@ class VirtualBoxHandler:
             "project_id": "UUID for the project",
             "vm_id": "UUID for the instance",
             "adapter_number": "Adapter from where the nio should be removed",
-            "port_number": "Port on the adapter (always)"
+            "port_number": "Port on the adapter (always 0)"
         },
         status_codes={
             204: "NIO deleted",
@@ -598,6 +598,7 @@ class QemuVM(BaseVM):
         if self.is_running() and self._monitor:
             log.debug("Execute QEMU monitor command: {}".format(command))
             try:
+                log.info("Connecting to Qemu monitor on {}:{}".format(self._monitor_host, self._monitor))
                 reader, writer = yield from asyncio.open_connection(self._monitor_host, self._monitor)
             except OSError as e:
                 log.warn("Could not connect to QEMU monitor: {}".format(e))
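The added log line makes the monitor connection visible. For context, the QEMU monitor is a line-oriented text protocol over TCP; a minimal sketch of one round-trip, assuming QEMU was started with a TCP monitor (e.g. -monitor tcp:127.0.0.1:4444,server,nowait), where the host, port, and command are illustrative only:

    # Minimal sketch of one round-trip to a QEMU monitor over TCP; the
    # host/port and the "info status" command are illustrative values.
    import asyncio

    async def send_monitor_command(host, port, command):
        reader, writer = await asyncio.open_connection(host, port)
        writer.write(command.encode() + b"\n")
        await writer.drain()
        line = await reader.readline()        # StreamReader yields bytes
        writer.close()
        await writer.wait_closed()
        return line.decode(errors="replace").strip()

    # asyncio.run(send_monitor_command("127.0.0.1", 4444, "info status"))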
@@ -682,13 +683,12 @@ class QemuVM(BaseVM):
             log.info("QEMU VM is not paused to be resumed, current status is {}".format(vm_status))
 
     @asyncio.coroutine
-    def adapter_add_nio_binding(self, adapter_id, port_id, nio):
+    def adapter_add_nio_binding(self, adapter_id, nio):
         """
         Adds a port NIO binding.
 
         :param adapter_id: adapter ID
-        :param port_id: port ID
-        :param nio: NIO instance to add to the slot/port
+        :param nio: NIO instance to add to the adapter
         """
 
         try:
@@ -708,13 +708,16 @@ class QemuVM(BaseVM):
                                                             nio.rport,
                                                             nio.rhost))
             else:
-                yield from self._control_vm("host_net_remove {} gns3-{}".format(adapter_id, adapter_id))
-                yield from self._control_vm("host_net_add socket vlan={},name=gns3-{},udp={}:{},localaddr={}:{}".format(adapter_id,
-                                                                                                                        adapter_id,
-                                                                                                                        nio.rhost,
-                                                                                                                        nio.rport,
-                                                                                                                        self._host,
-                                                                                                                        nio.lport))
+                # FIXME: does it work? very undocumented feature...
+                # Apparently there is a bug in Qemu...
+                # netdev_add [user|tap|socket|hubport|netmap],id=str[,prop=value][,...] -- add host network device
+                # netdev_del id -- remove host network device
+                yield from self._control_vm("netdev_del gns3-{}".format(adapter_id))
+                yield from self._control_vm("netdev_add socket,id=gns3-{},udp={}:{},localaddr={}:{}".format(adapter_id,
+                                                                                                            nio.rhost,
+                                                                                                            nio.rport,
+                                                                                                            self._host,
+                                                                                                            nio.lport))
 
             adapter.add_nio(0, nio)
             log.info("QEMU VM {name} [id={id}]: {nio} added to adapter {adapter_id}".format(name=self._name,
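This hunk is the core of the fix: the old vlan-based host_net_remove/host_net_add monitor commands are replaced with netdev_del/netdev_add. A sketch of the exact command strings the new code emits, where the two format strings are taken from the commit and the adapter ID and UDP endpoints are placeholder values:

    # The two format strings below come from the commit; the adapter ID and
    # UDP endpoints are placeholders standing in for nio/self attributes.
    adapter_id = 0
    rhost, rport = "127.0.0.1", 4243   # remote side of the UDP tunnel (nio.rhost, nio.rport)
    host, lport = "127.0.0.1", 4242    # local side (self._host, nio.lport)

    netdev_del = "netdev_del gns3-{}".format(adapter_id)
    netdev_add = "netdev_add socket,id=gns3-{},udp={}:{},localaddr={}:{}".format(
        adapter_id, rhost, rport, host, lport)

    print(netdev_del)   # netdev_del gns3-0
    print(netdev_add)   # netdev_add socket,id=gns3-0,udp=127.0.0.1:4243,localaddr=127.0.0.1:4242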
@@ -723,12 +726,11 @@ class QemuVM(BaseVM):
                                                                          adapter_id=adapter_id))
 
     @asyncio.coroutine
-    def adapter_remove_nio_binding(self, adapter_id, port_id):
+    def adapter_remove_nio_binding(self, adapter_id):
         """
         Removes a port NIO binding.
 
         :param adapter_id: adapter ID
-        :param port_id: port ID
 
         :returns: NIO instance
         """
@@ -745,6 +747,8 @@ class QemuVM(BaseVM):
             yield from self._control_vm("host_net_add user vlan={},name=gns3-{}".format(adapter_id, adapter_id))
 
         nio = adapter.get_nio(0)
+        if isinstance(nio, NIOUDP):
+            self.manager.port_manager.release_udp_port(nio.lport)
         adapter.remove_nio(0)
         log.info("QEMU VM {name} [id={id}]: {nio} removed from adapter {adapter_id}".format(name=self._name,
                                                                                             id=self._id,
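The other addition in this hunk: when the detached NIO is UDP-based, its local port is handed back to the port manager so it can be reallocated instead of leaking. A sketch of that release pattern with stand-in classes (NIOUDP and the real port manager live inside gns3server; these fakes only mirror the shape of the call):

    # Stand-in port pool illustrating the release added above; the real
    # NIOUDP and port_manager are gns3server internals, these are fakes.
    class FakePortManager:
        def __init__(self):
            self._used_udp_ports = {4242}

        def release_udp_port(self, port):
            self._used_udp_ports.discard(port)   # port becomes allocatable again

    class NIOUDP:
        def __init__(self, lport):
            self.lport = lport

    port_manager = FakePortManager()
    nio = NIOUDP(4242)
    if isinstance(nio, NIOUDP):              # non-UDP NIOs have no port to free
        port_manager.release_udp_port(nio.lport)
    print(port_manager._used_udp_ports)      # set()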
@@ -102,7 +102,7 @@ def test_stop(loop, vm, running_subprocess_mock):
 
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
         nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
-        vm.adapter_add_nio_binding(0, 0, nio)
+        vm.adapter_add_nio_binding(0, nio)
         loop.run_until_complete(asyncio.async(vm.start()))
         assert vm.is_running()
         loop.run_until_complete(asyncio.async(vm.stop()))
@@ -128,21 +128,21 @@ def test_suspend(loop, vm):
 
 def test_add_nio_binding_udp(vm, loop):
     nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
-    loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
+    loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     assert nio.lport == 4242
 
 
 def test_add_nio_binding_ethernet(vm, loop):
     with patch("gns3server.modules.base_manager.BaseManager._has_privileged_access", return_value=True):
         nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_generic_ethernet", "ethernet_device": "eth0"})
-        loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
+        loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
         assert nio.ethernet_device == "eth0"
 
 
 def test_port_remove_nio_binding(vm, loop):
     nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
-    loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
-    loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0, 0)))
+    loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
+    loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0)))
     assert vm._ethernet_adapters[0].ports[0] is None
 
 
@@ -244,10 +244,10 @@ def test_control_vm_expect_text(vm, loop, running_subprocess_mock):
     with asyncio_patch("asyncio.open_connection", return_value=(reader, writer)) as open_connect:
 
         future = asyncio.Future()
-        future.set_result("epic product")
+        future.set_result(b"epic product")
         reader.readline.return_value = future
 
-        res = loop.run_until_complete(asyncio.async(vm._control_vm("test", ["epic"])))
+        res = loop.run_until_complete(asyncio.async(vm._control_vm("test", [b"epic"])))
         assert writer.write.called_with("test")
 
         assert res == "epic product"
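The test fix reflects that asyncio's StreamReader.readline() yields bytes, so both the mocked monitor output and the expected patterns must be bytes, while the matched line is decoded back to str before comparison. A self-contained sketch of that pattern using an in-memory stream, where read_result is a hypothetical simplification of _control_vm's matching loop:

    # Hypothetical simplification of the bytes-in/str-out matching that the
    # test exercises; uses an in-memory stream, no real QEMU monitor needed.
    import asyncio

    async def read_result(reader, expected):
        line = await reader.readline()        # -> bytes, e.g. b"epic product"
        for pattern in expected:              # patterns must be bytes too
            if pattern in line:
                return line.decode("utf-8").strip()   # callers compare str
        return None

    async def demo():
        reader = asyncio.StreamReader()
        reader.feed_data(b"epic product\n")
        reader.feed_eof()
        print(await read_result(reader, [b"epic"]))   # -> "epic product"

    asyncio.run(demo())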