Fixes Qemu networking: replaces the legacy host_net_add/host_net_remove monitor commands with netdev_add/netdev_del for UDP NIO bindings, and drops the unused port_number argument from the Qemu NIO binding methods (each Qemu adapter exposes a single port, always 0).

Jeremy 2015-03-06 20:08:00 -07:00
parent d87ebb3ed2
commit ee578d3c12
4 changed files with 28 additions and 24 deletions

View File

@@ -236,7 +236,7 @@ class QEMUHandler:
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"adapter_number": "Network adapter where the nio is located",
"port_number": "Port where the nio should be added"
"port_number": "Port on the adapter (always 0)"
},
status_codes={
201: "NIO created",
@@ -251,7 +251,7 @@ class QEMUHandler:
qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
nio = qemu_manager.create_nio(vm.qemu_path, request.json)
- yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio)
+ yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
response.set_status(201)
response.json(nio)
@@ -262,7 +262,7 @@ class QEMUHandler:
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"adapter_number": "Network adapter where the nio is located",
"port_number": "Port from where the nio should be removed"
"port_number": "Port on the adapter (always 0)"
},
status_codes={
204: "NIO deleted",
@@ -274,7 +274,7 @@ class QEMUHandler:
qemu_manager = Qemu.instance()
vm = qemu_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
- yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]))
+ yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]))
response.set_status(204)
@classmethod

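For orientation, a minimal client-side sketch of the two endpoints touched above: one POST to create a UDP NIO on an adapter, one DELETE to remove it. The base URL, the UUIDs, and the exact route layout are assumptions inferred from the parameter names in the hunks (the route definitions are not part of this diff); the JSON payload is the same one the test file below uses.

import requests

# Hypothetical server address and UUIDs; the route layout is inferred from
# the handler parameters above (port_number stays in the URL but is always 0).
BASE = "http://127.0.0.1:8000"
PROJECT_ID = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
VM_ID = "11111111-2222-3333-4444-555555555555"

url = "{}/projects/{}/qemu/vms/{}/adapters/0/ports/0/nio".format(BASE, PROJECT_ID, VM_ID)

# Same UDP NIO payload as the tests use.
nio = {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"}

response = requests.post(url, json=nio)
assert response.status_code == 201  # "NIO created"

response = requests.delete(url)
assert response.status_code == 204  # "NIO deleted"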
View File

@@ -290,7 +290,7 @@ class VirtualBoxHandler:
"project_id": "UUID for the project",
"vm_id": "UUID for the instance",
"adapter_number": "Adapter from where the nio should be removed",
"port_number": "Port on the adapter (always)"
"port_number": "Port on the adapter (always 0)"
},
status_codes={
204: "NIO deleted",

View File

@@ -598,6 +598,7 @@ class QemuVM(BaseVM):
if self.is_running() and self._monitor:
log.debug("Execute QEMU monitor command: {}".format(command))
try:
log.info("Connecting to Qemu monitor on {}:{}".format(self._monitor_host, self._monitor))
reader, writer = yield from asyncio.open_connection(self._monitor_host, self._monitor)
except OSError as e:
log.warn("Could not connect to QEMU monitor: {}".format(e))
@@ -682,13 +683,12 @@ class QemuVM(BaseVM):
log.info("QEMU VM is not paused to be resumed, current status is {}".format(vm_status))
@asyncio.coroutine
- def adapter_add_nio_binding(self, adapter_id, port_id, nio):
+ def adapter_add_nio_binding(self, adapter_id, nio):
"""
Adds a port NIO binding.
:param adapter_id: adapter ID
- :param port_id: port ID
- :param nio: NIO instance to add to the slot/port
+ :param nio: NIO instance to add to the adapter
"""
try:
@@ -708,13 +708,16 @@ class QemuVM(BaseVM):
nio.rport,
nio.rhost))
else:
yield from self._control_vm("host_net_remove {} gns3-{}".format(adapter_id, adapter_id))
yield from self._control_vm("host_net_add socket vlan={},name=gns3-{},udp={}:{},localaddr={}:{}".format(adapter_id,
adapter_id,
nio.rhost,
nio.rport,
self._host,
nio.lport))
# FIXME: does it work? very undocumented feature...
# Apparently there is a bug in Qemu...
# netdev_add [user|tap|socket|hubport|netmap],id=str[,prop=value][,...] -- add host network device
# netdev_del id -- remove host network device
yield from self._control_vm("netdev_del gns3-{}".format(adapter_id))
yield from self._control_vm("netdev_add socket,id=gns3-{},udp={}:{},localaddr={}:{}".format(adapter_id,
nio.rhost,
nio.rport,
self._host,
nio.lport))
adapter.add_nio(0, nio)
log.info("QEMU VM {name} [id={id}]: {nio} added to adapter {adapter_id}".format(name=self._name,
@@ -723,12 +726,11 @@ class QemuVM(BaseVM):
adapter_id=adapter_id))
@asyncio.coroutine
- def adapter_remove_nio_binding(self, adapter_id, port_id):
+ def adapter_remove_nio_binding(self, adapter_id):
"""
Removes a port NIO binding.
:param adapter_id: adapter ID
- :param port_id: port ID
:returns: NIO instance
"""
@@ -745,6 +747,8 @@ class QemuVM(BaseVM):
yield from self._control_vm("host_net_add user vlan={},name=gns3-{}".format(adapter_id, adapter_id))
nio = adapter.get_nio(0)
+ if isinstance(nio, NIOUDP):
+ self.manager.port_manager.release_udp_port(nio.lport)
adapter.remove_nio(0)
log.info("QEMU VM {name} [id={id}]: {nio} removed from adapter {adapter_id}".format(name=self._name,
id=self._id,

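The heart of the fix is the switch from the legacy vlan-based host_net_add/host_net_remove monitor commands to netdev_del/netdev_add. Below is a standalone sketch of that monitor exchange, a simplified stand-in for QemuVM._control_vm() (no retries, no expected-result matching); it assumes a QEMU instance started with -monitor tcp:127.0.0.1:4444,server,nowait, and uses modern async/await where the 2015 codebase uses yield from coroutines.

import asyncio


async def monitor_command(host, port, command):
    """Send one command to the QEMU human monitor over TCP, return one reply line."""
    reader, writer = await asyncio.open_connection(host, port)
    await reader.readline()  # consume the "QEMU ... monitor" banner line
    writer.write(command.encode("utf-8") + b"\n")
    await writer.drain()
    line = await reader.readline()  # readline() returns bytes, not str
    writer.close()
    return line.decode("utf-8", errors="replace").strip()


async def rebind_udp(host, port, adapter_id, rhost, rport, lhost, lport):
    # Mirrors the fixed code path above: delete the old backend, then
    # recreate it as a UDP socket backend under the same gns3-<n> id.
    await monitor_command(host, port, "netdev_del gns3-{}".format(adapter_id))
    await monitor_command(host, port,
                          "netdev_add socket,id=gns3-{},udp={}:{},localaddr={}:{}".format(
                              adapter_id, rhost, rport, lhost, lport))


# Example: asyncio.run(rebind_udp("127.0.0.1", 4444, 0, "127.0.0.1", 4243, "127.0.0.1", 4242))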
View File

@@ -102,7 +102,7 @@ def test_stop(loop, vm, running_subprocess_mock):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
- vm.adapter_add_nio_binding(0, 0, nio)
+ vm.adapter_add_nio_binding(0, nio)
loop.run_until_complete(asyncio.async(vm.start()))
assert vm.is_running()
loop.run_until_complete(asyncio.async(vm.stop()))
@@ -128,21 +128,21 @@ def test_suspend(loop, vm):
def test_add_nio_binding_udp(vm, loop):
nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
- loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
+ loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
assert nio.lport == 4242
def test_add_nio_binding_ethernet(vm, loop):
with patch("gns3server.modules.base_manager.BaseManager._has_privileged_access", return_value=True):
nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_generic_ethernet", "ethernet_device": "eth0"})
- loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
+ loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
assert nio.ethernet_device == "eth0"
def test_port_remove_nio_binding(vm, loop):
nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
- loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
- loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0, 0)))
+ loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
+ loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0)))
assert vm._ethernet_adapters[0].ports[0] is None
@@ -244,10 +244,10 @@ def test_control_vm_expect_text(vm, loop, running_subprocess_mock):
with asyncio_patch("asyncio.open_connection", return_value=(reader, writer)) as open_connect:
future = asyncio.Future()
future.set_result("epic product")
future.set_result(b"epic product")
reader.readline.return_value = future
- res = loop.run_until_complete(asyncio.async(vm._control_vm("test", ["epic"])))
+ res = loop.run_until_complete(asyncio.async(vm._control_vm("test", [b"epic"])))
assert writer.write.called_with("test")
assert res == "epic product"
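
The byte-string changes in the last hunk are the subtle part: asyncio's StreamReader.readline() yields bytes, not str, so both the mocked monitor reply and the expected patterns handed to _control_vm() must be byte strings (a str pattern tested against a bytes line raises TypeError rather than matching). A self-contained illustration:

import asyncio


async def main():
    reader = asyncio.StreamReader()
    reader.feed_data(b"epic product\n")
    reader.feed_eof()
    line = await reader.readline()  # -> b"epic product\n" (bytes)
    assert b"epic" in line          # bytes pattern matches a bytes line
    try:
        "epic" in line              # str pattern against bytes...
    except TypeError:
        print("str patterns do not match bytes lines")

asyncio.run(main())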