From 76af98404aabe6c0ad7f3eac2b0267386ef824af Mon Sep 17 00:00:00 2001 From: grossmj Date: Mon, 15 Oct 2018 17:05:49 +0700 Subject: [PATCH] Drop Python 3.4 and switch to async / await syntax for asyncio. Fixes #1425 --- gns3server/compute/base_manager.py | 57 ++- gns3server/compute/base_node.py | 92 ++--- gns3server/compute/builtin/nodes/cloud.py | 112 +++--- .../compute/builtin/nodes/ethernet_hub.py | 18 +- .../compute/builtin/nodes/ethernet_switch.py | 18 +- gns3server/compute/docker/__init__.py | 45 +-- gns3server/compute/docker/docker_vm.py | 261 ++++++------- gns3server/compute/dynamips/__init__.py | 130 +++---- .../compute/dynamips/dynamips_hypervisor.py | 46 +-- gns3server/compute/dynamips/hypervisor.py | 14 +- gns3server/compute/dynamips/nios/nio.py | 52 ++- .../dynamips/nios/nio_generic_ethernet.py | 5 +- .../dynamips/nios/nio_linux_ethernet.py | 5 +- gns3server/compute/dynamips/nios/nio_null.py | 5 +- gns3server/compute/dynamips/nios/nio_tap.py | 5 +- gns3server/compute/dynamips/nios/nio_udp.py | 19 +- gns3server/compute/dynamips/nios/nio_unix.py | 5 +- gns3server/compute/dynamips/nios/nio_vde.py | 5 +- .../compute/dynamips/nodes/atm_switch.py | 85 ++--- gns3server/compute/dynamips/nodes/bridge.py | 27 +- gns3server/compute/dynamips/nodes/c1700.py | 17 +- gns3server/compute/dynamips/nodes/c2600.py | 17 +- gns3server/compute/dynamips/nodes/c2691.py | 5 +- gns3server/compute/dynamips/nodes/c3600.py | 17 +- gns3server/compute/dynamips/nodes/c3725.py | 5 +- gns3server/compute/dynamips/nodes/c3745.py | 5 +- gns3server/compute/dynamips/nodes/c7200.py | 42 +-- .../compute/dynamips/nodes/ethernet_hub.py | 41 +- .../compute/dynamips/nodes/ethernet_switch.py | 98 ++--- .../dynamips/nodes/frame_relay_switch.py | 67 ++-- gns3server/compute/dynamips/nodes/router.py | 353 ++++++++---------- gns3server/compute/iou/__init__.py | 7 +- gns3server/compute/iou/iou_vm.py | 121 +++--- gns3server/compute/project.py | 43 +-- gns3server/compute/qemu/__init__.py | 37 +- gns3server/compute/qemu/qcow2.py | 7 +- gns3server/compute/qemu/qemu_vm.py | 217 +++++------ gns3server/compute/traceng/__init__.py | 5 +- gns3server/compute/traceng/traceng_vm.py | 64 ++-- gns3server/compute/virtualbox/__init__.py | 32 +- .../compute/virtualbox/virtualbox_vm.py | 299 +++++++-------- gns3server/compute/vmware/__init__.py | 72 ++-- gns3server/compute/vmware/vmware_vm.py | 159 ++++---- gns3server/compute/vpcs/__init__.py | 10 +- gns3server/compute/vpcs/vpcs_vm.py | 71 ++-- gns3server/controller/__init__.py | 107 +++--- gns3server/controller/compute.py | 115 +++--- gns3server/controller/drawing.py | 3 +- gns3server/controller/export_project.py | 23 +- gns3server/controller/gns3vm/__init__.py | 68 ++-- gns3server/controller/gns3vm/base_gns3_vm.py | 12 +- .../controller/gns3vm/hyperv_gns3_vm.py | 25 +- .../controller/gns3vm/remote_gns3_vm.py | 12 +- .../controller/gns3vm/virtualbox_gns3_vm.py | 122 +++--- .../controller/gns3vm/vmware_gns3_vm.py | 52 ++- gns3server/controller/import_project.py | 23 +- gns3server/controller/link.py | 50 +-- gns3server/controller/node.py | 84 ++--- gns3server/controller/notification.py | 5 +- gns3server/controller/project.py | 138 +++---- gns3server/controller/snapshot.py | 20 +- gns3server/controller/udp_link.py | 53 ++- .../api/compute/atm_switch_handler.py | 36 +- .../handlers/api/compute/cloud_handler.py | 32 +- .../handlers/api/compute/docker_handler.py | 64 ++-- .../api/compute/dynamips_vm_handler.py | 86 ++--- .../api/compute/ethernet_hub_handler.py | 36 +- 
.../api/compute/ethernet_switch_handler.py | 42 +-- .../api/compute/frame_relay_switch_handler.py | 36 +- .../handlers/api/compute/iou_handler.py | 60 +-- .../handlers/api/compute/nat_handler.py | 28 +- .../api/compute/notification_handler.py | 14 +- .../handlers/api/compute/project_handler.py | 50 +-- .../handlers/api/compute/qemu_handler.py | 88 ++--- .../handlers/api/compute/traceng_handler.py | 44 +-- .../api/compute/virtualbox_handler.py | 64 ++-- .../handlers/api/compute/vmware_handler.py | 52 +-- .../handlers/api/compute/vpcs_handler.py | 44 +-- .../api/controller/appliance_handler.py | 8 +- .../api/controller/compute_handler.py | 36 +- .../api/controller/drawing_handler.py | 26 +- .../api/controller/gns3_vm_handler.py | 8 +- .../handlers/api/controller/link_handler.py | 66 ++-- .../handlers/api/controller/node_handler.py | 118 +++--- .../api/controller/notification_handler.py | 22 +- .../api/controller/project_handler.py | 96 +++-- .../handlers/api/controller/server_handler.py | 17 +- .../api/controller/snapshot_handler.py | 12 +- .../handlers/api/controller/symbol_handler.py | 8 +- gns3server/handlers/index_handler.py | 4 +- gns3server/notification_queue.py | 10 +- gns3server/run.py | 6 +- gns3server/ubridge/hypervisor.py | 19 +- gns3server/ubridge/ubridge_hypervisor.py | 35 +- gns3server/utils/asyncio/__init__.py | 73 ++-- gns3server/utils/asyncio/embed_shell.py | 48 +-- gns3server/utils/asyncio/pool.py | 10 +- .../utils/asyncio/raw_command_server.py | 30 +- gns3server/utils/asyncio/serial.py | 30 +- gns3server/utils/asyncio/telnet_server.py | 93 ++--- gns3server/version.py | 2 +- gns3server/web/response.py | 14 +- gns3server/web/route.py | 25 +- gns3server/web/web_server.py | 38 +- setup.py | 2 +- tests/compute/docker/test_docker.py | 12 +- tests/compute/docker/test_docker_vm.py | 10 +- .../virtualbox/test_virtualbox_manager.py | 3 +- tests/controller/test_compute.py | 6 +- tests/controller/test_link.py | 10 +- tests/controller/test_node.py | 3 +- tests/controller/test_udp_link.py | 36 +- tests/handlers/api/base.py | 17 +- tests/handlers/api/controller/test_link.py | 8 +- tests/handlers/api/controller/test_project.py | 10 +- tests/handlers/api/controller/test_server.py | 4 +- tests/utils.py | 18 +- tests/utils/asyncio/test_embed_shell.py | 13 +- tests/utils/test_asyncio.py | 5 +- 119 files changed, 2432 insertions(+), 3084 deletions(-) diff --git a/gns3server/compute/base_manager.py b/gns3server/compute/base_manager.py index 969104f8..85152b74 100644 --- a/gns3server/compute/base_manager.py +++ b/gns3server/compute/base_manager.py @@ -34,7 +34,6 @@ log = logging.getLogger(__name__) from uuid import UUID, uuid4 from gns3server.utils.interfaces import is_interface_up -from gns3server.utils.asyncio import asyncio_ensure_future from ..config import Config from ..utils.asyncio import wait_run_in_executor from ..utils import force_unix_path @@ -130,15 +129,14 @@ class BaseManager: return self._config - @asyncio.coroutine - def unload(self): + async def unload(self): tasks = [] for node_id in self._nodes.keys(): - tasks.append(asyncio_ensure_future(self.close_node(node_id))) + tasks.append(asyncio.ensure_future(self.close_node(node_id))) if tasks: - done, _ = yield from asyncio.wait(tasks) + done, _ = await asyncio.wait(tasks) for future in done: try: future.result() @@ -179,8 +177,7 @@ class BaseManager: return node - @asyncio.coroutine - def convert_old_project(self, project, legacy_id, name): + async def convert_old_project(self, project, legacy_id, name): """ Convert projects made 
before version 1.3 @@ -199,7 +196,7 @@ class BaseManager: log.info("Converting old project...") try: log.info('Moving "{}" to "{}"'.format(legacy_project_files_path, new_project_files_path)) - yield from wait_run_in_executor(shutil.move, legacy_project_files_path, new_project_files_path) + await wait_run_in_executor(shutil.move, legacy_project_files_path, new_project_files_path) except OSError as e: raise aiohttp.web.HTTPInternalServerError(text="Could not move project files directory: {} to {} {}".format(legacy_project_files_path, new_project_files_path, e)) @@ -212,7 +209,7 @@ class BaseManager: log.info("Converting old remote project...") try: log.info('Moving "{}" to "{}"'.format(legacy_remote_project_path, new_remote_project_path)) - yield from wait_run_in_executor(shutil.move, legacy_remote_project_path, new_remote_project_path) + await wait_run_in_executor(shutil.move, legacy_remote_project_path, new_remote_project_path) except OSError as e: raise aiohttp.web.HTTPInternalServerError(text="Could not move directory: {} to {} {}".format(legacy_remote_project_path, new_remote_project_path, e)) @@ -226,15 +223,14 @@ class BaseManager: if os.path.exists(legacy_vm_working_path) and not os.path.exists(new_vm_working_path): try: log.info('Moving "{}" to "{}"'.format(legacy_vm_working_path, new_vm_working_path)) - yield from wait_run_in_executor(shutil.move, legacy_vm_working_path, new_vm_working_path) + await wait_run_in_executor(shutil.move, legacy_vm_working_path, new_vm_working_path) except OSError as e: raise aiohttp.web.HTTPInternalServerError(text="Could not move vm working directory: {} to {} {}".format(legacy_vm_working_path, new_vm_working_path, e)) return new_id - @asyncio.coroutine - def create_node(self, name, project_id, node_id, *args, **kwargs): + async def create_node(self, name, project_id, node_id, *args, **kwargs): """ Create a new node @@ -249,23 +245,22 @@ class BaseManager: project = ProjectManager.instance().get_project(project_id) if node_id and isinstance(node_id, int): # old project - with (yield from BaseManager._convert_lock): - node_id = yield from self.convert_old_project(project, node_id, name) + async with BaseManager._convert_lock: + node_id = await self.convert_old_project(project, node_id, name) if not node_id: node_id = str(uuid4()) node = self._NODE_CLASS(name, node_id, project, self, *args, **kwargs) if asyncio.iscoroutinefunction(node.create): - yield from node.create() + await node.create() else: node.create() self._nodes[node.id] = node project.add_node(node) return node - @asyncio.coroutine - def duplicate_node(self, source_node_id, destination_node_id): + async def duplicate_node(self, source_node_id, destination_node_id): """ Duplicate a node @@ -296,8 +291,7 @@ class BaseManager: return destination_node - @asyncio.coroutine - def close_node(self, node_id): + async def close_node(self, node_id): """ Close a node @@ -308,13 +302,12 @@ class BaseManager: node = self.get_node(node_id) if asyncio.iscoroutinefunction(node.close): - yield from node.close() + await node.close() else: node.close() return node - @asyncio.coroutine - def project_closing(self, project): + async def project_closing(self, project): """ Called when a project is about to be closed. @@ -323,8 +316,7 @@ class BaseManager: pass - @asyncio.coroutine - def project_closed(self, project): + async def project_closed(self, project): """ Called when a project is closed. 
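
The hunks above already show the full conversion recipe that the rest of this patch repeats file by file: the @asyncio.coroutine decorator plus "yield from" becomes "async def" plus "await", acquiring a lock moves from "with (yield from lock):" to "async with lock:", and the project's asyncio_ensure_future() compatibility wrapper (kept around for Python 3.4) is replaced by the standard asyncio.ensure_future(). A minimal before/after sketch of that pattern, using illustrative names that are not taken from the patch:

import asyncio

_convert_lock = asyncio.Lock()

# Old style, removed by this patch (Python 3.4 compatible):
#
#     @asyncio.coroutine
#     def close_node(node_id):
#         with (yield from _convert_lock):
#             yield from asyncio.sleep(0)    # stand-in for the real work
#
# New style, introduced by this patch (requires Python 3.5+):

async def close_node(node_id):
    async with _convert_lock:
        await asyncio.sleep(0)               # stand-in for the real work

async def unload(node_ids):
    # scheduling tasks now calls asyncio.ensure_future() directly,
    # mirroring BaseManager.unload() above
    tasks = [asyncio.ensure_future(close_node(n)) for n in node_ids]
    if tasks:
        await asyncio.wait(tasks)

asyncio.get_event_loop().run_until_complete(unload(["a", "b"]))
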
@@ -335,8 +327,7 @@ class BaseManager: if node.id in self._nodes: del self._nodes[node.id] - @asyncio.coroutine - def delete_node(self, node_id): + async def delete_node(self, node_id): """ Delete a node. The node working directory will be destroyed when a commit is received. @@ -347,11 +338,11 @@ class BaseManager: node = None try: node = self.get_node(node_id) - yield from self.close_node(node_id) + await self.close_node(node_id) finally: if node: node.project.emit("node.deleted", node) - yield from node.project.remove_node(node) + await node.project.remove_node(node) if node.id in self._nodes: del self._nodes[node.id] return node @@ -526,8 +517,7 @@ class BaseManager: return relpath return path - @asyncio.coroutine - def list_images(self): + async def list_images(self): """ Return the list of available images for this node type @@ -548,8 +538,7 @@ class BaseManager: return default_images_directory(self._NODE_TYPE) raise NotImplementedError - @asyncio.coroutine - def write_image(self, filename, stream): + async def write_image(self, filename, stream): directory = self.get_images_directory() path = os.path.abspath(os.path.join(directory, *os.path.split(filename))) @@ -563,13 +552,13 @@ class BaseManager: os.makedirs(os.path.dirname(path), exist_ok=True) with open(tmp_path, 'wb') as f: while True: - packet = yield from stream.read(4096) + packet = await stream.read(4096) if not packet: break f.write(packet) os.chmod(tmp_path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC) shutil.move(tmp_path, path) - yield from cancellable_wait_run_in_executor(md5sum, path) + await cancellable_wait_run_in_executor(md5sum, path) except OSError as e: raise aiohttp.web.HTTPConflict(text="Could not write image: {} because {}".format(filename, e)) diff --git a/gns3server/compute/base_node.py b/gns3server/compute/base_node.py index 859e58cd..487ac1c4 100644 --- a/gns3server/compute/base_node.py +++ b/gns3server/compute/base_node.py @@ -273,8 +273,7 @@ class BaseNode: name=self.name, id=self.id)) - @asyncio.coroutine - def delete(self): + async def delete(self): """ Delete the node (including all its files). """ @@ -285,7 +284,7 @@ class BaseNode: directory = self.project.node_working_directory(self) if os.path.exists(directory): try: - yield from wait_run_in_executor(shutil.rmtree, directory, onerror=set_rw) + await wait_run_in_executor(shutil.rmtree, directory, onerror=set_rw) except OSError as e: raise aiohttp.web.HTTPInternalServerError(text="Could not delete the node working directory: {}".format(e)) @@ -296,13 +295,12 @@ class BaseNode: raise NotImplementedError - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stop the node process. """ - yield from self.stop_wrap_console() + await self.stop_wrap_console() self.status = "stopped" def suspend(self): @@ -312,8 +310,7 @@ class BaseNode: raise NotImplementedError - @asyncio.coroutine - def close(self): + async def close(self): """ Close the node process. 
""" @@ -339,8 +336,7 @@ class BaseNode: self._closed = True return True - @asyncio.coroutine - def start_wrap_console(self): + async def start_wrap_console(self): """ Start a telnet proxy for the console allowing multiple client connected at the same time @@ -351,27 +347,26 @@ class BaseNode: remaining_trial = 60 while True: try: - (reader, writer) = yield from asyncio.open_connection(host="127.0.0.1", port=self._internal_console_port) + (reader, writer) = await asyncio.open_connection(host="127.0.0.1", port=self._internal_console_port) break except (OSError, ConnectionRefusedError) as e: if remaining_trial <= 0: raise e - yield from asyncio.sleep(0.1) + await asyncio.sleep(0.1) remaining_trial -= 1 - yield from AsyncioTelnetServer.write_client_intro(writer, echo=True) + await AsyncioTelnetServer.write_client_intro(writer, echo=True) server = AsyncioTelnetServer(reader=reader, writer=writer, binary=True, echo=True) # warning: this will raise OSError exception if there is a problem... - self._wrapper_telnet_server = yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console) + self._wrapper_telnet_server = await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console) - @asyncio.coroutine - def stop_wrap_console(self): + async def stop_wrap_console(self): """ Stops the telnet proxy. """ if self._wrapper_telnet_server: self._wrapper_telnet_server.close() - yield from self._wrapper_telnet_server.wait_closed() + await self._wrapper_telnet_server.wait_closed() @property def allocate_aux(self): @@ -530,8 +525,7 @@ class BaseNode: path = shutil.which(path) return path - @asyncio.coroutine - def _ubridge_send(self, command): + async def _ubridge_send(self, command): """ Sends a command to uBridge hypervisor. @@ -539,17 +533,16 @@ class BaseNode: """ if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running(): - yield from self._start_ubridge() + await self._start_ubridge() if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running(): raise NodeError("Cannot send command '{}': uBridge is not running".format(command)) try: - yield from self._ubridge_hypervisor.send(command) + await self._ubridge_hypervisor.send(command) except UbridgeError as e: raise UbridgeError("Error while sending command '{}': {}: {}".format(command, e, self._ubridge_hypervisor.read_stdout())) @locking - @asyncio.coroutine - def _start_ubridge(self): + async def _start_ubridge(self): """ Starts uBridge (handles connections to and from this node). """ @@ -569,24 +562,22 @@ class BaseNode: if not self.ubridge: self._ubridge_hypervisor = Hypervisor(self._project, self.ubridge_path, self.working_dir, server_host) log.info("Starting new uBridge hypervisor {}:{}".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port)) - yield from self._ubridge_hypervisor.start() + await self._ubridge_hypervisor.start() if self._ubridge_hypervisor: log.info("Hypervisor {}:{} has successfully started".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port)) - yield from self._ubridge_hypervisor.connect() + await self._ubridge_hypervisor.connect() - @asyncio.coroutine - def _stop_ubridge(self): + async def _stop_ubridge(self): """ Stops uBridge. 
""" if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running(): log.info("Stopping uBridge hypervisor {}:{}".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port)) - yield from self._ubridge_hypervisor.stop() + await self._ubridge_hypervisor.stop() self._ubridge_hypervisor = None - @asyncio.coroutine - def add_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio): + async def add_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio): """ Creates an UDP connection in uBridge. @@ -595,43 +586,41 @@ class BaseNode: :param destination_nio: destination NIO instance """ - yield from self._ubridge_send("bridge create {name}".format(name=bridge_name)) + await self._ubridge_send("bridge create {name}".format(name=bridge_name)) if not isinstance(destination_nio, NIOUDP): raise NodeError("Destination NIO is not UDP") - yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, + await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=source_nio.lport, rhost=source_nio.rhost, rport=source_nio.rport)) - yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, + await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=destination_nio.lport, rhost=destination_nio.rhost, rport=destination_nio.rport)) if destination_nio.capturing: - yield from self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name, + await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name, pcap_file=destination_nio.pcap_output_file)) - yield from self._ubridge_send('bridge start {name}'.format(name=bridge_name)) - yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters) + await self._ubridge_send('bridge start {name}'.format(name=bridge_name)) + await self._ubridge_apply_filters(bridge_name, destination_nio.filters) - @asyncio.coroutine - def update_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio): + async def update_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio): if destination_nio: - yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters) + await self._ubridge_apply_filters(bridge_name, destination_nio.filters) - def ubridge_delete_bridge(self, name): + async def ubridge_delete_bridge(self, name): """ :params name: Delete the bridge with this name """ if self.ubridge: - yield from self._ubridge_send("bridge delete {name}".format(name=name)) + await self._ubridge_send("bridge delete {name}".format(name=name)) - @asyncio.coroutine - def _ubridge_apply_filters(self, bridge_name, filters): + async def _ubridge_apply_filters(self, bridge_name, filters): """ Apply packet filters @@ -639,11 +628,11 @@ class BaseNode: :param filters: Array of filter dictionary """ - yield from self._ubridge_send('bridge reset_packet_filters ' + bridge_name) + await self._ubridge_send('bridge reset_packet_filters ' + bridge_name) for packet_filter in self._build_filter_list(filters): cmd = 'bridge add_packet_filter {} {}'.format(bridge_name, packet_filter) try: - yield from self._ubridge_send(cmd) + await self._ubridge_send(cmd) except UbridgeError as e: match = re.search("Cannot compile filter '(.*)': syntax error", str(e)) if match: @@ -675,8 +664,7 @@ class BaseNode: filter_value=" ".join([str(v) for v in values])) i += 1 - @asyncio.coroutine - def 
_add_ubridge_ethernet_connection(self, bridge_name, ethernet_interface, block_host_traffic=False): + async def _add_ubridge_ethernet_connection(self, bridge_name, ethernet_interface, block_host_traffic=False): """ Creates a connection with an Ethernet interface in uBridge. @@ -687,7 +675,7 @@ class BaseNode: if sys.platform.startswith("linux") and block_host_traffic is False: # on Linux we use RAW sockets by default excepting if host traffic must be blocked - yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface)) + await self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface)) elif sys.platform.startswith("win"): # on Windows we use Winpcap/Npcap windows_interfaces = interfaces() @@ -702,26 +690,26 @@ class BaseNode: npf_id = interface["id"] source_mac = interface["mac_address"] if npf_id: - yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, + await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=npf_id)) else: raise NodeError("Could not find NPF id for interface {}".format(ethernet_interface)) if block_host_traffic: if source_mac: - yield from self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac)) + await self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac)) log.info('PCAP filter applied on "{interface}" for source MAC {mac}'.format(interface=ethernet_interface, mac=source_mac)) else: log.warning("Could not block host network traffic on {} (no MAC address found)".format(ethernet_interface)) else: # on other platforms we just rely on the pcap library - yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface)) + await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface)) source_mac = None for interface in interfaces(): if interface["name"] == ethernet_interface: source_mac = interface["mac_address"] if source_mac: - yield from self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac)) + await self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac)) log.info('PCAP filter applied on "{interface}" for source MAC {mac}'.format(interface=ethernet_interface, mac=source_mac)) def _create_local_udp_tunnel(self): diff --git a/gns3server/compute/builtin/nodes/cloud.py b/gns3server/compute/builtin/nodes/cloud.py index 65065a0e..1db44c1a 100644 --- a/gns3server/compute/builtin/nodes/cloud.py +++ b/gns3server/compute/builtin/nodes/cloud.py @@ -207,58 +207,54 @@ class Cloud(BaseNode): self._ports_mapping = ports - @asyncio.coroutine - def create(self): + async def create(self): """ Creates this cloud. """ - yield from self.start() + await self.start() log.info('Cloud "{name}" [{id}] has been created'.format(name=self._name, id=self._id)) - @asyncio.coroutine - def start(self): + async def start(self): """ Starts this cloud. 
""" if self.status != "started": if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running(): - yield from self._stop_ubridge() - yield from self._start_ubridge() + await self._stop_ubridge() + await self._start_ubridge() for port_number in self._nios: if self._nios[port_number]: try: - yield from self._add_ubridge_connection(self._nios[port_number], port_number) + await self._add_ubridge_connection(self._nios[port_number], port_number) except (UbridgeError, NodeError) as e: self.status = "stopped" raise e self.status = "started" - @asyncio.coroutine - def close(self): + async def close(self): """ Closes this cloud. """ - if not (yield from super().close()): + if not (await super().close()): return False for nio in self._nios.values(): if nio and isinstance(nio, NIOUDP): self.manager.port_manager.release_udp_port(nio.lport, self._project) - yield from self._stop_ubridge() + await self._stop_ubridge() log.info('Cloud "{name}" [{id}] has been closed'.format(name=self._name, id=self._id)) - @asyncio.coroutine - def _is_wifi_adapter_osx(self, adapter_name): + async def _is_wifi_adapter_osx(self, adapter_name): """ Detects a Wifi adapter on Mac. """ try: - output = yield from gns3server.utils.asyncio.subprocess_check_output("networksetup", "-listallhardwareports") + output = await gns3server.utils.asyncio.subprocess_check_output("networksetup", "-listallhardwareports") except (OSError, subprocess.SubprocessError) as e: log.warning("Could not execute networksetup: {}".format(e)) return False @@ -274,8 +270,7 @@ class Cloud(BaseNode): is_wifi = True return False - @asyncio.coroutine - def _add_ubridge_connection(self, nio, port_number): + async def _add_ubridge_connection(self, nio, port_number): """ Creates a connection in uBridge. @@ -294,19 +289,19 @@ class Cloud(BaseNode): port_number=port_number)) bridge_name = "{}-{}".format(self._id, port_number) - yield from self._ubridge_send("bridge create {name}".format(name=bridge_name)) + await self._ubridge_send("bridge create {name}".format(name=bridge_name)) if not isinstance(nio, NIOUDP): raise NodeError("Source NIO is not UDP") - yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, + await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=nio.lport, rhost=nio.rhost, rport=nio.rport)) - yield from self._ubridge_apply_filters(bridge_name, nio.filters) + await self._ubridge_apply_filters(bridge_name, nio.filters) if port_info["type"] in ("ethernet", "tap"): if sys.platform.startswith("win"): - yield from self._add_ubridge_ethernet_connection(bridge_name, port_info["interface"]) + await self._add_ubridge_ethernet_connection(bridge_name, port_info["interface"]) else: @@ -316,29 +311,28 @@ class Cloud(BaseNode): raise NodeError("Interface '{}' could not be found on this system".format(port_info["interface"])) if sys.platform.startswith("linux"): - yield from self._add_linux_ethernet(port_info, bridge_name) + await self._add_linux_ethernet(port_info, bridge_name) elif sys.platform.startswith("darwin"): - yield from self._add_osx_ethernet(port_info, bridge_name) + await self._add_osx_ethernet(port_info, bridge_name) else: - yield from self._add_windows_ethernet(port_info, bridge_name) + await self._add_windows_ethernet(port_info, bridge_name) elif port_info["type"] == "tap": - yield from self._ubridge_send('bridge add_nio_tap {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"])) + await 
self._ubridge_send('bridge add_nio_tap {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"])) elif port_info["type"] == "udp": - yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, + await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name, lport=port_info["lport"], rhost=port_info["rhost"], rport=port_info["rport"])) if nio.capturing: - yield from self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name, + await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name, pcap_file=nio.pcap_output_file)) - yield from self._ubridge_send('bridge start {name}'.format(name=bridge_name)) + await self._ubridge_send('bridge start {name}'.format(name=bridge_name)) - @asyncio.coroutine - def _add_linux_ethernet(self, port_info, bridge_name): + async def _add_linux_ethernet(self, port_info, bridge_name): """ Connects an Ethernet interface on Linux using raw sockets. @@ -356,41 +350,38 @@ class Cloud(BaseNode): break i += 1 - yield from self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap)) - yield from self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface)) + await self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap)) + await self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface)) else: - yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface)) + await self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface)) - @asyncio.coroutine - def _add_osx_ethernet(self, port_info, bridge_name): + async def _add_osx_ethernet(self, port_info, bridge_name): """ Connects an Ethernet interface on OSX using libpcap. """ # Wireless adapters are not well supported by the libpcap on OSX - if (yield from self._is_wifi_adapter_osx(port_info["interface"])): + if (await self._is_wifi_adapter_osx(port_info["interface"])): raise NodeError("Connecting to a Wireless adapter is not supported on Mac OS") if port_info["interface"].startswith("vmnet"): # Use a special NIO to connect to VMware vmnet interfaces on OSX (libpcap doesn't support them) - yield from self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=bridge_name, + await self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"])) return if not gns3server.utils.interfaces.has_netmask(port_info["interface"]): raise NodeError("Interface {} has no netmask, interface down?".format(port_info["interface"])) - yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"])) + await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"])) - @asyncio.coroutine - def _add_windows_ethernet(self, port_info, bridge_name): + async def _add_windows_ethernet(self, port_info, bridge_name): """ Connects an Ethernet interface on Windows. 
""" if not gns3server.utils.interfaces.has_netmask(port_info["interface"]): raise NodeError("Interface {} has no netmask, interface down?".format(port_info["interface"])) - yield from self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"])) + await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"])) - @asyncio.coroutine - def add_nio(self, nio, port_number): + async def add_nio(self, nio, port_number): """ Adds a NIO as new port on this cloud. @@ -406,23 +397,22 @@ class Cloud(BaseNode): nio=nio, port=port_number)) try: - yield from self.start() - yield from self._add_ubridge_connection(nio, port_number) + await self.start() + await self._add_ubridge_connection(nio, port_number) self._nios[port_number] = nio except NodeError as e: self.project.emit("log.error", {"message": str(e)}) - yield from self._stop_ubridge() + await self._stop_ubridge() self.status = "stopped" self._nios[port_number] = nio # Cleanup stuff except UbridgeError as e: self.project.emit("log.error", {"message": str(e)}) - yield from self._stop_ubridge() + await self._stop_ubridge() self.status = "stopped" self._nios[port_number] = nio - @asyncio.coroutine - def update_nio(self, port_number, nio): + async def update_nio(self, port_number, nio): """ Update an nio on this node @@ -432,10 +422,9 @@ class Cloud(BaseNode): bridge_name = "{}-{}".format(self._id, port_number) if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running(): - yield from self._ubridge_apply_filters(bridge_name, nio.filters) + await self._ubridge_apply_filters(bridge_name, nio.filters) - @asyncio.coroutine - def _delete_ubridge_connection(self, port_number): + async def _delete_ubridge_connection(self, port_number): """ Deletes a connection in uBridge. @@ -443,10 +432,9 @@ class Cloud(BaseNode): """ bridge_name = "{}-{}".format(self._id, port_number) - yield from self._ubridge_send("bridge delete {name}".format(name=bridge_name)) + await self._ubridge_send("bridge delete {name}".format(name=bridge_name)) - @asyncio.coroutine - def remove_nio(self, port_number): + async def remove_nio(self, port_number): """ Removes the specified NIO as member of cloud. @@ -469,12 +457,11 @@ class Cloud(BaseNode): del self._nios[port_number] if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running(): - yield from self._delete_ubridge_connection(port_number) - yield from self.start() + await self._delete_ubridge_connection(port_number) + await self.start() return nio - @asyncio.coroutine - def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): + async def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. 
@@ -496,14 +483,13 @@ class Cloud(BaseNode): raise NodeError("Packet capture is already activated on port {port_number}".format(port_number=port_number)) nio.startPacketCapture(output_file) bridge_name = "{}-{}".format(self._id, port_number) - yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=bridge_name, + await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=bridge_name, output_file=output_file)) log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name, id=self.id, port_number=port_number)) - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. @@ -520,7 +506,7 @@ class Cloud(BaseNode): nio = self._nios[port_number] nio.stopPacketCapture() bridge_name = "{}-{}".format(self._id, port_number) - yield from self._ubridge_send("bridge stop_capture {name}".format(name=bridge_name)) + await self._ubridge_send("bridge stop_capture {name}".format(name=bridge_name)) log.info("Cloud'{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name, id=self.id, diff --git a/gns3server/compute/builtin/nodes/ethernet_hub.py b/gns3server/compute/builtin/nodes/ethernet_hub.py index ebc561ec..6137b588 100644 --- a/gns3server/compute/builtin/nodes/ethernet_hub.py +++ b/gns3server/compute/builtin/nodes/ethernet_hub.py @@ -44,8 +44,7 @@ class EthernetHub(BaseNode): "node_id": self.id, "project_id": self.project.id} - @asyncio.coroutine - def create(self): + async def create(self): """ Creates this hub. """ @@ -53,16 +52,14 @@ class EthernetHub(BaseNode): super().create() log.info('Ethernet hub "{name}" [{id}] has been created'.format(name=self._name, id=self._id)) - @asyncio.coroutine - def delete(self): + async def delete(self): """ Deletes this hub. """ raise NotImplementedError() - @asyncio.coroutine - def add_nio(self, nio, port_number): + async def add_nio(self, nio, port_number): """ Adds a NIO as new port on this hub. @@ -72,8 +69,7 @@ class EthernetHub(BaseNode): raise NotImplementedError() - @asyncio.coroutine - def remove_nio(self, port_number): + async def remove_nio(self, port_number): """ Removes the specified NIO as member of this hub. @@ -84,8 +80,7 @@ class EthernetHub(BaseNode): raise NotImplementedError() - @asyncio.coroutine - def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): + async def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. @@ -96,8 +91,7 @@ class EthernetHub(BaseNode): raise NotImplementedError() - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. diff --git a/gns3server/compute/builtin/nodes/ethernet_switch.py b/gns3server/compute/builtin/nodes/ethernet_switch.py index 523c3d01..69c5b736 100644 --- a/gns3server/compute/builtin/nodes/ethernet_switch.py +++ b/gns3server/compute/builtin/nodes/ethernet_switch.py @@ -44,8 +44,7 @@ class EthernetSwitch(BaseNode): "node_id": self.id, "project_id": self.project.id} - @asyncio.coroutine - def create(self): + async def create(self): """ Creates this switch. """ @@ -53,16 +52,14 @@ class EthernetSwitch(BaseNode): super().create() log.info('Ethernet switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id)) - @asyncio.coroutine - def delete(self): + async def delete(self): """ Deletes this switch. 
""" raise NotImplementedError() - @asyncio.coroutine - def add_nio(self, nio, port_number): + async def add_nio(self, nio, port_number): """ Adds a NIO as new port on this switch. @@ -72,8 +69,7 @@ class EthernetSwitch(BaseNode): raise NotImplementedError() - @asyncio.coroutine - def remove_nio(self, port_number): + async def remove_nio(self, port_number): """ Removes the specified NIO as member of this switch. @@ -84,8 +80,7 @@ class EthernetSwitch(BaseNode): raise NotImplementedError() - @asyncio.coroutine - def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): + async def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. @@ -96,8 +91,7 @@ class EthernetSwitch(BaseNode): raise NotImplementedError() - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. diff --git a/gns3server/compute/docker/__init__.py b/gns3server/compute/docker/__init__.py index 53948a52..4d8af0f6 100644 --- a/gns3server/compute/docker/__init__.py +++ b/gns3server/compute/docker/__init__.py @@ -54,14 +54,13 @@ class Docker(BaseManager): self._session = None self._api_version = DOCKER_MINIMUM_API_VERSION - @asyncio.coroutine - def _check_connection(self): + async def _check_connection(self): if not self._connected: try: self._connected = True connector = self.connector() - version = yield from self.query("GET", "version") + version = await self.query("GET", "version") except (aiohttp.ClientOSError, FileNotFoundError): self._connected = False raise DockerError("Can't connect to docker daemon") @@ -88,16 +87,14 @@ class Docker(BaseManager): raise DockerError("Can't connect to docker daemon") return self._connector - @asyncio.coroutine - def unload(self): + async def unload(self): - yield from super().unload() + await super().unload() if self._connected: if self._connector and not self._connector.closed: self._connector.close() - @asyncio.coroutine - def query(self, method, path, data={}, params={}): + async def query(self, method, path, data={}, params={}): """ Makes a query to the Docker daemon and decode the request @@ -107,8 +104,8 @@ class Docker(BaseManager): :param params: Parameters added as a query arg """ - response = yield from self.http_query(method, path, data=data, params=params) - body = yield from response.read() + response = await self.http_query(method, path, data=data, params=params) + body = await response.read() if body and len(body): if response.headers['CONTENT-TYPE'] == 'application/json': body = json.loads(body.decode("utf-8")) @@ -117,8 +114,7 @@ class Docker(BaseManager): log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body) return body - @asyncio.coroutine - def http_query(self, method, path, data={}, params={}, timeout=300): + async def http_query(self, method, path, data={}, params={}, timeout=300): """ Makes a query to the docker daemon @@ -140,11 +136,11 @@ class Docker(BaseManager): url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path try: if path != "version": # version is use by check connection - yield from self._check_connection() + await self._check_connection() if self._session is None or self._session.closed: connector = self.connector() self._session = aiohttp.ClientSession(connector=connector) - response = yield from self._session.request( + response = await self._session.request( method, url, params=params, @@ -157,7 +153,7 @@ class Docker(BaseManager): except 
(asyncio.TimeoutError): raise DockerError("Docker timeout " + method + " " + path) if response.status >= 300: - body = yield from response.read() + body = await response.read() try: body = json.loads(body.decode("utf-8"))["message"] except ValueError: @@ -171,8 +167,7 @@ class Docker(BaseManager): raise DockerError("Docker has returned an error: {} {}".format(response.status, body)) return response - @asyncio.coroutine - def websocket_query(self, path, params={}): + async def websocket_query(self, path, params={}): """ Opens a websocket connection @@ -182,14 +177,13 @@ class Docker(BaseManager): """ url = "http://docker/v" + self._api_version + "/" + path - connection = yield from self._session.ws_connect(url, + connection = await self._session.ws_connect(url, origin="http://docker", autoping=True) return connection @locking - @asyncio.coroutine - def pull_image(self, image, progress_callback=None): + async def pull_image(self, image, progress_callback=None): """ Pulls an image from the Docker repository @@ -198,19 +192,19 @@ class Docker(BaseManager): """ try: - yield from self.query("GET", "images/{}/json".format(image)) + await self.query("GET", "images/{}/json".format(image)) return # We already have the image skip the download except DockerHttp404Error: pass if progress_callback: progress_callback("Pulling '{}' from docker hub".format(image)) - response = yield from self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None) + response = await self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None) # The pull api will stream status via an HTTP JSON stream content = "" while True: try: - chunk = yield from response.content.read(1024) + chunk = await response.content.read(1024) except aiohttp.ServerDisconnectedError: log.error("Disconnected from server while pulling Docker image '{}' from docker hub".format(image)) break @@ -234,8 +228,7 @@ class Docker(BaseManager): if progress_callback: progress_callback("Success pulling image {}".format(image)) - @asyncio.coroutine - def list_images(self): + async def list_images(self): """ Gets Docker image list. @@ -244,7 +237,7 @@ class Docker(BaseManager): """ images = [] - for image in (yield from self.query("GET", "images/json", params={"all": 0})): + for image in (await self.query("GET", "images/json", params={"all": 0})): if image['RepoTags']: for tag in image['RepoTags']: if tag != ":": diff --git a/gns3server/compute/docker/docker_vm.py b/gns3server/compute/docker/docker_vm.py index 78741a80..9a6c06d7 100644 --- a/gns3server/compute/docker/docker_vm.py +++ b/gns3server/compute/docker/docker_vm.py @@ -29,7 +29,6 @@ import os from gns3server.utils.asyncio.telnet_server import AsyncioTelnetServer from gns3server.utils.asyncio.raw_command_server import AsyncioRawCommandServer from gns3server.utils.asyncio import wait_for_file_creation -from gns3server.utils.asyncio import asyncio_ensure_future from gns3server.utils.asyncio import monitor_process from gns3server.utils.get_resource import get_resource @@ -195,8 +194,7 @@ class DockerVM(BaseNode): def extra_hosts(self, extra_hosts): self._extra_hosts = extra_hosts - @asyncio.coroutine - def _get_container_state(self): + async def _get_container_state(self): """ Returns the container state (e.g. running, paused etc.) 
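
The Docker manager changes above also show why the aiohttp code reads more naturally after the conversion: query() awaits http_query(), which awaits ClientSession.request(), and the response body is then awaited and JSON-decoded in one place. A stripped-down sketch of that request/decode flow against a plain HTTP endpoint; the URL is hypothetical, and the unix-socket connector, connection check and error translation from the patch are left out:

import asyncio
import json

import aiohttp

async def query(method, url, params=None):
    # condensed version of Docker.query()/http_query() from the hunks above
    async with aiohttp.ClientSession() as session:
        async with session.request(method, url, params=params) as response:
            body = await response.read()
            if body and response.headers.get("CONTENT-TYPE") == "application/json":
                return json.loads(body.decode("utf-8"))
            return body.decode("utf-8", errors="ignore")

if __name__ == "__main__":
    # e.g. a Docker daemon exposed over TCP (address is an assumption)
    result = asyncio.get_event_loop().run_until_complete(
        query("GET", "http://127.0.0.1:2375/version"))
    print(result)
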
@@ -205,7 +203,7 @@ class DockerVM(BaseNode): """ try: - result = yield from self.manager.query("GET", "containers/{}/json".format(self._cid)) + result = await self.manager.query("GET", "containers/{}/json".format(self._cid)) except DockerError: return "exited" @@ -215,13 +213,12 @@ class DockerVM(BaseNode): return "running" return "exited" - @asyncio.coroutine - def _get_image_information(self): + async def _get_image_information(self): """ :returns: Dictionary information about the container image """ - result = yield from self.manager.query("GET", "images/{}/json".format(self._image)) + result = await self.manager.query("GET", "images/{}/json".format(self._image)) return result def _mount_binds(self, image_info): @@ -288,18 +285,17 @@ class DockerVM(BaseNode): # iface eth{adapter} inet dhcp""".format(adapter=adapter)) return path - @asyncio.coroutine - def create(self): + async def create(self): """ Creates the Docker container. """ try: - image_infos = yield from self._get_image_information() + image_infos = await self._get_image_information() except DockerHttp404Error: log.info("Image '{}' is missing, pulling it from Docker hub...".format(self._image)) - yield from self.pull_image(self._image) - image_infos = yield from self._get_image_information() + await self.pull_image(self._image) + image_infos = await self._get_image_information() if image_infos is None: raise DockerError("Cannot get information for image '{}', please try again.".format(self._image)) @@ -359,7 +355,7 @@ class DockerVM(BaseNode): params["Env"].append(formatted) if self._console_type == "vnc": - yield from self._start_vnc() + await self._start_vnc() params["Env"].append("QT_GRAPHICSSYSTEM=native") # To fix a Qt issue: https://github.com/GNS3/gns3-server/issues/556 params["Env"].append("DISPLAY=:{}".format(self._display)) params["HostConfig"]["Binds"].append("/tmp/.X11-unix/:/tmp/.X11-unix/") @@ -369,7 +365,7 @@ class DockerVM(BaseNode): if extra_hosts: params["Env"].append("GNS3_EXTRA_HOSTS={}".format(extra_hosts)) - result = yield from self.manager.query("POST", "containers/create", data=params) + result = await self.manager.query("POST", "containers/create", data=params) self._cid = result['Id'] log.info("Docker container '{name}' [{id}] created".format(name=self._name, id=self._id)) return True @@ -393,8 +389,7 @@ class DockerVM(BaseNode): raise DockerError("Can't apply `ExtraHosts`, wrong format: {}".format(extra_hosts)) return "\n".join(["{}\t{}".format(h[1], h[0]) for h in hosts]) - @asyncio.coroutine - def update(self): + async def update(self): """ Destroy an recreate the container with the new settings """ @@ -402,66 +397,65 @@ class DockerVM(BaseNode): # We need to save the console and state and restore it console = self.console aux = self.aux - state = yield from self._get_container_state() + state = await self._get_container_state() # reset the docker container, but don't release the NIO UDP ports - yield from self.reset(False) - yield from self.create() + await self.reset(False) + await self.create() self.console = console self.aux = aux if state == "running": - yield from self.start() + await self.start() - @asyncio.coroutine - def start(self): + async def start(self): """ Starts this Docker container. """ try: - state = yield from self._get_container_state() + state = await self._get_container_state() except DockerHttp404Error: raise DockerError("Docker container '{name}' with ID {cid} does not exist or is not ready yet. 
Please try again in a few seconds.".format(name=self.name, cid=self._cid)) if state == "paused": - yield from self.unpause() + await self.unpause() elif state == "running": return else: if self._console_type == "vnc" and not self._x11vnc_process: # start the x11vnc process in case it had previously crashed - self._x11vnc_process = yield from asyncio.create_subprocess_exec("x11vnc", "-forever", "-nopw", "-shared", "-geometry", self._console_resolution, "-display", "WAIT:{}".format(self._display), "-rfbport", str(self.console), "-rfbportv6", str(self.console), "-noncache", "-listen", self._manager.port_manager.console_host) + self._x11vnc_process = await asyncio.create_subprocess_exec("x11vnc", "-forever", "-nopw", "-shared", "-geometry", self._console_resolution, "-display", "WAIT:{}".format(self._display), "-rfbport", str(self.console), "-rfbportv6", str(self.console), "-noncache", "-listen", self._manager.port_manager.console_host) - yield from self._clean_servers() + await self._clean_servers() - yield from self.manager.query("POST", "containers/{}/start".format(self._cid)) - self._namespace = yield from self._get_namespace() + await self.manager.query("POST", "containers/{}/start".format(self._cid)) + self._namespace = await self._get_namespace() - yield from self._start_ubridge() + await self._start_ubridge() for adapter_number in range(0, self.adapters): nio = self._ethernet_adapters[adapter_number].get_nio(0) - with (yield from self.manager.ubridge_lock): + async with self.manager.ubridge_lock: try: - yield from self._add_ubridge_connection(nio, adapter_number) + await self._add_ubridge_connection(nio, adapter_number) except UbridgeNamespaceError: log.error("Container %s failed to start", self.name) - yield from self.stop() + await self.stop() # The container can crash soon after the start, this means we can not move the interface to the container namespace - logdata = yield from self._get_log() + logdata = await self._get_log() for line in logdata.split('\n'): log.error(line) raise DockerError(logdata) if self.console_type == "telnet": - yield from self._start_console() + await self._start_console() elif self.console_type == "http" or self.console_type == "https": - yield from self._start_http() + await self._start_http() if self.allocate_aux: - yield from self._start_aux() + await self._start_aux() self.status = "started" log.info("Docker container '{name}' [{image}] started listen for {console_type} on {console}".format(name=self._name, @@ -469,8 +463,7 @@ class DockerVM(BaseNode): console=self.console, console_type=self.console_type)) - @asyncio.coroutine - def _start_aux(self): + async def _start_aux(self): """ Start an auxiliary console """ @@ -478,7 +471,7 @@ class DockerVM(BaseNode): # We can not use the API because docker doesn't expose a websocket api for exec # https://github.com/GNS3/gns3-gui/issues/1039 try: - process = yield from asyncio.subprocess.create_subprocess_exec( + process = await asyncio.subprocess.create_subprocess_exec( "docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do TERM=vt100 /gns3/bin/busybox sh; done", "/dev/null", stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT, @@ -487,29 +480,28 @@ class DockerVM(BaseNode): raise DockerError("Could not start auxiliary console process: {}".format(e)) server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=True, echo=True) try: - self._telnet_servers.append((yield from asyncio.start_server(server.run, 
self._manager.port_manager.console_host, self.aux))) + self._telnet_servers.append((await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux))) except OSError as e: raise DockerError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.aux, e)) log.debug("Docker container '%s' started listen for auxilary telnet on %d", self.name, self.aux) - @asyncio.coroutine - def _fix_permissions(self): + async def _fix_permissions(self): """ Because docker run as root we need to fix permission and ownership to allow user to interact with it from their filesystem and do operation like file delete """ - state = yield from self._get_container_state() + state = await self._get_container_state() if state == "stopped" or state == "exited": # We need to restart it to fix permissions - yield from self.manager.query("POST", "containers/{}/start".format(self._cid)) + await self.manager.query("POST", "containers/{}/start".format(self._cid)) for volume in self._volumes: log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format( name=self._name, image=self._image, path=volume)) try: - process = yield from asyncio.subprocess.create_subprocess_exec( + process = await asyncio.subprocess.create_subprocess_exec( "docker", "exec", self._cid, @@ -526,10 +518,9 @@ class DockerVM(BaseNode): ) except OSError as e: raise DockerError("Could not fix permissions for {}: {}".format(volume, e)) - yield from process.wait() + await process.wait() - @asyncio.coroutine - def _start_vnc(self): + async def _start_vnc(self): """ Starts a VNC server for this container """ @@ -537,11 +528,11 @@ class DockerVM(BaseNode): self._display = self._get_free_display_port() if shutil.which("Xvfb") is None or shutil.which("x11vnc") is None: raise DockerError("Please install Xvfb and x11vnc before using VNC support") - self._xvfb_process = yield from asyncio.create_subprocess_exec("Xvfb", "-nolisten", "tcp", ":{}".format(self._display), "-screen", "0", self._console_resolution + "x16") + self._xvfb_process = await asyncio.create_subprocess_exec("Xvfb", "-nolisten", "tcp", ":{}".format(self._display), "-screen", "0", self._console_resolution + "x16") # We pass a port for TCPV6 due to a crash in X11VNC if not here: https://github.com/GNS3/gns3-server/issues/569 - self._x11vnc_process = yield from asyncio.create_subprocess_exec("x11vnc", "-forever", "-nopw", "-shared", "-geometry", self._console_resolution, "-display", "WAIT:{}".format(self._display), "-rfbport", str(self.console), "-rfbportv6", str(self.console), "-noncache", "-listen", self._manager.port_manager.console_host) + self._x11vnc_process = await asyncio.create_subprocess_exec("x11vnc", "-forever", "-nopw", "-shared", "-geometry", self._console_resolution, "-display", "WAIT:{}".format(self._display), "-rfbport", str(self.console), "-rfbportv6", str(self.console), "-noncache", "-listen", self._manager.port_manager.console_host) x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display)) - yield from wait_for_file_creation(x11_socket) + await wait_for_file_creation(x11_socket) # sometimes the x11vnc process can crash monitor_process(self._x11vnc_process, self._x11vnc_callback) @@ -557,8 +548,7 @@ class DockerVM(BaseNode): self.project.emit("log.error", {"message": "The x11vnc process has stopped with return code {} for node '{}'. 
Please restart this node.".format(returncode, self.name)}) self._x11vnc_process = None - @asyncio.coroutine - def _start_http(self): + async def _start_http(self): """ Starts an HTTP tunnel to container localhost. It's not perfect but the only way we have to inject network packet is using nc. @@ -577,10 +567,9 @@ class DockerVM(BaseNode): ':{}'.format(self.console).encode(), ) ]) - self._telnet_servers.append((yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console))) + self._telnet_servers.append((await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console))) - @asyncio.coroutine - def _start_console(self): + async def _start_console(self): """ Starts streaming the console via telnet """ @@ -593,8 +582,7 @@ class DockerVM(BaseNode): def write(self, data): self._data += data - @asyncio.coroutine - def drain(self): + async def drain(self): if not self.ws.closed: self.ws.send_bytes(self._data) self._data = b"" @@ -604,19 +592,18 @@ class DockerVM(BaseNode): telnet = AsyncioTelnetServer(reader=output_stream, writer=input_stream, echo=True) try: - self._telnet_servers.append((yield from asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console))) + self._telnet_servers.append((await asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console))) except OSError as e: raise DockerError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e)) - self._console_websocket = yield from self.manager.websocket_query("containers/{}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1".format(self._cid)) + self._console_websocket = await self.manager.websocket_query("containers/{}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1".format(self._cid)) input_stream.ws = self._console_websocket output_stream.feed_data(self.name.encode() + b" console is now available... Press RETURN to get started.\r\n") - asyncio_ensure_future(self._read_console_output(self._console_websocket, output_stream)) + asyncio.ensure_future(self._read_console_output(self._console_websocket, output_stream)) - @asyncio.coroutine - def _read_console_output(self, ws, out): + async def _read_console_output(self, ws, out): """ Reads Websocket and forward it to the telnet @@ -625,7 +612,7 @@ class DockerVM(BaseNode): """ while True: - msg = yield from ws.receive() + msg = await ws.receive() if msg.tp == aiohttp.WSMsgType.text: out.feed_data(msg.data.encode()) elif msg.tp == aiohttp.WSMsgType.BINARY: @@ -636,10 +623,9 @@ class DockerVM(BaseNode): out.feed_eof() ws.close() break - yield from self.stop() + await self.stop() - @asyncio.coroutine - def is_running(self): + async def is_running(self): """ Checks if the container is running. @@ -647,25 +633,23 @@ class DockerVM(BaseNode): :rtype: bool """ - state = yield from self._get_container_state() + state = await self._get_container_state() if state == "running": return True if self.status == "started": # The container crashed we need to clean - yield from self.stop() + await self.stop() return False - @asyncio.coroutine - def restart(self): + async def restart(self): """ Restart this Docker container. 
""" - yield from self.manager.query("POST", "containers/{}/restart".format(self._cid)) + await self.manager.query("POST", "containers/{}/restart".format(self._cid)) log.info("Docker container '{name}' [{image}] restarted".format( name=self._name, image=self._image)) - @asyncio.coroutine - def _clean_servers(self): + async def _clean_servers(self): """ Clean the list of running console servers """ @@ -673,34 +657,33 @@ class DockerVM(BaseNode): if len(self._telnet_servers) > 0: for telnet_server in self._telnet_servers: telnet_server.close() - yield from telnet_server.wait_closed() + await telnet_server.wait_closed() self._telnet_servers = [] - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops this Docker container. """ try: - yield from self._clean_servers() - yield from self._stop_ubridge() + await self._clean_servers() + await self._stop_ubridge() try: - state = yield from self._get_container_state() + state = await self._get_container_state() except DockerHttp404Error: self.status = "stopped" return if state == "paused": - yield from self.unpause() + await self.unpause() - yield from self._fix_permissions() - state = yield from self._get_container_state() + await self._fix_permissions() + state = await self._get_container_state() if state != "stopped" or state != "exited": # t=5 number of seconds to wait before killing the container try: - yield from self.manager.query("POST", "containers/{}/stop".format(self._cid), params={"t": 5}) + await self.manager.query("POST", "containers/{}/stop".format(self._cid), params={"t": 5}) log.info("Docker container '{name}' [{image}] stopped".format( name=self._name, image=self._image)) except DockerHttp304Error: @@ -712,56 +695,52 @@ class DockerVM(BaseNode): return self.status = "stopped" - @asyncio.coroutine - def pause(self): + async def pause(self): """ Pauses this Docker container. """ - yield from self.manager.query("POST", "containers/{}/pause".format(self._cid)) + await self.manager.query("POST", "containers/{}/pause".format(self._cid)) self.status = "suspended" log.info("Docker container '{name}' [{image}] paused".format(name=self._name, image=self._image)) - @asyncio.coroutine - def unpause(self): + async def unpause(self): """ Unpauses this Docker container. """ - yield from self.manager.query("POST", "containers/{}/unpause".format(self._cid)) + await self.manager.query("POST", "containers/{}/unpause".format(self._cid)) self.status = "started" log.info("Docker container '{name}' [{image}] unpaused".format(name=self._name, image=self._image)) - @asyncio.coroutine - def close(self): + async def close(self): """ Closes this Docker container. 
""" self._closing = True - if not (yield from super().close()): + if not (await super().close()): return False - yield from self.reset() + await self.reset() - @asyncio.coroutine - def reset(self, release_nio_udp_ports=True): + async def reset(self, release_nio_udp_ports=True): try: - state = yield from self._get_container_state() + state = await self._get_container_state() if state == "paused" or state == "running": - yield from self.stop() + await self.stop() if self.console_type == "vnc": if self._x11vnc_process: try: self._x11vnc_process.terminate() - yield from self._x11vnc_process.wait() + await self._x11vnc_process.wait() except ProcessLookupError: pass if self._xvfb_process: try: self._xvfb_process.terminate() - yield from self._xvfb_process.wait() + await self._xvfb_process.wait() except ProcessLookupError: pass @@ -776,7 +755,7 @@ class DockerVM(BaseNode): # v – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false. # force - 1/True/true or 0/False/false, Kill then remove the container. Default false. try: - yield from self.manager.query("DELETE", "containers/{}".format(self._cid), params={"force": 1, "v": 1}) + await self.manager.query("DELETE", "containers/{}".format(self._cid), params={"force": 1, "v": 1}) except DockerError: pass log.info("Docker container '{name}' [{image}] removed".format( @@ -793,8 +772,7 @@ class DockerVM(BaseNode): log.debug("Docker error when closing: {}".format(str(e))) return - @asyncio.coroutine - def _add_ubridge_connection(self, nio, adapter_number): + async def _add_ubridge_connection(self, nio, adapter_number): """ Creates a connection in uBridge. @@ -816,44 +794,41 @@ class DockerVM(BaseNode): raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. 
Too many Docker interfaces already exists".format(name=self.name, adapter_number=adapter_number)) bridge_name = 'bridge{}'.format(adapter_number) - yield from self._ubridge_send('bridge create {}'.format(bridge_name)) + await self._ubridge_send('bridge create {}'.format(bridge_name)) self._bridges.add(bridge_name) - yield from self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number, + await self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number, hostif=adapter.host_ifc)) log.debug("Move container %s adapter %s to namespace %s", self.name, adapter.host_ifc, self._namespace) try: - yield from self._ubridge_send('docker move_to_ns {ifc} {ns} eth{adapter}'.format(ifc=adapter.host_ifc, + await self._ubridge_send('docker move_to_ns {ifc} {ns} eth{adapter}'.format(ifc=adapter.host_ifc, ns=self._namespace, adapter=adapter_number)) except UbridgeError as e: raise UbridgeNamespaceError(e) if nio: - yield from self._connect_nio(adapter_number, nio) + await self._connect_nio(adapter_number, nio) - @asyncio.coroutine - def _get_namespace(self): + async def _get_namespace(self): - result = yield from self.manager.query("GET", "containers/{}/json".format(self._cid)) + result = await self.manager.query("GET", "containers/{}/json".format(self._cid)) return int(result['State']['Pid']) - @asyncio.coroutine - def _connect_nio(self, adapter_number, nio): + async def _connect_nio(self, adapter_number, nio): bridge_name = 'bridge{}'.format(adapter_number) - yield from self._ubridge_send('bridge add_nio_udp {bridge_name} {lport} {rhost} {rport}'.format(bridge_name=bridge_name, + await self._ubridge_send('bridge add_nio_udp {bridge_name} {lport} {rhost} {rport}'.format(bridge_name=bridge_name, lport=nio.lport, rhost=nio.rhost, rport=nio.rport)) if nio.capturing: - yield from self._ubridge_send('bridge start_capture {bridge_name} "{pcap_file}"'.format(bridge_name=bridge_name, + await self._ubridge_send('bridge start_capture {bridge_name} "{pcap_file}"'.format(bridge_name=bridge_name, pcap_file=nio.pcap_output_file)) - yield from self._ubridge_send('bridge start {bridge_name}'.format(bridge_name=bridge_name)) - yield from self._ubridge_apply_filters(bridge_name, nio.filters) + await self._ubridge_send('bridge start {bridge_name}'.format(bridge_name=bridge_name)) + await self._ubridge_apply_filters(bridge_name, nio.filters) - @asyncio.coroutine - def adapter_add_nio_binding(self, adapter_number, nio): + async def adapter_add_nio_binding(self, adapter_number, nio): """ Adds an adapter NIO binding. @@ -868,7 +843,7 @@ class DockerVM(BaseNode): adapter_number=adapter_number)) if self.status == "started" and self.ubridge: - yield from self._connect_nio(adapter_number, nio) + await self._connect_nio(adapter_number, nio) adapter.add_nio(0, nio) log.info("Docker container '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name, @@ -876,8 +851,7 @@ class DockerVM(BaseNode): nio=nio, adapter_number=adapter_number)) - @asyncio.coroutine - def adapter_update_nio_binding(self, adapter_number, nio): + async def adapter_update_nio_binding(self, adapter_number, nio): """ Update a port NIO binding. 
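# --- Illustrative sketch (not part of the upstream patch) ---------------------
# The hunks above chain several uBridge commands per adapter (create the bridge,
# attach the NIO, start it), each now awaited directly.  A minimal stand-alone
# approximation of that sequence; `ubridge_send` is a stand-in for the real
# BaseNode._ubridge_send coroutine.
import asyncio

async def ubridge_send(command):
    # stand-in for the real uBridge hypervisor call; just echo the command
    print("ubridge>", command)

async def connect_adapter(adapter_number, lport, rhost, rport):
    bridge_name = "bridge{}".format(adapter_number)
    await ubridge_send("bridge create {}".format(bridge_name))
    await ubridge_send("bridge add_nio_udp {} {} {} {}".format(bridge_name, lport, rhost, rport))
    await ubridge_send("bridge start {}".format(bridge_name))

if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(connect_adapter(0, 20000, "127.0.0.1", 30000))
# ------------------------------------------------------------------------------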
@@ -888,10 +862,9 @@ class DockerVM(BaseNode): if self.ubridge: bridge_name = 'bridge{}'.format(adapter_number) if bridge_name in self._bridges: - yield from self._ubridge_apply_filters(bridge_name, nio.filters) + await self._ubridge_apply_filters(bridge_name, nio.filters) - @asyncio.coroutine - def adapter_remove_nio_binding(self, adapter_number): + async def adapter_remove_nio_binding(self, adapter_number): """ Removes an adapter NIO binding. @@ -909,8 +882,8 @@ class DockerVM(BaseNode): if self.ubridge: nio = adapter.get_nio(0) bridge_name = 'bridge{}'.format(adapter_number) - yield from self._ubridge_send("bridge stop {}".format(bridge_name)) - yield from self._ubridge_send('bridge remove_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number, + await self._ubridge_send("bridge stop {}".format(bridge_name)) + await self._ubridge_send('bridge remove_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number, lport=nio.lport, rhost=nio.rhost, rport=nio.rport)) @@ -952,18 +925,16 @@ class DockerVM(BaseNode): id=self._id, adapters=adapters)) - @asyncio.coroutine - def pull_image(self, image): + async def pull_image(self, image): """ Pulls an image from Docker repository """ def callback(msg): self.project.emit("log.info", {"message": msg}) - yield from self.manager.pull_image(image, progress_callback=callback) + await self.manager.pull_image(image, progress_callback=callback) - @asyncio.coroutine - def _start_ubridge_capture(self, adapter_number, output_file): + async def _start_ubridge_capture(self, adapter_number, output_file): """ Starts a packet capture in uBridge. @@ -974,10 +945,9 @@ class DockerVM(BaseNode): adapter = "bridge{}".format(adapter_number) if not self.ubridge: raise DockerError("Cannot start the packet capture: uBridge is not running") - yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=adapter, output_file=output_file)) + await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=adapter, output_file=output_file)) - @asyncio.coroutine - def _stop_ubridge_capture(self, adapter_number): + async def _stop_ubridge_capture(self, adapter_number): """ Stops a packet capture in uBridge. @@ -987,10 +957,9 @@ class DockerVM(BaseNode): adapter = "bridge{}".format(adapter_number) if not self.ubridge: raise DockerError("Cannot stop the packet capture: uBridge is not running") - yield from self._ubridge_send("bridge stop_capture {name}".format(name=adapter)) + await self._ubridge_send("bridge stop_capture {name}".format(name=adapter)) - @asyncio.coroutine - def start_capture(self, adapter_number, output_file): + async def start_capture(self, adapter_number, output_file): """ Starts a packet capture. @@ -1015,13 +984,13 @@ class DockerVM(BaseNode): nio.startPacketCapture(output_file) if self.status == "started" and self.ubridge: - yield from self._start_ubridge_capture(adapter_number, output_file) + await self._start_ubridge_capture(adapter_number, output_file) log.info("Docker VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, adapter_number=adapter_number)) - def stop_capture(self, adapter_number): + async def stop_capture(self, adapter_number): """ Stops a packet capture. 
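# --- Illustrative sketch (not part of the upstream patch) ---------------------
# Every hunk in this file applies the same mechanical rewrite: drop the
# @asyncio.coroutine decorator, declare the method with `async def`, and replace
# `yield from` with `await`.  A minimal before/after example of that pattern:
#
#     # old style (Python 3.4 compatible, removed by this patch)
#     @asyncio.coroutine
#     def stop(self):
#         yield from asyncio.sleep(1)
#
# New style (Python 3.5+ only):
import asyncio

class Node:

    async def stop(self):
        # any coroutine call is awaited directly
        await asyncio.sleep(1)
        return "stopped"

if __name__ == "__main__":
    print(asyncio.get_event_loop().run_until_complete(Node().stop()))
# ------------------------------------------------------------------------------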
@@ -1042,28 +1011,26 @@ class DockerVM(BaseNode): nio.stopPacketCapture() if self.status == "started" and self.ubridge: - yield from self._stop_ubridge_capture(adapter_number) + await self._stop_ubridge_capture(adapter_number) log.info("Docker VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, adapter_number=adapter_number)) - @asyncio.coroutine - def _get_log(self): + async def _get_log(self): """ Returns the log from the container :returns: string """ - result = yield from self.manager.query("GET", "containers/{}/logs".format(self._cid), params={"stderr": 1, "stdout": 1}) + result = await self.manager.query("GET", "containers/{}/logs".format(self._cid), params={"stderr": 1, "stdout": 1}) return result - @asyncio.coroutine - def delete(self): + async def delete(self): """ Deletes the VM (including all its files). """ - yield from self.close() - yield from super().delete() + await self.close() + await super().delete() diff --git a/gns3server/compute/dynamips/__init__.py b/gns3server/compute/dynamips/__init__.py index 140c2af8..87f65d14 100644 --- a/gns3server/compute/dynamips/__init__.py +++ b/gns3server/compute/dynamips/__init__.py @@ -34,9 +34,8 @@ import re log = logging.getLogger(__name__) from gns3server.utils.interfaces import interfaces, is_interface_up -from gns3server.utils.asyncio import wait_run_in_executor, asyncio_ensure_future +from gns3server.utils.asyncio import wait_run_in_executor from gns3server.utils import parse_version -from gns3server.utils.asyncio import asyncio_ensure_future from uuid import uuid4 from ..base_manager import BaseManager from ..port_manager import PortManager @@ -166,17 +165,16 @@ class Dynamips(BaseManager): if dynamips_id in self._dynamips_ids[project_id]: self._dynamips_ids[project_id].remove(dynamips_id) - @asyncio.coroutine - def unload(self): + async def unload(self): - yield from BaseManager.unload(self) + await BaseManager.unload(self) tasks = [] for device in self._devices.values(): - tasks.append(asyncio_ensure_future(device.hypervisor.stop())) + tasks.append(asyncio.ensure_future(device.hypervisor.stop())) if tasks: - done, _ = yield from asyncio.wait(tasks) + done, _ = await asyncio.wait(tasks) for future in done: try: future.result() @@ -184,37 +182,35 @@ class Dynamips(BaseManager): log.error("Could not stop device hypervisor {}".format(e), exc_info=1) continue - @asyncio.coroutine - def project_closing(self, project): + async def project_closing(self, project): """ Called when a project is about to be closed. :param project: Project instance """ - yield from super().project_closing(project) + await super().project_closing(project) # delete the Dynamips devices corresponding to the project tasks = [] for device in self._devices.values(): if device.project.id == project.id: - tasks.append(asyncio_ensure_future(device.delete())) + tasks.append(asyncio.ensure_future(device.delete())) if tasks: - done, _ = yield from asyncio.wait(tasks) + done, _ = await asyncio.wait(tasks) for future in done: try: future.result() except (Exception, GeneratorExit) as e: log.error("Could not delete device {}".format(e), exc_info=1) - @asyncio.coroutine - def project_closed(self, project): + async def project_closed(self, project): """ Called when a project is closed. 
:param project: Project instance """ - yield from super().project_closed(project) + await super().project_closed(project) # delete useless Dynamips files project_dir = project.module_working_path(self.module_name.lower()) @@ -230,7 +226,7 @@ class Dynamips(BaseManager): log.debug("Deleting file {}".format(file)) if file in self._ghost_files: self._ghost_files.remove(file) - yield from wait_run_in_executor(os.remove, file) + await wait_run_in_executor(os.remove, file) except OSError as e: log.warning("Could not delete file {}: {}".format(file, e)) continue @@ -267,8 +263,7 @@ class Dynamips(BaseManager): self._dynamips_path = dynamips_path return dynamips_path - @asyncio.coroutine - def start_new_hypervisor(self, working_dir=None): + async def start_new_hypervisor(self, working_dir=None): """ Creates a new Dynamips process and start it. @@ -306,27 +301,25 @@ class Dynamips(BaseManager): hypervisor = Hypervisor(self._dynamips_path, working_dir, server_host, port, port_manager.console_host) log.info("Creating new hypervisor {}:{} with working directory {}".format(hypervisor.host, hypervisor.port, working_dir)) - yield from hypervisor.start() + await hypervisor.start() log.info("Hypervisor {}:{} has successfully started".format(hypervisor.host, hypervisor.port)) - yield from hypervisor.connect() + await hypervisor.connect() if parse_version(hypervisor.version) < parse_version('0.2.11'): raise DynamipsError("Dynamips version must be >= 0.2.11, detected version is {}".format(hypervisor.version)) return hypervisor - @asyncio.coroutine - def ghost_ios_support(self, vm): + async def ghost_ios_support(self, vm): ghost_ios_support = self.config.get_section_config("Dynamips").getboolean("ghost_ios_support", True) if ghost_ios_support: - with (yield from Dynamips._ghost_ios_lock): + async with Dynamips._ghost_ios_lock: try: - yield from self._set_ghost_ios(vm) + await self._set_ghost_ios(vm) except GeneratorExit: log.warning("Could not create ghost IOS image {} (GeneratorExit)".format(vm.name)) - @asyncio.coroutine - def create_nio(self, node, nio_settings): + async def create_nio(self, node, nio_settings): """ Creates a new NIO. @@ -394,11 +387,10 @@ class Dynamips(BaseManager): else: raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_settings["type"])) - yield from nio.create() + await nio.create() return nio - @asyncio.coroutine - def _set_ghost_ios(self, vm): + async def _set_ghost_ios(self, vm): """ Manages Ghost IOS support. 
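# --- Illustrative sketch (not part of the upstream patch) ---------------------
# ghost_ios_support() above also changes how the module-wide lock is taken:
# `with (yield from Dynamips._ghost_ios_lock):` becomes
# `async with Dynamips._ghost_ios_lock:`.  A self-contained sketch of that
# pattern; the lock and worker below are stand-ins, not the real Dynamips code.
import asyncio

_ghost_lock = asyncio.Lock()

async def build_ghost_image(name):
    # only one ghost image may be generated at a time
    async with _ghost_lock:
        await asyncio.sleep(0.1)  # stand-in for the real ghost creation work
        print("ghost image built for", name)

async def main():
    await asyncio.gather(build_ghost_image("R1"), build_ghost_image("R2"))

if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(main())
# ------------------------------------------------------------------------------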
@@ -421,29 +413,28 @@ class Dynamips(BaseManager): ghost_id = str(uuid4()) ghost = Router("ghost-" + ghost_file, ghost_id, vm.project, vm.manager, platform=vm.platform, hypervisor=vm.hypervisor, ghost_flag=True) try: - yield from ghost.create() - yield from ghost.set_image(vm.image) - yield from ghost.set_ghost_status(1) - yield from ghost.set_ghost_file(ghost_file_path) - yield from ghost.set_ram(vm.ram) + await ghost.create() + await ghost.set_image(vm.image) + await ghost.set_ghost_status(1) + await ghost.set_ghost_file(ghost_file_path) + await ghost.set_ram(vm.ram) try: - yield from ghost.start() - yield from ghost.stop() + await ghost.start() + await ghost.stop() self._ghost_files.add(ghost_file_path) except DynamipsError: raise finally: - yield from ghost.clean_delete() + await ghost.clean_delete() except DynamipsError as e: log.warning("Could not create ghost instance: {}".format(e)) if vm.ghost_file != ghost_file and os.path.isfile(ghost_file_path): # set the ghost file to the router - yield from vm.set_ghost_status(2) - yield from vm.set_ghost_file(ghost_file_path) + await vm.set_ghost_status(2) + await vm.set_ghost_file(ghost_file_path) - @asyncio.coroutine - def update_vm_settings(self, vm, settings): + async def update_vm_settings(self, vm, settings): """ Updates the VM settings. @@ -455,23 +446,23 @@ class Dynamips(BaseManager): if hasattr(vm, name) and getattr(vm, name) != value: if hasattr(vm, "set_{}".format(name)): setter = getattr(vm, "set_{}".format(name)) - yield from setter(value) + await setter(value) elif name.startswith("slot") and value in ADAPTER_MATRIX: slot_id = int(name[-1]) adapter_name = value adapter = ADAPTER_MATRIX[adapter_name]() try: if vm.slots[slot_id] and not isinstance(vm.slots[slot_id], type(adapter)): - yield from vm.slot_remove_binding(slot_id) + await vm.slot_remove_binding(slot_id) if not isinstance(vm.slots[slot_id], type(adapter)): - yield from vm.slot_add_binding(slot_id, adapter) + await vm.slot_add_binding(slot_id, adapter) except IndexError: raise DynamipsError("Slot {} doesn't exist on this router".format(slot_id)) elif name.startswith("slot") and (value is None or value is ""): slot_id = int(name[-1]) try: if vm.slots[slot_id]: - yield from vm.slot_remove_binding(slot_id) + await vm.slot_remove_binding(slot_id) except IndexError: raise DynamipsError("Slot {} doesn't exist on this router".format(slot_id)) elif name.startswith("wic") and value in WIC_MATRIX: @@ -480,32 +471,31 @@ class Dynamips(BaseManager): wic = WIC_MATRIX[wic_name]() try: if vm.slots[0].wics[wic_slot_id] and not isinstance(vm.slots[0].wics[wic_slot_id], type(wic)): - yield from vm.uninstall_wic(wic_slot_id) + await vm.uninstall_wic(wic_slot_id) if not isinstance(vm.slots[0].wics[wic_slot_id], type(wic)): - yield from vm.install_wic(wic_slot_id, wic) + await vm.install_wic(wic_slot_id, wic) except IndexError: raise DynamipsError("WIC slot {} doesn't exist on this router".format(wic_slot_id)) elif name.startswith("wic") and (value is None or value is ""): wic_slot_id = int(name[-1]) try: if vm.slots[0].wics and vm.slots[0].wics[wic_slot_id]: - yield from vm.uninstall_wic(wic_slot_id) + await vm.uninstall_wic(wic_slot_id) except IndexError: raise DynamipsError("WIC slot {} doesn't exist on this router".format(wic_slot_id)) mmap_support = self.config.get_section_config("Dynamips").getboolean("mmap_support", True) if mmap_support is False: - yield from vm.set_mmap(False) + await vm.set_mmap(False) sparse_memory_support = 
self.config.get_section_config("Dynamips").getboolean("sparse_memory_support", True) if sparse_memory_support is False: - yield from vm.set_sparsemem(False) + await vm.set_sparsemem(False) # update the configs if needed - yield from self.set_vm_configs(vm, settings) + await self.set_vm_configs(vm, settings) - @asyncio.coroutine - def set_vm_configs(self, vm, settings): + async def set_vm_configs(self, vm, settings): """ Set VM configs from pushed content or existing config files. @@ -554,25 +544,24 @@ class Dynamips(BaseManager): return os.path.join("configs", os.path.basename(path)) - @asyncio.coroutine - def auto_idlepc(self, vm): + async def auto_idlepc(self, vm): """ Try to find the best possible idle-pc value. :param vm: VM instance """ - yield from vm.set_idlepc("0x0") + await vm.set_idlepc("0x0") was_auto_started = False old_priority = None try: - status = yield from vm.get_status() + status = await vm.get_status() if status != "running": - yield from vm.start() + await vm.start() was_auto_started = True - yield from asyncio.sleep(20) # leave time to the router to boot + await asyncio.sleep(20) # leave time to the router to boot validated_idlepc = None - idlepcs = yield from vm.get_idle_pc_prop() + idlepcs = await vm.get_idle_pc_prop() if not idlepcs: raise DynamipsError("No Idle-PC values found") @@ -582,14 +571,14 @@ class Dynamips(BaseManager): match = re.search(r"^0x[0-9a-f]{8}$", idlepc.split()[0]) if not match: continue - yield from vm.set_idlepc(idlepc.split()[0]) + await vm.set_idlepc(idlepc.split()[0]) log.debug("Auto Idle-PC: trying idle-PC value {}".format(vm.idlepc)) start_time = time.time() - initial_cpu_usage = yield from vm.get_cpu_usage() + initial_cpu_usage = await vm.get_cpu_usage() log.debug("Auto Idle-PC: initial CPU usage is {}%".format(initial_cpu_usage)) - yield from asyncio.sleep(3) # wait 3 seconds to probe the cpu again + await asyncio.sleep(3) # wait 3 seconds to probe the cpu again elapsed_time = time.time() - start_time - cpu_usage = yield from vm.get_cpu_usage() + cpu_usage = await vm.get_cpu_usage() cpu_elapsed_usage = cpu_usage - initial_cpu_usage cpu_usage = abs(cpu_elapsed_usage * 100.0 / elapsed_time) if cpu_usage > 100: @@ -609,11 +598,10 @@ class Dynamips(BaseManager): if old_priority is not None: vm.set_process_priority_windows(vm.hypervisor.process.pid, old_priority) if was_auto_started: - yield from vm.stop() + await vm.stop() return validated_idlepc - @asyncio.coroutine - def duplicate_node(self, source_node_id, destination_node_id): + async def duplicate_node(self, source_node_id, destination_node_id): """ Duplicate a node @@ -626,7 +614,7 @@ class Dynamips(BaseManager): # Not a Dynamips router if not hasattr(source_node, "startup_config_path"): - return (yield from super().duplicate_node(source_node_id, destination_node_id)) + return (await super().duplicate_node(source_node_id, destination_node_id)) try: with open(source_node.startup_config_path) as f: @@ -638,13 +626,13 @@ class Dynamips(BaseManager): private_config = f.read() except OSError: private_config = None - yield from self.set_vm_configs(destination_node, { + await self.set_vm_configs(destination_node, { "startup_config_content": startup_config, "private_config_content": private_config }) # Force refresh of the name in configuration files new_name = destination_node.name - yield from destination_node.set_name(source_node.name) - yield from destination_node.set_name(new_name) + await destination_node.set_name(source_node.name) + await destination_node.set_name(new_name) return 
destination_node diff --git a/gns3server/compute/dynamips/dynamips_hypervisor.py b/gns3server/compute/dynamips/dynamips_hypervisor.py index e59ab2dc..b21e6494 100644 --- a/gns3server/compute/dynamips/dynamips_hypervisor.py +++ b/gns3server/compute/dynamips/dynamips_hypervisor.py @@ -59,8 +59,7 @@ class DynamipsHypervisor: self._writer = None self._io_lock = asyncio.Lock() - @asyncio.coroutine - def connect(self, timeout=10): + async def connect(self, timeout=10): """ Connects to the hypervisor. """ @@ -78,9 +77,9 @@ class DynamipsHypervisor: connection_success = False last_exception = None while time.time() - begin < timeout: - yield from asyncio.sleep(0.01) + await asyncio.sleep(0.01) try: - self._reader, self._writer = yield from asyncio.wait_for(asyncio.open_connection(host, self._port), timeout=1) + self._reader, self._writer = await asyncio.wait_for(asyncio.open_connection(host, self._port), timeout=1) except (asyncio.TimeoutError, OSError) as e: last_exception = e continue @@ -93,13 +92,13 @@ class DynamipsHypervisor: log.info("Connected to Dynamips hypervisor on {}:{} after {:.4f} seconds".format(host, self._port, time.time() - begin)) try: - version = yield from self.send("hypervisor version") + version = await self.send("hypervisor version") self._version = version[0].split("-", 1)[0] except IndexError: self._version = "Unknown" # this forces to send the working dir to Dynamips - yield from self.set_working_dir(self._working_dir) + await self.set_working_dir(self._working_dir) @property def version(self): @@ -111,45 +110,41 @@ class DynamipsHypervisor: return self._version - @asyncio.coroutine - def close(self): + async def close(self): """ Closes the connection to this hypervisor (but leave it running). """ - yield from self.send("hypervisor close") + await self.send("hypervisor close") self._writer.close() self._reader, self._writer = None - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops this hypervisor (will no longer run). """ try: # try to properly stop the hypervisor - yield from self.send("hypervisor stop") + await self.send("hypervisor stop") except DynamipsError: pass try: if self._writer is not None: - yield from self._writer.drain() + await self._writer.drain() self._writer.close() except OSError as e: log.debug("Stopping hypervisor {}:{} {}".format(self._host, self._port, e)) self._reader = self._writer = None - @asyncio.coroutine - def reset(self): + async def reset(self): """ Resets this hypervisor (used to get an empty configuration). """ - yield from self.send("hypervisor reset") + await self.send("hypervisor reset") - @asyncio.coroutine - def set_working_dir(self, working_dir): + async def set_working_dir(self, working_dir): """ Sets the working directory for this hypervisor. @@ -157,7 +152,7 @@ class DynamipsHypervisor: """ # encase working_dir in quotes to protect spaces in the path - yield from self.send('hypervisor working_dir "{}"'.format(working_dir)) + await self.send('hypervisor working_dir "{}"'.format(working_dir)) self._working_dir = working_dir log.debug("Working directory set to {}".format(self._working_dir)) @@ -221,8 +216,7 @@ class DynamipsHypervisor: self._host = host - @asyncio.coroutine - def send(self, command): + async def send(self, command): """ Sends commands to this hypervisor. @@ -244,7 +238,7 @@ class DynamipsHypervisor: # but still have more data. 
The only thing we know for sure is the last line # will begin with '100-' or a '2xx-' and end with '\r\n' - with (yield from self._io_lock): + async with self._io_lock: if self._writer is None or self._reader is None: raise DynamipsError("Not connected") @@ -252,7 +246,7 @@ class DynamipsHypervisor: command = command.strip() + '\n' log.debug("sending {}".format(command)) self._writer.write(command.encode()) - yield from self._writer.drain() + await self._writer.drain() except OSError as e: raise DynamipsError("Could not send Dynamips command '{command}' to {host}:{port}: {error}, process running: {run}" .format(command=command.strip(), host=self._host, port=self._port, error=e, run=self.is_running())) @@ -265,8 +259,8 @@ class DynamipsHypervisor: while True: try: try: - # line = yield from self._reader.readline() # this can lead to ValueError: Line is too long - chunk = yield from self._reader.read(1024) # match to Dynamips' buffer size + # line = await self._reader.readline() # this can lead to ValueError: Line is too long + chunk = await self._reader.read(1024) # match to Dynamips' buffer size except asyncio.CancelledError: # task has been canceled but continue to read # any remaining data sent by the hypervisor @@ -283,7 +277,7 @@ class DynamipsHypervisor: .format(host=self._host, port=self._port, run=self.is_running())) else: retries += 1 - yield from asyncio.sleep(0.1) + await asyncio.sleep(0.1) continue retries = 0 buf += chunk.decode("utf-8", errors="ignore") diff --git a/gns3server/compute/dynamips/hypervisor.py b/gns3server/compute/dynamips/hypervisor.py index f3a692ed..d0ef0a2d 100644 --- a/gns3server/compute/dynamips/hypervisor.py +++ b/gns3server/compute/dynamips/hypervisor.py @@ -111,8 +111,7 @@ class Hypervisor(DynamipsHypervisor): self._path = path - @asyncio.coroutine - def start(self): + async def start(self): """ Starts the Dynamips hypervisor process. """ @@ -129,7 +128,7 @@ class Hypervisor(DynamipsHypervisor): self._stdout_file = os.path.join(self.working_dir, "dynamips_i{}_stdout.txt".format(self._id)) log.info("Dynamips process logging to {}".format(self._stdout_file)) with open(self._stdout_file, "w", encoding="utf-8") as fd: - self._process = yield from asyncio.create_subprocess_exec(*self._command, + self._process = await asyncio.create_subprocess_exec(*self._command, stdout=fd, stderr=subprocess.STDOUT, cwd=self._working_dir, @@ -140,20 +139,19 @@ class Hypervisor(DynamipsHypervisor): log.error("Could not start Dynamips: {}".format(e)) raise DynamipsError("Could not start Dynamips: {}".format(e)) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the Dynamips hypervisor process. """ if self.is_running(): log.info("Stopping Dynamips process PID={}".format(self._process.pid)) - yield from DynamipsHypervisor.stop(self) + await DynamipsHypervisor.stop(self) # give some time for the hypervisor to properly stop. # time to delete UNIX NIOs for instance. - yield from asyncio.sleep(0.01) + await asyncio.sleep(0.01) try: - yield from wait_for_process_termination(self._process, timeout=3) + await wait_for_process_termination(self._process, timeout=3) except asyncio.TimeoutError: if self._process.returncode is None: log.warning("Dynamips process {} is still running... 
killing it".format(self._process.pid)) diff --git a/gns3server/compute/dynamips/nios/nio.py b/gns3server/compute/dynamips/nios/nio.py index d6a4f9b0..dddf08ae 100644 --- a/gns3server/compute/dynamips/nios/nio.py +++ b/gns3server/compute/dynamips/nios/nio.py @@ -48,53 +48,48 @@ class NIO: self._output_filter_options = None # no output filter options by default self._dynamips_direction = {"in": 0, "out": 1, "both": 2} - @asyncio.coroutine - def list(self): + async def list(self): """ Returns all NIOs. :returns: NIO list """ - nio_list = yield from self._hypervisor.send("nio list") + nio_list = await self._hypervisor.send("nio list") return nio_list - @asyncio.coroutine - def delete(self): + async def delete(self): """ Deletes this NIO. """ if self._input_filter or self._output_filter: - yield from self.unbind_filter("both") - yield from self._hypervisor.send("nio delete {}".format(self._name)) + await self.unbind_filter("both") + await self._hypervisor.send("nio delete {}".format(self._name)) log.info("NIO {name} has been deleted".format(name=self._name)) - @asyncio.coroutine - def rename(self, new_name): + async def rename(self, new_name): """ Renames this NIO :param new_name: new NIO name """ - yield from self._hypervisor.send("nio rename {name} {new_name}".format(name=self._name, new_name=new_name)) + await self._hypervisor.send("nio rename {name} {new_name}".format(name=self._name, new_name=new_name)) log.info("NIO {name} renamed to {new_name}".format(name=self._name, new_name=new_name)) self._name = new_name - @asyncio.coroutine - def debug(self, debug): + async def debug(self, debug): """ Enables/Disables debugging for this NIO. :param debug: debug value (0 = disable, enable = 1) """ - yield from self._hypervisor.send("nio set_debug {name} {debug}".format(name=self._name, debug=debug)) + await self._hypervisor.send("nio set_debug {name} {debug}".format(name=self._name, debug=debug)) - @asyncio.coroutine - def bind_filter(self, direction, filter_name): + async def bind_filter(self, direction, filter_name): """ Adds a packet filter to this NIO. Filter "freq_drop" drops packets. @@ -108,7 +103,7 @@ class NIO: raise DynamipsError("Unknown direction {} to bind filter {}:".format(direction, filter_name)) dynamips_direction = self._dynamips_direction[direction] - yield from self._hypervisor.send("nio bind_filter {name} {direction} {filter}".format(name=self._name, + await self._hypervisor.send("nio bind_filter {name} {direction} {filter}".format(name=self._name, direction=dynamips_direction, filter=filter_name)) @@ -120,8 +115,7 @@ class NIO: self._input_filter = filter_name self._output_filter = filter_name - @asyncio.coroutine - def unbind_filter(self, direction): + async def unbind_filter(self, direction): """ Removes packet filter for this NIO. @@ -132,7 +126,7 @@ class NIO: raise DynamipsError("Unknown direction {} to unbind filter:".format(direction)) dynamips_direction = self._dynamips_direction[direction] - yield from self._hypervisor.send("nio unbind_filter {name} {direction}".format(name=self._name, + await self._hypervisor.send("nio unbind_filter {name} {direction}".format(name=self._name, direction=dynamips_direction)) if direction == "in": @@ -143,8 +137,7 @@ class NIO: self._input_filter = None self._output_filter = None - @asyncio.coroutine - def setup_filter(self, direction, options): + async def setup_filter(self, direction, options): """ Setups a packet filter bound with this NIO. 
@@ -166,7 +159,7 @@ class NIO: raise DynamipsError("Unknown direction {} to setup filter:".format(direction)) dynamips_direction = self._dynamips_direction[direction] - yield from self._hypervisor.send("nio setup_filter {name} {direction} {options}".format(name=self._name, + await self._hypervisor.send("nio setup_filter {name} {direction} {options}".format(name=self._name, direction=dynamips_direction, options=options)) @@ -198,24 +191,22 @@ class NIO: return self._output_filter, self._output_filter_options - @asyncio.coroutine - def get_stats(self): + async def get_stats(self): """ Gets statistics for this NIO. :returns: NIO statistics (string with packets in, packets out, bytes in, bytes out) """ - stats = yield from self._hypervisor.send("nio get_stats {}".format(self._name)) + stats = await self._hypervisor.send("nio get_stats {}".format(self._name)) return stats[0] - @asyncio.coroutine - def reset_stats(self): + async def reset_stats(self): """ Resets statistics for this NIO. """ - yield from self._hypervisor.send("nio reset_stats {}".format(self._name)) + await self._hypervisor.send("nio reset_stats {}".format(self._name)) @property def bandwidth(self): @@ -227,15 +218,14 @@ class NIO: return self._bandwidth - @asyncio.coroutine - def set_bandwidth(self, bandwidth): + async def set_bandwidth(self, bandwidth): """ Sets bandwidth constraint. :param bandwidth: bandwidth integer value (in Kb/s) """ - yield from self._hypervisor.send("nio set_bandwidth {name} {bandwidth}".format(name=self._name, bandwidth=bandwidth)) + await self._hypervisor.send("nio set_bandwidth {name} {bandwidth}".format(name=self._name, bandwidth=bandwidth)) self._bandwidth = bandwidth @property diff --git a/gns3server/compute/dynamips/nios/nio_generic_ethernet.py b/gns3server/compute/dynamips/nios/nio_generic_ethernet.py index 870ecb13..533de664 100644 --- a/gns3server/compute/dynamips/nios/nio_generic_ethernet.py +++ b/gns3server/compute/dynamips/nios/nio_generic_ethernet.py @@ -43,10 +43,9 @@ class NIOGenericEthernet(NIO): self._ethernet_device = ethernet_device super().__init__(name, hypervisor) - @asyncio.coroutine - def create(self): + async def create(self): - yield from self._hypervisor.send("nio create_gen_eth {name} {eth_device}".format(name=self._name, + await self._hypervisor.send("nio create_gen_eth {name} {eth_device}".format(name=self._name, eth_device=self._ethernet_device)) log.info("NIO Generic Ethernet {name} created with device {device}".format(name=self._name, diff --git a/gns3server/compute/dynamips/nios/nio_linux_ethernet.py b/gns3server/compute/dynamips/nios/nio_linux_ethernet.py index cf96f4af..d032202b 100644 --- a/gns3server/compute/dynamips/nios/nio_linux_ethernet.py +++ b/gns3server/compute/dynamips/nios/nio_linux_ethernet.py @@ -42,10 +42,9 @@ class NIOLinuxEthernet(NIO): self._ethernet_device = ethernet_device super().__init__(name, hypervisor) - @asyncio.coroutine - def create(self): + async def create(self): - yield from self._hypervisor.send("nio create_linux_eth {name} {eth_device}".format(name=self._name, + await self._hypervisor.send("nio create_linux_eth {name} {eth_device}".format(name=self._name, eth_device=self._ethernet_device)) log.info("NIO Linux Ethernet {name} created with device {device}".format(name=self._name, diff --git a/gns3server/compute/dynamips/nios/nio_null.py b/gns3server/compute/dynamips/nios/nio_null.py index 174dd032..6524de40 100644 --- a/gns3server/compute/dynamips/nios/nio_null.py +++ b/gns3server/compute/dynamips/nios/nio_null.py @@ -41,10 +41,9 @@ class 
NIONull(NIO): name = 'null-{}'.format(uuid.uuid4()) super().__init__(name, hypervisor) - @asyncio.coroutine - def create(self): + async def create(self): - yield from self._hypervisor.send("nio create_null {}".format(self._name)) + await self._hypervisor.send("nio create_null {}".format(self._name)) log.info("NIO NULL {name} created.".format(name=self._name)) def __json__(self): diff --git a/gns3server/compute/dynamips/nios/nio_tap.py b/gns3server/compute/dynamips/nios/nio_tap.py index 20fdaeaa..ea5c8926 100644 --- a/gns3server/compute/dynamips/nios/nio_tap.py +++ b/gns3server/compute/dynamips/nios/nio_tap.py @@ -43,10 +43,9 @@ class NIOTAP(NIO): self._tap_device = tap_device super().__init__(name, hypervisor) - @asyncio.coroutine - def create(self): + async def create(self): - yield from self._hypervisor.send("nio create_tap {name} {tap}".format(name=self._name, tap=self._tap_device)) + await self._hypervisor.send("nio create_tap {name} {tap}".format(name=self._name, tap=self._tap_device)) log.info("NIO TAP {name} created with device {device}".format(name=self._name, device=self._tap_device)) @property diff --git a/gns3server/compute/dynamips/nios/nio_udp.py b/gns3server/compute/dynamips/nios/nio_udp.py index 55714808..987840b3 100644 --- a/gns3server/compute/dynamips/nios/nio_udp.py +++ b/gns3server/compute/dynamips/nios/nio_udp.py @@ -53,13 +53,12 @@ class NIOUDP(NIO): self._node = node super().__init__(name, node.hypervisor) - @asyncio.coroutine - def create(self): + async def create(self): if not self._hypervisor: return # Ubridge is not supported if not hasattr(self._node, "add_ubridge_udp_connection"): - yield from self._hypervisor.send("nio create_udp {name} {lport} {rhost} {rport}".format(name=self._name, + await self._hypervisor.send("nio create_udp {name} {lport} {rhost} {rport}".format(name=self._name, lport=self._lport, rhost=self._rhost, rport=self._rport)) @@ -67,7 +66,7 @@ class NIOUDP(NIO): self._local_tunnel_lport = self._node.manager.port_manager.get_free_udp_port(self._node.project) self._local_tunnel_rport = self._node.manager.port_manager.get_free_udp_port(self._node.project) self._bridge_name = 'DYNAMIPS-{}-{}'.format(self._local_tunnel_lport, self._local_tunnel_rport) - yield from self._hypervisor.send("nio create_udp {name} {lport} {rhost} {rport}".format(name=self._name, + await self._hypervisor.send("nio create_udp {name} {lport} {rhost} {rport}".format(name=self._name, lport=self._local_tunnel_lport, rhost='127.0.0.1', rport=self._local_tunnel_rport)) @@ -84,24 +83,22 @@ class NIOUDP(NIO): self._rhost, self._rport) self._destination_nio.filters = self._filters - yield from self._node.add_ubridge_udp_connection( + await self._node.add_ubridge_udp_connection( self._bridge_name, self._source_nio, self._destination_nio ) - @asyncio.coroutine - def update(self): + async def update(self): self._destination_nio.filters = self._filters - yield from self._node.update_ubridge_udp_connection( + await self._node.update_ubridge_udp_connection( self._bridge_name, self._source_nio, self._destination_nio) - @asyncio.coroutine - def close(self): + async def close(self): if self._local_tunnel_lport: - yield from self._node.ubridge_delete_bridge(self._bridge_name) + await self._node.ubridge_delete_bridge(self._bridge_name) self._node.manager.port_manager.release_udp_port(self._local_tunnel_lport, self ._node.project) if self._local_tunnel_rport: self._node.manager.port_manager.release_udp_port(self._local_tunnel_rport, self._node.project) diff --git 
a/gns3server/compute/dynamips/nios/nio_unix.py b/gns3server/compute/dynamips/nios/nio_unix.py index 048572a4..64eeca7b 100644 --- a/gns3server/compute/dynamips/nios/nio_unix.py +++ b/gns3server/compute/dynamips/nios/nio_unix.py @@ -45,10 +45,9 @@ class NIOUNIX(NIO): self._remote_file = remote_file super().__init__(name, hypervisor) - @asyncio.coroutine - def create(self): + async def create(self): - yield from self._hypervisor.send("nio create_unix {name} {local} {remote}".format(name=self._name, + await self._hypervisor.send("nio create_unix {name} {local} {remote}".format(name=self._name, local=self._local_file, remote=self._remote_file)) diff --git a/gns3server/compute/dynamips/nios/nio_vde.py b/gns3server/compute/dynamips/nios/nio_vde.py index 099db5a3..00701f3a 100644 --- a/gns3server/compute/dynamips/nios/nio_vde.py +++ b/gns3server/compute/dynamips/nios/nio_vde.py @@ -45,10 +45,9 @@ class NIOVDE(NIO): self._local_file = local_file super().__init__(name, hypervisor) - @asyncio.coroutine - def create(self): + async def create(self): - yield from self._hypervisor.send("nio create_vde {name} {control} {local}".format(name=self._name, + await self._hypervisor.send("nio create_vde {name} {control} {local}".format(name=self._name, control=self._control_file, local=self._local_file)) diff --git a/gns3server/compute/dynamips/nodes/atm_switch.py b/gns3server/compute/dynamips/nodes/atm_switch.py index 38cac774..7b1896eb 100644 --- a/gns3server/compute/dynamips/nodes/atm_switch.py +++ b/gns3server/compute/dynamips/nodes/atm_switch.py @@ -64,26 +64,24 @@ class ATMSwitch(Device): "mappings": mappings, "status": "started"} - @asyncio.coroutine - def create(self): + async def create(self): if self._hypervisor is None: module_workdir = self.project.module_working_directory(self.manager.module_name.lower()) - self._hypervisor = yield from self.manager.start_new_hypervisor(working_dir=module_workdir) + self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir) - yield from self._hypervisor.send('atmsw create "{}"'.format(self._name)) + await self._hypervisor.send('atmsw create "{}"'.format(self._name)) log.info('ATM switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id)) self._hypervisor.devices.append(self) - @asyncio.coroutine - def set_name(self, new_name): + async def set_name(self, new_name): """ Renames this ATM switch. 
:param new_name: New name for this switch """ - yield from self._hypervisor.send('atm rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) + await self._hypervisor.send('atm rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) log.info('ATM switch "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name, id=self._id, new_name=new_name)) @@ -119,31 +117,29 @@ class ATMSwitch(Device): self._mappings = mappings - @asyncio.coroutine - def close(self): + async def close(self): for nio in self._nios.values(): if nio: - yield from nio.close() + await nio.close() if self._hypervisor: try: - yield from self._hypervisor.send('atmsw delete "{}"'.format(self._name)) + await self._hypervisor.send('atmsw delete "{}"'.format(self._name)) log.info('ATM switch "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id)) except DynamipsError: log.debug("Could not properly delete ATM switch {}".format(self._name)) if self._hypervisor and self in self._hypervisor.devices: self._hypervisor.devices.remove(self) if self._hypervisor and not self._hypervisor.devices: - yield from self.hypervisor.stop() + await self.hypervisor.stop() self._hypervisor = None return True - @asyncio.coroutine - def delete(self): + async def delete(self): """ Deletes this ATM switch. """ - yield from self.close() + await self.close() def has_port(self, port): """ @@ -156,8 +152,7 @@ class ATMSwitch(Device): return True return False - @asyncio.coroutine - def add_nio(self, nio, port_number): + async def add_nio(self, nio, port_number): """ Adds a NIO as new port on ATM switch. @@ -174,10 +169,9 @@ class ATMSwitch(Device): port=port_number)) self._nios[port_number] = nio - yield from self.set_mappings(self._mappings) + await self.set_mappings(self._mappings) - @asyncio.coroutine - def remove_nio(self, port_number): + async def remove_nio(self, port_number): """ Removes the specified NIO as member of this ATM switch. 
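# --- Illustrative sketch (not part of the upstream patch) ---------------------
# close() above follows a tear-down shape used throughout this module: close
# every attached NIO, remove the device from its hypervisor, and stop the
# hypervisor once it no longer hosts any device.  A simplified stand-alone
# version; FakeNIO and FakeHypervisor are stand-ins for the real classes.
import asyncio

class FakeNIO:
    async def close(self):
        print("nio closed")

class FakeHypervisor:
    def __init__(self):
        self.devices = []
    async def stop(self):
        print("hypervisor stopped")

async def close_device(device_name, nios, hypervisor):
    for nio in nios:
        await nio.close()
    if device_name in hypervisor.devices:
        hypervisor.devices.remove(device_name)
    if not hypervisor.devices:
        # last device gone: shut the shared hypervisor process down
        await hypervisor.stop()

if __name__ == "__main__":
    hv = FakeHypervisor()
    hv.devices.append("ATM1")
    asyncio.get_event_loop().run_until_complete(close_device("ATM1", [FakeNIO()], hv))
# ------------------------------------------------------------------------------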
@@ -202,8 +196,8 @@ class ATMSwitch(Device): destination_port=destination_port, destination_vpi=destination_vpi, destination_vci=destination_vci)) - yield from self.unmap_pvc(source_port, source_vpi, source_vci, destination_port, destination_vpi, destination_vci) - yield from self.unmap_pvc(destination_port, destination_vpi, destination_vci, source_port, source_vpi, source_vci) + await self.unmap_pvc(source_port, source_vpi, source_vci, destination_port, destination_vpi, destination_vci) + await self.unmap_pvc(destination_port, destination_vpi, destination_vci, source_port, source_vpi, source_vci) else: # remove the virtual paths mapped with this port/nio source_port, source_vpi = source @@ -215,8 +209,8 @@ class ATMSwitch(Device): source_vpi=source_vpi, destination_port=destination_port, destination_vpi=destination_vpi)) - yield from self.unmap_vp(source_port, source_vpi, destination_port, destination_vpi) - yield from self.unmap_vp(destination_port, destination_vpi, source_port, source_vpi) + await self.unmap_vp(source_port, source_vpi, destination_port, destination_vpi) + await self.unmap_vp(destination_port, destination_vpi, source_port, source_vpi) nio = self._nios[port_number] if isinstance(nio, NIOUDP): @@ -229,8 +223,7 @@ class ATMSwitch(Device): del self._nios[port_number] return nio - @asyncio.coroutine - def set_mappings(self, mappings): + async def set_mappings(self, mappings): """ Applies VC mappings @@ -258,8 +251,8 @@ class ATMSwitch(Device): destination_port=destination_port, destination_vpi=destination_vpi, destination_vci=destination_vci)) - yield from self.map_pvc(source_port, source_vpi, source_vci, destination_port, destination_vpi, destination_vci) - yield from self.map_pvc(destination_port, destination_vpi, destination_vci, source_port, source_vpi, source_vci) + await self.map_pvc(source_port, source_vpi, source_vci, destination_port, destination_vpi, destination_vci) + await self.map_pvc(destination_port, destination_vpi, destination_vci, source_port, source_vpi, source_vci) else: # add the virtual paths source_port, source_vpi = map(int, source.split(':')) @@ -272,11 +265,10 @@ class ATMSwitch(Device): source_vpi=source_vpi, destination_port=destination_port, destination_vpi=destination_vpi)) - yield from self.map_vp(source_port, source_vpi, destination_port, destination_vpi) - yield from self.map_vp(destination_port, destination_vpi, source_port, source_vpi) + await self.map_vp(source_port, source_vpi, destination_port, destination_vpi) + await self.map_vp(destination_port, destination_vpi, source_port, source_vpi) - @asyncio.coroutine - def map_vp(self, port1, vpi1, port2, vpi2): + async def map_vp(self, port1, vpi1, port2, vpi2): """ Creates a new Virtual Path connection. @@ -295,7 +287,7 @@ class ATMSwitch(Device): nio1 = self._nios[port1] nio2 = self._nios[port2] - yield from self._hypervisor.send('atmsw create_vpc "{name}" {input_nio} {input_vpi} {output_nio} {output_vpi}'.format(name=self._name, + await self._hypervisor.send('atmsw create_vpc "{name}" {input_nio} {input_vpi} {output_nio} {output_vpi}'.format(name=self._name, input_nio=nio1, input_vpi=vpi1, output_nio=nio2, @@ -310,8 +302,7 @@ class ATMSwitch(Device): self._active_mappings[(port1, vpi1)] = (port2, vpi2) - @asyncio.coroutine - def unmap_vp(self, port1, vpi1, port2, vpi2): + async def unmap_vp(self, port1, vpi1, port2, vpi2): """ Deletes a new Virtual Path connection. 
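# --- Illustrative sketch (not part of the upstream patch) ---------------------
# set_mappings() above distinguishes virtual-channel entries ("port:vpi:vci")
# from virtual-path entries ("port:vpi") by counting the colon-separated fields,
# then programs each mapping in both directions.  A stand-alone sketch of that
# parsing, with print() standing in for map_pvc()/map_vp():

def apply_mappings(mappings):
    for source, destination in mappings.items():
        if len(source.split(":")) == 3:
            # PVC mapping: port:vpi:vci <-> port:vpi:vci
            sp, svpi, svci = map(int, source.split(":"))
            dp, dvpi, dvci = map(int, destination.split(":"))
            print("map_pvc", (sp, svpi, svci), "->", (dp, dvpi, dvci))
            print("map_pvc", (dp, dvpi, dvci), "->", (sp, svpi, svci))
        else:
            # VP mapping: port:vpi <-> port:vpi
            sp, svpi = map(int, source.split(":"))
            dp, dvpi = map(int, destination.split(":"))
            print("map_vp", (sp, svpi), "->", (dp, dvpi))
            print("map_vp", (dp, dvpi), "->", (sp, svpi))

if __name__ == "__main__":
    apply_mappings({"1:0:100": "10:0:200", "2:10": "10:20"})
# ------------------------------------------------------------------------------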
@@ -330,7 +321,7 @@ class ATMSwitch(Device): nio1 = self._nios[port1] nio2 = self._nios[port2] - yield from self._hypervisor.send('atmsw delete_vpc "{name}" {input_nio} {input_vpi} {output_nio} {output_vpi}'.format(name=self._name, + await self._hypervisor.send('atmsw delete_vpc "{name}" {input_nio} {input_vpi} {output_nio} {output_vpi}'.format(name=self._name, input_nio=nio1, input_vpi=vpi1, output_nio=nio2, @@ -345,8 +336,7 @@ class ATMSwitch(Device): del self._active_mappings[(port1, vpi1)] - @asyncio.coroutine - def map_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2): + async def map_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2): """ Creates a new Virtual Channel connection (unidirectional). @@ -367,7 +357,7 @@ class ATMSwitch(Device): nio1 = self._nios[port1] nio2 = self._nios[port2] - yield from self._hypervisor.send('atmsw create_vcc "{name}" {input_nio} {input_vpi} {input_vci} {output_nio} {output_vpi} {output_vci}'.format(name=self._name, + await self._hypervisor.send('atmsw create_vcc "{name}" {input_nio} {input_vpi} {input_vci} {output_nio} {output_vpi} {output_vci}'.format(name=self._name, input_nio=nio1, input_vpi=vpi1, input_vci=vci1, @@ -386,8 +376,7 @@ class ATMSwitch(Device): self._active_mappings[(port1, vpi1, vci1)] = (port2, vpi2, vci2) - @asyncio.coroutine - def unmap_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2): + async def unmap_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2): """ Deletes a new Virtual Channel connection (unidirectional). @@ -408,7 +397,7 @@ class ATMSwitch(Device): nio1 = self._nios[port1] nio2 = self._nios[port2] - yield from self._hypervisor.send('atmsw delete_vcc "{name}" {input_nio} {input_vpi} {input_vci} {output_nio} {output_vpi} {output_vci}'.format(name=self._name, + await self._hypervisor.send('atmsw delete_vcc "{name}" {input_nio} {input_vpi} {input_vci} {output_nio} {output_vpi} {output_vci}'.format(name=self._name, input_nio=nio1, input_vpi=vpi1, input_vci=vci1, @@ -426,8 +415,7 @@ class ATMSwitch(Device): vci2=vci2)) del self._active_mappings[(port1, vpi1, vci1)] - @asyncio.coroutine - def start_capture(self, port_number, output_file, data_link_type="DLT_ATM_RFC1483"): + async def start_capture(self, port_number, output_file, data_link_type="DLT_ATM_RFC1483"): """ Starts a packet capture. @@ -448,15 +436,14 @@ class ATMSwitch(Device): if nio.input_filter[0] is not None and nio.output_filter[0] is not None: raise DynamipsError("Port {} has already a filter applied".format(port_number)) - yield from nio.bind_filter("both", "capture") - yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) + await nio.bind_filter("both", "capture") + await nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) log.info('ATM switch "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number)) - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. 
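# --- Illustrative sketch (not part of the upstream patch) ---------------------
# start_capture()/stop_capture() above implement packet capture on Dynamips
# devices purely through NIO filters: bind the "capture" filter on both
# directions, configure it with a data-link type and a pcap path, then unbind
# it to stop.  A stand-alone sketch; FakeNIO is a stand-in for the real NIO.
import asyncio

class FakeNIO:
    async def bind_filter(self, direction, filter_name):
        print("bind", filter_name, "on", direction)
    async def setup_filter(self, direction, options):
        print("setup filter on", direction, "with", options)
    async def unbind_filter(self, direction):
        print("unbind filter on", direction)

async def capture(nio, output_file, data_link_type="DLT_ATM_RFC1483"):
    await nio.bind_filter("both", "capture")
    await nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file))
    # ... capture runs until the filter is removed ...
    await nio.unbind_filter("both")

if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(capture(FakeNIO(), "/tmp/atm0.pcap"))
# ------------------------------------------------------------------------------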
@@ -467,7 +454,7 @@ class ATMSwitch(Device): raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._nios[port_number] - yield from nio.unbind_filter("both") + await nio.unbind_filter("both") log.info('ATM switch "{name}" [{id}]: stopping packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number)) diff --git a/gns3server/compute/dynamips/nodes/bridge.py b/gns3server/compute/dynamips/nodes/bridge.py index b37ae5d8..7146865c 100644 --- a/gns3server/compute/dynamips/nodes/bridge.py +++ b/gns3server/compute/dynamips/nodes/bridge.py @@ -41,25 +41,23 @@ class Bridge(Device): super().__init__(name, node_id, project, manager, hypervisor) self._nios = [] - @asyncio.coroutine - def create(self): + async def create(self): if self._hypervisor is None: module_workdir = self.project.module_working_directory(self.manager.module_name.lower()) - self._hypervisor = yield from self.manager.start_new_hypervisor(working_dir=module_workdir) + self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir) - yield from self._hypervisor.send('nio_bridge create "{}"'.format(self._name)) + await self._hypervisor.send('nio_bridge create "{}"'.format(self._name)) self._hypervisor.devices.append(self) - @asyncio.coroutine - def set_name(self, new_name): + async def set_name(self, new_name): """ Renames this bridge. :param new_name: New name for this bridge """ - yield from self._hypervisor.send('nio_bridge rename "{name}" "{new_name}"'.format(name=self._name, + await self._hypervisor.send('nio_bridge rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) self._name = new_name @@ -74,8 +72,7 @@ class Bridge(Device): return self._nios - @asyncio.coroutine - def delete(self): + async def delete(self): """ Deletes this bridge. """ @@ -83,28 +80,26 @@ class Bridge(Device): if self._hypervisor and self in self._hypervisor.devices: self._hypervisor.devices.remove(self) if self._hypervisor and not self._hypervisor.devices: - yield from self._hypervisor.send('nio_bridge delete "{}"'.format(self._name)) + await self._hypervisor.send('nio_bridge delete "{}"'.format(self._name)) - @asyncio.coroutine - def add_nio(self, nio): + async def add_nio(self, nio): """ Adds a NIO as new port on this bridge. :param nio: NIO instance to add """ - yield from self._hypervisor.send('nio_bridge add_nio "{name}" {nio}'.format(name=self._name, nio=nio)) + await self._hypervisor.send('nio_bridge add_nio "{name}" {nio}'.format(name=self._name, nio=nio)) self._nios.append(nio) - @asyncio.coroutine - def remove_nio(self, nio): + async def remove_nio(self, nio): """ Removes the specified NIO as member of this bridge. 
:param nio: NIO instance to remove """ if self._hypervisor: - yield from self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio)) + await self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio)) self._nios.remove(nio) @property diff --git a/gns3server/compute/dynamips/nodes/c1700.py b/gns3server/compute/dynamips/nodes/c1700.py index 59df0a01..cb4b8537 100644 --- a/gns3server/compute/dynamips/nodes/c1700.py +++ b/gns3server/compute/dynamips/nodes/c1700.py @@ -70,12 +70,11 @@ class C1700(Router): router_info.update(c1700_router_info) return router_info - @asyncio.coroutine - def create(self): + async def create(self): - yield from Router.create(self) + await Router.create(self) if self._chassis != "1720": - yield from self.set_chassis(self._chassis) + await self.set_chassis(self._chassis) self._setup_chassis() def _setup_chassis(self): @@ -103,8 +102,7 @@ class C1700(Router): return self._chassis - @asyncio.coroutine - def set_chassis(self, chassis): + async def set_chassis(self, chassis): """ Sets the chassis. @@ -112,7 +110,7 @@ class C1700(Router): 1720, 1721, 1750, 1751 or 1760 """ - yield from self._hypervisor.send('c1700 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis)) + await self._hypervisor.send('c1700 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis)) log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name, id=self._id, @@ -131,15 +129,14 @@ class C1700(Router): return self._iomem - @asyncio.coroutine - def set_iomem(self, iomem): + async def set_iomem(self, iomem): """ Sets I/O memory size for this router. :param iomem: I/O memory size """ - yield from self._hypervisor.send('c1700 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) + await self._hypervisor.send('c1700 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name, id=self._id, diff --git a/gns3server/compute/dynamips/nodes/c2600.py b/gns3server/compute/dynamips/nodes/c2600.py index a938426c..b065607b 100644 --- a/gns3server/compute/dynamips/nodes/c2600.py +++ b/gns3server/compute/dynamips/nodes/c2600.py @@ -85,12 +85,11 @@ class C2600(Router): router_info.update(c2600_router_info) return router_info - @asyncio.coroutine - def create(self): + async def create(self): - yield from Router.create(self) + await Router.create(self) if self._chassis != "2610": - yield from self.set_chassis(self._chassis) + await self.set_chassis(self._chassis) self._setup_chassis() def _setup_chassis(self): @@ -112,8 +111,7 @@ class C2600(Router): return self._chassis - @asyncio.coroutine - def set_chassis(self, chassis): + async def set_chassis(self, chassis): """ Sets the chassis. @@ -122,7 +120,7 @@ class C2600(Router): 2620XM, 2621XM, 2650XM or 2651XM """ - yield from self._hypervisor.send('c2600 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis)) + await self._hypervisor.send('c2600 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis)) log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name, id=self._id, @@ -140,15 +138,14 @@ class C2600(Router): return self._iomem - @asyncio.coroutine - def set_iomem(self, iomem): + async def set_iomem(self, iomem): """ Sets I/O memory size for this router. 
:param iomem: I/O memory size """ - yield from self._hypervisor.send('c2600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) + await self._hypervisor.send('c2600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name, id=self._id, diff --git a/gns3server/compute/dynamips/nodes/c2691.py b/gns3server/compute/dynamips/nodes/c2691.py index ce2b20de..8441881f 100644 --- a/gns3server/compute/dynamips/nodes/c2691.py +++ b/gns3server/compute/dynamips/nodes/c2691.py @@ -79,15 +79,14 @@ class C2691(Router): return self._iomem - @asyncio.coroutine - def set_iomem(self, iomem): + async def set_iomem(self, iomem): """ Sets I/O memory size for this router. :param iomem: I/O memory size """ - yield from self._hypervisor.send('c2691 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) + await self._hypervisor.send('c2691 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name, id=self._id, diff --git a/gns3server/compute/dynamips/nodes/c3600.py b/gns3server/compute/dynamips/nodes/c3600.py index bcb40377..984a5621 100644 --- a/gns3server/compute/dynamips/nodes/c3600.py +++ b/gns3server/compute/dynamips/nodes/c3600.py @@ -66,12 +66,11 @@ class C3600(Router): router_info.update(c3600_router_info) return router_info - @asyncio.coroutine - def create(self): + async def create(self): - yield from Router.create(self) + await Router.create(self) if self._chassis != "3640": - yield from self.set_chassis(self._chassis) + await self.set_chassis(self._chassis) self._setup_chassis() def _setup_chassis(self): @@ -98,15 +97,14 @@ class C3600(Router): return self._chassis - @asyncio.coroutine - def set_chassis(self, chassis): + async def set_chassis(self, chassis): """ Sets the chassis. :param: chassis string: 3620, 3640 or 3660 """ - yield from self._hypervisor.send('c3600 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis)) + await self._hypervisor.send('c3600 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis)) log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name, id=self._id, @@ -125,15 +123,14 @@ class C3600(Router): return self._iomem - @asyncio.coroutine - def set_iomem(self, iomem): + async def set_iomem(self, iomem): """ Set I/O memory size for this router. :param iomem: I/O memory size """ - yield from self._hypervisor.send('c3600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) + await self._hypervisor.send('c3600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name, id=self._id, diff --git a/gns3server/compute/dynamips/nodes/c3725.py b/gns3server/compute/dynamips/nodes/c3725.py index de1775ea..be194cf5 100644 --- a/gns3server/compute/dynamips/nodes/c3725.py +++ b/gns3server/compute/dynamips/nodes/c3725.py @@ -79,15 +79,14 @@ class C3725(Router): return self._iomem - @asyncio.coroutine - def set_iomem(self, iomem): + async def set_iomem(self, iomem): """ Sets I/O memory size for this router. 
:param iomem: I/O memory size """ - yield from self._hypervisor.send('c3725 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) + await self._hypervisor.send('c3725 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name, id=self._id, diff --git a/gns3server/compute/dynamips/nodes/c3745.py b/gns3server/compute/dynamips/nodes/c3745.py index 2c0eee2f..9087a98f 100644 --- a/gns3server/compute/dynamips/nodes/c3745.py +++ b/gns3server/compute/dynamips/nodes/c3745.py @@ -79,15 +79,14 @@ class C3745(Router): return self._iomem - @asyncio.coroutine - def set_iomem(self, iomem): + async def set_iomem(self, iomem): """ Sets I/O memory size for this router. :param iomem: I/O memory size """ - yield from self._hypervisor.send('c3745 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) + await self._hypervisor.send('c3745 set_iomem "{name}" {size}'.format(name=self._name, size=iomem)) log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name, id=self._id, diff --git a/gns3server/compute/dynamips/nodes/c7200.py b/gns3server/compute/dynamips/nodes/c7200.py index 9fb1eb98..155bc385 100644 --- a/gns3server/compute/dynamips/nodes/c7200.py +++ b/gns3server/compute/dynamips/nodes/c7200.py @@ -86,19 +86,18 @@ class C7200(Router): router_info.update(c7200_router_info) return router_info - @asyncio.coroutine - def create(self): + async def create(self): - yield from Router.create(self) + await Router.create(self) if self._npe != "npe-400": - yield from self.set_npe(self._npe) + await self.set_npe(self._npe) # first slot is a mandatory Input/Output controller (based on NPE type) if self.npe == "npe-g2": - yield from self.slot_add_binding(0, C7200_IO_GE_E()) + await self.slot_add_binding(0, C7200_IO_GE_E()) else: - yield from self.slot_add_binding(0, C7200_IO_FE()) + await self.slot_add_binding(0, C7200_IO_FE()) @property def npe(self): @@ -110,8 +109,7 @@ class C7200(Router): return self._npe - @asyncio.coroutine - def set_npe(self, npe): + async def set_npe(self, npe): """ Sets the NPE model. @@ -120,10 +118,10 @@ class C7200(Router): npe-225, npe-300, npe-400 and npe-g2 (PowerPC c7200 only) """ - if (yield from self.is_running()): + if (await self.is_running()): raise DynamipsError("Cannot change NPE on running router") - yield from self._hypervisor.send('c7200 set_npe "{name}" {npe}'.format(name=self._name, npe=npe)) + await self._hypervisor.send('c7200 set_npe "{name}" {npe}'.format(name=self._name, npe=npe)) log.info('Router "{name}" [{id}]: NPE updated from {old_npe} to {new_npe}'.format(name=self._name, id=self._id, @@ -141,15 +139,14 @@ class C7200(Router): return self._midplane - @asyncio.coroutine - def set_midplane(self, midplane): + async def set_midplane(self, midplane): """ Sets the midplane model. :returns: midplane model string (e.g. 
"vxr" or "std") """ - yield from self._hypervisor.send('c7200 set_midplane "{name}" {midplane}'.format(name=self._name, midplane=midplane)) + await self._hypervisor.send('c7200 set_midplane "{name}" {midplane}'.format(name=self._name, midplane=midplane)) log.info('Router "{name}" [{id}]: midplane updated from {old_midplane} to {new_midplane}'.format(name=self._name, id=self._id, @@ -167,8 +164,7 @@ class C7200(Router): return self._sensors - @asyncio.coroutine - def set_sensors(self, sensors): + async def set_sensors(self, sensors): """ Sets the 4 sensors with temperature in degree Celcius. @@ -182,7 +178,7 @@ class C7200(Router): sensor_id = 0 for sensor in sensors: - yield from self._hypervisor.send('c7200 set_temp_sensor "{name}" {sensor_id} {temp}'.format(name=self._name, + await self._hypervisor.send('c7200 set_temp_sensor "{name}" {sensor_id} {temp}'.format(name=self._name, sensor_id=sensor_id, temp=sensor)) @@ -205,8 +201,7 @@ class C7200(Router): return self._power_supplies - @asyncio.coroutine - def set_power_supplies(self, power_supplies): + async def set_power_supplies(self, power_supplies): """ Sets the 2 power supplies with 0 = off, 1 = on. @@ -216,7 +211,7 @@ class C7200(Router): power_supply_id = 0 for power_supply in power_supplies: - yield from self._hypervisor.send('c7200 set_power_supply "{name}" {power_supply_id} {powered_on}'.format(name=self._name, + await self._hypervisor.send('c7200 set_power_supply "{name}" {power_supply_id} {powered_on}'.format(name=self._name, power_supply_id=power_supply_id, powered_on=power_supply)) @@ -228,8 +223,7 @@ class C7200(Router): self._power_supplies = power_supplies - @asyncio.coroutine - def start(self): + async def start(self): """ Starts this router. At least the IOS image must be set before starting it. @@ -237,8 +231,8 @@ class C7200(Router): # trick: we must send sensors and power supplies info after starting the router # otherwise they are not taken into account (Dynamips bug?) - yield from Router.start(self) + await Router.start(self) if self._sensors != [22, 22, 22, 22]: - yield from self.set_sensors(self._sensors) + await self.set_sensors(self._sensors) if self._power_supplies != [1, 1]: - yield from self.set_power_supplies(self._power_supplies) + await self.set_power_supplies(self._power_supplies) diff --git a/gns3server/compute/dynamips/nodes/ethernet_hub.py b/gns3server/compute/dynamips/nodes/ethernet_hub.py index c481a8ab..250c245c 100644 --- a/gns3server/compute/dynamips/nodes/ethernet_hub.py +++ b/gns3server/compute/dynamips/nodes/ethernet_hub.py @@ -93,10 +93,9 @@ class EthernetHub(Bridge): self._ports = ports - @asyncio.coroutine - def create(self): + async def create(self): - yield from Bridge.create(self) + await Bridge.create(self) log.info('Ethernet hub "{name}" [{id}] has been created'.format(name=self._name, id=self._id)) @property @@ -109,32 +108,29 @@ class EthernetHub(Bridge): return self._mappings - @asyncio.coroutine - def delete(self): - return (yield from self.close()) + async def delete(self): + return (await self.close()) - @asyncio.coroutine - def close(self): + async def close(self): """ Deletes this hub. 
""" for nio in self._nios: if nio: - yield from nio.close() + await nio.close() try: - yield from Bridge.delete(self) + await Bridge.delete(self) log.info('Ethernet hub "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id)) except DynamipsError: log.debug("Could not properly delete Ethernet hub {}".format(self._name)) if self._hypervisor and not self._hypervisor.devices: - yield from self.hypervisor.stop() + await self.hypervisor.stop() self._hypervisor = None return True - @asyncio.coroutine - def add_nio(self, nio, port_number): + async def add_nio(self, nio, port_number): """ Adds a NIO as new port on this hub. @@ -148,7 +144,7 @@ class EthernetHub(Bridge): if port_number in self._mappings: raise DynamipsError("Port {} isn't free".format(port_number)) - yield from Bridge.add_nio(self, nio) + await Bridge.add_nio(self, nio) log.info('Ethernet hub "{name}" [{id}]: NIO {nio} bound to port {port}'.format(name=self._name, id=self._id, @@ -156,8 +152,7 @@ class EthernetHub(Bridge): port=port_number)) self._mappings[port_number] = nio - @asyncio.coroutine - def remove_nio(self, port_number): + async def remove_nio(self, port_number): """ Removes the specified NIO as member of this hub. @@ -172,7 +167,7 @@ class EthernetHub(Bridge): nio = self._mappings[port_number] if isinstance(nio, NIOUDP): self.manager.port_manager.release_udp_port(nio.lport, self._project) - yield from Bridge.remove_nio(self, nio) + await Bridge.remove_nio(self, nio) log.info('Ethernet hub "{name}" [{id}]: NIO {nio} removed from port {port}'.format(name=self._name, id=self._id, @@ -182,8 +177,7 @@ class EthernetHub(Bridge): del self._mappings[port_number] return nio - @asyncio.coroutine - def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): + async def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. @@ -204,15 +198,14 @@ class EthernetHub(Bridge): if nio.input_filter[0] is not None and nio.output_filter[0] is not None: raise DynamipsError("Port {} has already a filter applied".format(port_number)) - yield from nio.bind_filter("both", "capture") - yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) + await nio.bind_filter("both", "capture") + await nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) log.info('Ethernet hub "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number)) - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. 
@@ -223,7 +216,7 @@ class EthernetHub(Bridge): raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._mappings[port_number] - yield from nio.unbind_filter("both") + await nio.unbind_filter("both") log.info('Ethernet hub "{name}" [{id}]: stopping packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number)) diff --git a/gns3server/compute/dynamips/nodes/ethernet_switch.py b/gns3server/compute/dynamips/nodes/ethernet_switch.py index 5c7ac2e3..54f4f209 100644 --- a/gns3server/compute/dynamips/nodes/ethernet_switch.py +++ b/gns3server/compute/dynamips/nodes/ethernet_switch.py @@ -43,13 +43,12 @@ class EthernetSwitchConsole(EmbedShell): super().__init__(welcome_message="Welcome to GNS3 builtin Ethernet switch.\n\nType help for available commands\n") self._node = node - @asyncio.coroutine - def mac(self): + async def mac(self): """ Show MAC address table """ res = 'Port Mac VLAN\n' - result = (yield from self._node._hypervisor.send('ethsw show_mac_addr_table {}'.format(self._node.name))) + result = (await self._node._hypervisor.send('ethsw show_mac_addr_table {}'.format(self._node.name))) for line in result: mac, vlan, nio = line.replace(' ', ' ').split(' ') mac = mac.replace('.', '') @@ -163,41 +162,38 @@ class EthernetSwitch(Device): self._ports = ports - @asyncio.coroutine - def update_port_settings(self): + async def update_port_settings(self): for port_settings in self._ports: port_number = port_settings["port_number"] if port_number in self._nios and self._nios[port_number] is not None: - yield from self.set_port_settings(port_number, port_settings) + await self.set_port_settings(port_number, port_settings) - @asyncio.coroutine - def create(self): + async def create(self): if self._hypervisor is None: module_workdir = self.project.module_working_directory(self.manager.module_name.lower()) - self._hypervisor = yield from self.manager.start_new_hypervisor(working_dir=module_workdir) + self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir) - yield from self._hypervisor.send('ethsw create "{}"'.format(self._name)) + await self._hypervisor.send('ethsw create "{}"'.format(self._name)) log.info('Ethernet switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id)) self._telnet_shell = EthernetSwitchConsole(self) self._telnet_shell.prompt = self._name + '> ' self._telnet = create_telnet_shell(self._telnet_shell) try: - self._telnet_server = (yield from asyncio.start_server(self._telnet.run, self._manager.port_manager.console_host, self.console)) + self._telnet_server = (await asyncio.start_server(self._telnet.run, self._manager.port_manager.console_host, self.console)) except OSError as e: self.project.emit("log.warning", {"message": "Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e)}) self._hypervisor.devices.append(self) - @asyncio.coroutine - def set_name(self, new_name): + async def set_name(self, new_name): """ Renames this Ethernet switch. 
:param new_name: New name for this switch """ - yield from self._hypervisor.send('ethsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) + await self._hypervisor.send('ethsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) log.info('Ethernet switch "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name, id=self._id, new_name=new_name)) @@ -223,39 +219,36 @@ class EthernetSwitch(Device): return self._mappings - @asyncio.coroutine - def delete(self): - return (yield from self.close()) + async def delete(self): + return (await self.close()) - @asyncio.coroutine - def close(self): + async def close(self): """ Deletes this Ethernet switch. """ - yield from self._telnet.close() + await self._telnet.close() if self._telnet_server: self._telnet_server.close() for nio in self._nios.values(): if nio: - yield from nio.close() + await nio.close() self.manager.port_manager.release_tcp_port(self._console, self._project) if self._hypervisor: try: - yield from self._hypervisor.send('ethsw delete "{}"'.format(self._name)) + await self._hypervisor.send('ethsw delete "{}"'.format(self._name)) log.info('Ethernet switch "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id)) except DynamipsError: log.debug("Could not properly delete Ethernet switch {}".format(self._name)) if self._hypervisor and self in self._hypervisor.devices: self._hypervisor.devices.remove(self) if self._hypervisor and not self._hypervisor.devices: - yield from self.hypervisor.stop() + await self.hypervisor.stop() self._hypervisor = None return True - @asyncio.coroutine - def add_nio(self, nio, port_number): + async def add_nio(self, nio, port_number): """ Adds a NIO as new port on Ethernet switch. @@ -266,7 +259,7 @@ class EthernetSwitch(Device): if port_number in self._nios: raise DynamipsError("Port {} isn't free".format(port_number)) - yield from self._hypervisor.send('ethsw add_nio "{name}" {nio}'.format(name=self._name, nio=nio)) + await self._hypervisor.send('ethsw add_nio "{name}" {nio}'.format(name=self._name, nio=nio)) log.info('Ethernet switch "{name}" [{id}]: NIO {nio} bound to port {port}'.format(name=self._name, id=self._id, @@ -275,11 +268,10 @@ class EthernetSwitch(Device): self._nios[port_number] = nio for port_settings in self._ports: if port_settings["port_number"] == port_number: - yield from self.set_port_settings(port_number, port_settings) + await self.set_port_settings(port_number, port_settings) break - @asyncio.coroutine - def remove_nio(self, port_number): + async def remove_nio(self, port_number): """ Removes the specified NIO as member of this Ethernet switch. @@ -295,7 +287,7 @@ class EthernetSwitch(Device): if isinstance(nio, NIOUDP): self.manager.port_manager.release_udp_port(nio.lport, self._project) if self._hypervisor: - yield from self._hypervisor.send('ethsw remove_nio "{name}" {nio}'.format(name=self._name, nio=nio)) + await self._hypervisor.send('ethsw remove_nio "{name}" {nio}'.format(name=self._name, nio=nio)) log.info('Ethernet switch "{name}" [{id}]: NIO {nio} removed from port {port}'.format(name=self._name, id=self._id, @@ -308,8 +300,7 @@ class EthernetSwitch(Device): return nio - @asyncio.coroutine - def set_port_settings(self, port_number, settings): + async def set_port_settings(self, port_number, settings): """ Applies port settings to a specific port. 
@@ -318,14 +309,13 @@ class EthernetSwitch(Device): """ if settings["type"] == "access": - yield from self.set_access_port(port_number, settings["vlan"]) + await self.set_access_port(port_number, settings["vlan"]) elif settings["type"] == "dot1q": - yield from self.set_dot1q_port(port_number, settings["vlan"]) + await self.set_dot1q_port(port_number, settings["vlan"]) elif settings["type"] == "qinq": - yield from self.set_qinq_port(port_number, settings["vlan"], settings.get("ethertype")) + await self.set_qinq_port(port_number, settings["vlan"], settings.get("ethertype")) - @asyncio.coroutine - def set_access_port(self, port_number, vlan_id): + async def set_access_port(self, port_number, vlan_id): """ Sets the specified port as an ACCESS port. @@ -337,7 +327,7 @@ class EthernetSwitch(Device): raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._nios[port_number] - yield from self._hypervisor.send('ethsw set_access_port "{name}" {nio} {vlan_id}'.format(name=self._name, + await self._hypervisor.send('ethsw set_access_port "{name}" {nio} {vlan_id}'.format(name=self._name, nio=nio, vlan_id=vlan_id)) @@ -347,8 +337,7 @@ class EthernetSwitch(Device): vlan_id=vlan_id)) self._mappings[port_number] = ("access", vlan_id) - @asyncio.coroutine - def set_dot1q_port(self, port_number, native_vlan): + async def set_dot1q_port(self, port_number, native_vlan): """ Sets the specified port as a 802.1Q trunk port. @@ -360,7 +349,7 @@ class EthernetSwitch(Device): raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._nios[port_number] - yield from self._hypervisor.send('ethsw set_dot1q_port "{name}" {nio} {native_vlan}'.format(name=self._name, + await self._hypervisor.send('ethsw set_dot1q_port "{name}" {nio} {native_vlan}'.format(name=self._name, nio=nio, native_vlan=native_vlan)) @@ -371,8 +360,7 @@ class EthernetSwitch(Device): self._mappings[port_number] = ("dot1q", native_vlan) - @asyncio.coroutine - def set_qinq_port(self, port_number, outer_vlan, ethertype): + async def set_qinq_port(self, port_number, outer_vlan, ethertype): """ Sets the specified port as a trunk (QinQ) port. @@ -387,7 +375,7 @@ class EthernetSwitch(Device): if ethertype != "0x8100" and parse_version(self.hypervisor.version) < parse_version('0.2.16'): raise DynamipsError("Dynamips version required is >= 0.2.16 to change the default QinQ Ethernet type, detected version is {}".format(self.hypervisor.version)) - yield from self._hypervisor.send('ethsw set_qinq_port "{name}" {nio} {outer_vlan} {ethertype}'.format(name=self._name, + await self._hypervisor.send('ethsw set_qinq_port "{name}" {nio} {outer_vlan} {ethertype}'.format(name=self._name, nio=nio, outer_vlan=outer_vlan, ethertype=ethertype if ethertype != "0x8100" else "")) @@ -399,27 +387,24 @@ class EthernetSwitch(Device): ethertype=ethertype)) self._mappings[port_number] = ("qinq", outer_vlan, ethertype) - @asyncio.coroutine - def get_mac_addr_table(self): + async def get_mac_addr_table(self): """ Returns the MAC address table for this Ethernet switch. :returns: list of entries (Ethernet address, VLAN, NIO) """ - mac_addr_table = yield from self._hypervisor.send('ethsw show_mac_addr_table "{}"'.format(self._name)) + mac_addr_table = await self._hypervisor.send('ethsw show_mac_addr_table "{}"'.format(self._name)) return mac_addr_table - @asyncio.coroutine - def clear_mac_addr_table(self): + async def clear_mac_addr_table(self): """ Clears the MAC address table for this Ethernet switch. 
""" - yield from self._hypervisor.send('ethsw clear_mac_addr_table "{}"'.format(self._name)) + await self._hypervisor.send('ethsw clear_mac_addr_table "{}"'.format(self._name)) - @asyncio.coroutine - def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): + async def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. @@ -443,15 +428,14 @@ class EthernetSwitch(Device): if nio.input_filter[0] is not None and nio.output_filter[0] is not None: raise DynamipsError("Port {} has already a filter applied".format(port_number)) - yield from nio.bind_filter("both", "capture") - yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) + await nio.bind_filter("both", "capture") + await nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) log.info('Ethernet switch "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number)) - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. @@ -466,7 +450,7 @@ class EthernetSwitch(Device): if not nio: raise DynamipsError("Port {} is not connected".format(port_number)) - yield from nio.unbind_filter("both") + await nio.unbind_filter("both") log.info('Ethernet switch "{name}" [{id}]: stopping packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number)) diff --git a/gns3server/compute/dynamips/nodes/frame_relay_switch.py b/gns3server/compute/dynamips/nodes/frame_relay_switch.py index c6dac00e..73d97432 100644 --- a/gns3server/compute/dynamips/nodes/frame_relay_switch.py +++ b/gns3server/compute/dynamips/nodes/frame_relay_switch.py @@ -63,26 +63,24 @@ class FrameRelaySwitch(Device): "mappings": mappings, "status": "started"} - @asyncio.coroutine - def create(self): + async def create(self): if self._hypervisor is None: module_workdir = self.project.module_working_directory(self.manager.module_name.lower()) - self._hypervisor = yield from self.manager.start_new_hypervisor(working_dir=module_workdir) + self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir) - yield from self._hypervisor.send('frsw create "{}"'.format(self._name)) + await self._hypervisor.send('frsw create "{}"'.format(self._name)) log.info('Frame Relay switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id)) self._hypervisor.devices.append(self) - @asyncio.coroutine - def set_name(self, new_name): + async def set_name(self, new_name): """ Renames this Frame Relay switch. 
:param new_name: New name for this switch """ - yield from self._hypervisor.send('frsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) + await self._hypervisor.send('frsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) log.info('Frame Relay switch "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name, id=self._id, new_name=new_name)) @@ -118,15 +116,14 @@ class FrameRelaySwitch(Device): self._mappings = mappings - @asyncio.coroutine - def close(self): + async def close(self): for nio in self._nios.values(): if nio: - yield from nio.close() + await nio.close() if self._hypervisor: try: - yield from self._hypervisor.send('frsw delete "{}"'.format(self._name)) + await self._hypervisor.send('frsw delete "{}"'.format(self._name)) log.info('Frame Relay switch "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id)) except DynamipsError: log.debug("Could not properly delete Frame relay switch {}".format(self._name)) @@ -134,15 +131,14 @@ class FrameRelaySwitch(Device): if self._hypervisor and self in self._hypervisor.devices: self._hypervisor.devices.remove(self) if self._hypervisor and not self._hypervisor.devices: - yield from self.hypervisor.stop() + await self.hypervisor.stop() self._hypervisor = None - @asyncio.coroutine - def delete(self): + async def delete(self): """ Deletes this Frame Relay switch. """ - yield from self.close() + await self.close() return True def has_port(self, port): @@ -156,8 +152,7 @@ class FrameRelaySwitch(Device): return True return False - @asyncio.coroutine - def add_nio(self, nio, port_number): + async def add_nio(self, nio, port_number): """ Adds a NIO as new port on Frame Relay switch. @@ -174,10 +169,9 @@ class FrameRelaySwitch(Device): port=port_number)) self._nios[port_number] = nio - yield from self.set_mappings(self._mappings) + await self.set_mappings(self._mappings) - @asyncio.coroutine - def remove_nio(self, port_number): + async def remove_nio(self, port_number): """ Removes the specified NIO as member of this Frame Relay switch. @@ -200,8 +194,8 @@ class FrameRelaySwitch(Device): source_dlci=source_dlci, destination_port=destination_port, destination_dlci=destination_dlci)) - yield from self.unmap_vc(source_port, source_dlci, destination_port, destination_dlci) - yield from self.unmap_vc(destination_port, destination_dlci, source_port, source_dlci) + await self.unmap_vc(source_port, source_dlci, destination_port, destination_dlci) + await self.unmap_vc(destination_port, destination_dlci, source_port, source_dlci) nio = self._nios[port_number] if isinstance(nio, NIOUDP): @@ -215,8 +209,7 @@ class FrameRelaySwitch(Device): del self._nios[port_number] return nio - @asyncio.coroutine - def set_mappings(self, mappings): + async def set_mappings(self, mappings): """ Applies VC mappings @@ -237,11 +230,10 @@ class FrameRelaySwitch(Device): destination_port=destination_port, destination_dlci=destination_dlci)) - yield from self.map_vc(source_port, source_dlci, destination_port, destination_dlci) - yield from self.map_vc(destination_port, destination_dlci, source_port, source_dlci) + await self.map_vc(source_port, source_dlci, destination_port, destination_dlci) + await self.map_vc(destination_port, destination_dlci, source_port, source_dlci) - @asyncio.coroutine - def map_vc(self, port1, dlci1, port2, dlci2): + async def map_vc(self, port1, dlci1, port2, dlci2): """ Creates a new Virtual Circuit connection (unidirectional). 
@@ -260,7 +252,7 @@ class FrameRelaySwitch(Device): nio1 = self._nios[port1] nio2 = self._nios[port2] - yield from self._hypervisor.send('frsw create_vc "{name}" {input_nio} {input_dlci} {output_nio} {output_dlci}'.format(name=self._name, + await self._hypervisor.send('frsw create_vc "{name}" {input_nio} {input_dlci} {output_nio} {output_dlci}'.format(name=self._name, input_nio=nio1, input_dlci=dlci1, output_nio=nio2, @@ -275,8 +267,7 @@ class FrameRelaySwitch(Device): self._active_mappings[(port1, dlci1)] = (port2, dlci2) - @asyncio.coroutine - def unmap_vc(self, port1, dlci1, port2, dlci2): + async def unmap_vc(self, port1, dlci1, port2, dlci2): """ Deletes a Virtual Circuit connection (unidirectional). @@ -295,7 +286,7 @@ class FrameRelaySwitch(Device): nio1 = self._nios[port1] nio2 = self._nios[port2] - yield from self._hypervisor.send('frsw delete_vc "{name}" {input_nio} {input_dlci} {output_nio} {output_dlci}'.format(name=self._name, + await self._hypervisor.send('frsw delete_vc "{name}" {input_nio} {input_dlci} {output_nio} {output_dlci}'.format(name=self._name, input_nio=nio1, input_dlci=dlci1, output_nio=nio2, @@ -309,8 +300,7 @@ class FrameRelaySwitch(Device): dlci2=dlci2)) del self._active_mappings[(port1, dlci1)] - @asyncio.coroutine - def start_capture(self, port_number, output_file, data_link_type="DLT_FRELAY"): + async def start_capture(self, port_number, output_file, data_link_type="DLT_FRELAY"): """ Starts a packet capture. @@ -331,15 +321,14 @@ class FrameRelaySwitch(Device): if nio.input_filter[0] is not None and nio.output_filter[0] is not None: raise DynamipsError("Port {} has already a filter applied".format(port_number)) - yield from nio.bind_filter("both", "capture") - yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) + await nio.bind_filter("both", "capture") + await nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) log.info('Frame relay switch "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number)) - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. 
@@ -350,7 +339,7 @@ class FrameRelaySwitch(Device): raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._nios[port_number] - yield from nio.unbind_filter("both") + await nio.unbind_filter("both") log.info('Frame relay switch "{name}" [{id}]: stopping packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number)) diff --git a/gns3server/compute/dynamips/nodes/router.py b/gns3server/compute/dynamips/nodes/router.py index ee9f1695..3032dd47 100644 --- a/gns3server/compute/dynamips/nodes/router.py +++ b/gns3server/compute/dynamips/nodes/router.py @@ -38,7 +38,7 @@ from ...base_node import BaseNode from ..dynamips_error import DynamipsError from gns3server.utils.file_watcher import FileWatcher -from gns3server.utils.asyncio import wait_run_in_executor, monitor_process, asyncio_ensure_future +from gns3server.utils.asyncio import wait_run_in_executor, monitor_process from gns3server.utils.images import md5sum @@ -195,7 +195,7 @@ class Router(BaseNode): """ Called when the NVRAM file has changed """ - asyncio_ensure_future(self.save_configs()) + asyncio.ensure_future(self.save_configs()) @property def dynamips_id(self): @@ -207,16 +207,15 @@ class Router(BaseNode): return self._dynamips_id - @asyncio.coroutine - def create(self): + async def create(self): if not self._hypervisor: # We start the hypervisor is the dynamips folder and next we change to node dir # this allow the creation of common files in the dynamips folder - self._hypervisor = yield from self.manager.start_new_hypervisor(working_dir=self.project.module_working_directory(self.manager.module_name.lower())) - yield from self._hypervisor.set_working_dir(self._working_directory) + self._hypervisor = await self.manager.start_new_hypervisor(working_dir=self.project.module_working_directory(self.manager.module_name.lower())) + await self._hypervisor.set_working_dir(self._working_directory) - yield from self._hypervisor.send('vm create "{name}" {id} {platform}'.format(name=self._name, + await self._hypervisor.send('vm create "{name}" {id} {platform}'.format(name=self._name, id=self._dynamips_id, platform=self._platform)) @@ -227,41 +226,39 @@ class Router(BaseNode): id=self._id)) if self._console: - yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console)) + await self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console)) if self.aux is not None: - yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=self.aux)) + await self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=self.aux)) # get the default base MAC address - mac_addr = yield from self._hypervisor.send('{platform} get_mac_addr "{name}"'.format(platform=self._platform, + mac_addr = await self._hypervisor.send('{platform} get_mac_addr "{name}"'.format(platform=self._platform, name=self._name)) self._mac_addr = mac_addr[0] self._hypervisor.devices.append(self) - @asyncio.coroutine - def get_status(self): + async def get_status(self): """ Returns the status of this router :returns: inactive, shutting down, running or suspended. 
""" - status = yield from self._hypervisor.send('vm get_status "{name}"'.format(name=self._name)) + status = await self._hypervisor.send('vm get_status "{name}"'.format(name=self._name)) if len(status) == 0: raise DynamipsError("Can't get vm {name} status".format(name=self._name)) return self._status[int(status[0])] - @asyncio.coroutine - def start(self): + async def start(self): """ Starts this router. At least the IOS image must be set before it can start. """ - status = yield from self.get_status() + status = await self.get_status() if status == "suspended": - yield from self.resume() + await self.resume() elif status == "inactive": if not os.path.isfile(self._image) or not os.path.exists(self._image): @@ -294,19 +291,18 @@ class Router(BaseNode): # an empty private-config can prevent a router to boot. private_config_path = '' - yield from self._hypervisor.send('vm set_config "{name}" "{startup}" "{private}"'.format( + await self._hypervisor.send('vm set_config "{name}" "{startup}" "{private}"'.format( name=self._name, startup=startup_config_path, private=private_config_path)) - yield from self._hypervisor.send('vm start "{name}"'.format(name=self._name)) + await self._hypervisor.send('vm start "{name}"'.format(name=self._name)) self.status = "started" log.info('router "{name}" [{id}] has been started'.format(name=self._name, id=self._id)) self._memory_watcher = FileWatcher(self._memory_files(), self._memory_changed, strategy='hash', delay=30) monitor_process(self._hypervisor.process, self._termination_callback) - @asyncio.coroutine - def _termination_callback(self, returncode): + async def _termination_callback(self, returncode): """ Called when the process has stopped. @@ -319,16 +315,15 @@ class Router(BaseNode): if returncode != 0: self.project.emit("log.error", {"message": "Dynamips hypervisor process has stopped, return code: {}\n{}".format(returncode, self._hypervisor.read_stdout())}) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops this router. """ - status = yield from self.get_status() + status = await self.get_status() if status != "inactive": try: - yield from self._hypervisor.send('vm stop "{name}"'.format(name=self._name)) + await self._hypervisor.send('vm stop "{name}"'.format(name=self._name)) except DynamipsError as e: log.warning("Could not stop {}: {}".format(self._name, e)) self.status = "stopped" @@ -336,77 +331,72 @@ class Router(BaseNode): if self._memory_watcher: self._memory_watcher.close() self._memory_watcher = None - yield from self.save_configs() + await self.save_configs() - @asyncio.coroutine - def reload(self): + async def reload(self): """ Reload this router. """ - yield from self.stop() - yield from self.start() + await self.stop() + await self.start() - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspends this router. 
""" - status = yield from self.get_status() + status = await self.get_status() if status == "running": - yield from self._hypervisor.send('vm suspend "{name}"'.format(name=self._name)) + await self._hypervisor.send('vm suspend "{name}"'.format(name=self._name)) self.status = "suspended" log.info('Router "{name}" [{id}] has been suspended'.format(name=self._name, id=self._id)) - @asyncio.coroutine - def resume(self): + async def resume(self): """ Resumes this suspended router """ - status = yield from self.get_status() + status = await self.get_status() if status == "suspended": - yield from self._hypervisor.send('vm resume "{name}"'.format(name=self._name)) + await self._hypervisor.send('vm resume "{name}"'.format(name=self._name)) self.status = "started" log.info('Router "{name}" [{id}] has been resumed'.format(name=self._name, id=self._id)) - @asyncio.coroutine - def is_running(self): + async def is_running(self): """ Checks if this router is running. :returns: True if running, False otherwise """ - status = yield from self.get_status() + status = await self.get_status() if status == "running": return True return False - @asyncio.coroutine - def close(self): + async def close(self): - if not (yield from super().close()): + if not (await super().close()): return False for adapter in self._slots: if adapter is not None: for nio in adapter.ports.values(): if nio: - yield from nio.close() + await nio.close() - yield from self._stop_ubridge() + await self._stop_ubridge() if self in self._hypervisor.devices: self._hypervisor.devices.remove(self) if self._hypervisor and not self._hypervisor.devices: try: - yield from self.stop() - yield from self._hypervisor.send('vm delete "{}"'.format(self._name)) + await self.stop() + await self._hypervisor.send('vm delete "{}"'.format(self._name)) except DynamipsError as e: log.warning("Could not stop and delete {}: {}".format(self._name, e)) - yield from self.hypervisor.stop() + await self.hypervisor.stop() if self._auto_delete_disks: # delete nvram and disk files @@ -420,7 +410,7 @@ class Router(BaseNode): for file in files: try: log.debug("Deleting file {}".format(file)) - yield from wait_run_in_executor(os.remove, file) + await wait_run_in_executor(os.remove, file) except OSError as e: log.warning("Could not delete file {}: {}".format(file, e)) continue @@ -447,37 +437,34 @@ class Router(BaseNode): return self._hypervisor - @asyncio.coroutine - def list(self): + async def list(self): """ Returns all VM instances :returns: list of all VM instances """ - vm_list = yield from self._hypervisor.send("vm list") + vm_list = await self._hypervisor.send("vm list") return vm_list - @asyncio.coroutine - def list_con_ports(self): + async def list_con_ports(self): """ Returns all VM console TCP ports :returns: list of port numbers """ - port_list = yield from self._hypervisor.send("vm list_con_ports") + port_list = await self._hypervisor.send("vm list_con_ports") return port_list - @asyncio.coroutine - def set_debug_level(self, level): + async def set_debug_level(self, level): """ Sets the debug level for this router (default is 0). 
:param level: level number """ - yield from self._hypervisor.send('vm set_debug_level "{name}" {level}'.format(name=self._name, level=level)) + await self._hypervisor.send('vm set_debug_level "{name}" {level}'.format(name=self._name, level=level)) @property def image(self): @@ -489,8 +476,7 @@ class Router(BaseNode): return self._image - @asyncio.coroutine - def set_image(self, image): + async def set_image(self, image): """ Sets the IOS image for this router. There is no default. @@ -500,7 +486,7 @@ class Router(BaseNode): image = self.manager.get_abs_image_path(image) - yield from self._hypervisor.send('vm set_ios "{name}" "{image}"'.format(name=self._name, image=image)) + await self._hypervisor.send('vm set_ios "{name}" "{image}"'.format(name=self._name, image=image)) log.info('Router "{name}" [{id}]: has a new IOS image set: "{image}"'.format(name=self._name, id=self._id, @@ -518,8 +504,7 @@ class Router(BaseNode): return self._ram - @asyncio.coroutine - def set_ram(self, ram): + async def set_ram(self, ram): """ Sets amount of RAM allocated to this router @@ -529,7 +514,7 @@ class Router(BaseNode): if self._ram == ram: return - yield from self._hypervisor.send('vm set_ram "{name}" {ram}'.format(name=self._name, ram=ram)) + await self._hypervisor.send('vm set_ram "{name}" {ram}'.format(name=self._name, ram=ram)) log.info('Router "{name}" [{id}]: RAM updated from {old_ram}MB to {new_ram}MB'.format(name=self._name, id=self._id, old_ram=self._ram, @@ -546,8 +531,7 @@ class Router(BaseNode): return self._nvram - @asyncio.coroutine - def set_nvram(self, nvram): + async def set_nvram(self, nvram): """ Sets amount of NVRAM allocated to this router @@ -557,7 +541,7 @@ class Router(BaseNode): if self._nvram == nvram: return - yield from self._hypervisor.send('vm set_nvram "{name}" {nvram}'.format(name=self._name, nvram=nvram)) + await self._hypervisor.send('vm set_nvram "{name}" {nvram}'.format(name=self._name, nvram=nvram)) log.info('Router "{name}" [{id}]: NVRAM updated from {old_nvram}KB to {new_nvram}KB'.format(name=self._name, id=self._id, old_nvram=self._nvram, @@ -574,8 +558,7 @@ class Router(BaseNode): return self._mmap - @asyncio.coroutine - def set_mmap(self, mmap): + async def set_mmap(self, mmap): """ Enable/Disable use of a mapped file to simulate router memory. By default, a mapped file is used. This is a bit slower, but requires less memory. 
@@ -588,7 +571,7 @@ class Router(BaseNode): else: flag = 0 - yield from self._hypervisor.send('vm set_ram_mmap "{name}" {mmap}'.format(name=self._name, mmap=flag)) + await self._hypervisor.send('vm set_ram_mmap "{name}" {mmap}'.format(name=self._name, mmap=flag)) if mmap: log.info('Router "{name}" [{id}]: mmap enabled'.format(name=self._name, id=self._id)) @@ -606,8 +589,7 @@ class Router(BaseNode): return self._sparsemem - @asyncio.coroutine - def set_sparsemem(self, sparsemem): + async def set_sparsemem(self, sparsemem): """ Enable/disable use of sparse memory @@ -618,7 +600,7 @@ class Router(BaseNode): flag = 1 else: flag = 0 - yield from self._hypervisor.send('vm set_sparse_mem "{name}" {sparsemem}'.format(name=self._name, sparsemem=flag)) + await self._hypervisor.send('vm set_sparse_mem "{name}" {sparsemem}'.format(name=self._name, sparsemem=flag)) if sparsemem: log.info('Router "{name}" [{id}]: sparse memory enabled'.format(name=self._name, id=self._id)) @@ -636,8 +618,7 @@ class Router(BaseNode): return self._clock_divisor - @asyncio.coroutine - def set_clock_divisor(self, clock_divisor): + async def set_clock_divisor(self, clock_divisor): """ Sets the clock divisor value. The higher is the value, the faster is the clock in the virtual machine. The default is 4, but it is often required to adjust it. @@ -645,7 +626,7 @@ class Router(BaseNode): :param clock_divisor: clock divisor value (integer) """ - yield from self._hypervisor.send('vm set_clock_divisor "{name}" {clock}'.format(name=self._name, clock=clock_divisor)) + await self._hypervisor.send('vm set_clock_divisor "{name}" {clock}'.format(name=self._name, clock=clock_divisor)) log.info('Router "{name}" [{id}]: clock divisor updated from {old_clock} to {new_clock}'.format(name=self._name, id=self._id, old_clock=self._clock_divisor, @@ -662,8 +643,7 @@ class Router(BaseNode): return self._idlepc - @asyncio.coroutine - def set_idlepc(self, idlepc): + async def set_idlepc(self, idlepc): """ Sets the idle Pointer Counter (PC) @@ -673,12 +653,12 @@ class Router(BaseNode): if not idlepc: idlepc = "0x0" - is_running = yield from self.is_running() + is_running = await self.is_running() if not is_running: # router is not running - yield from self._hypervisor.send('vm set_idle_pc "{name}" {idlepc}'.format(name=self._name, idlepc=idlepc)) + await self._hypervisor.send('vm set_idle_pc "{name}" {idlepc}'.format(name=self._name, idlepc=idlepc)) else: - yield from self._hypervisor.send('vm set_idle_pc_online "{name}" 0 {idlepc}'.format(name=self._name, idlepc=idlepc)) + await self._hypervisor.send('vm set_idle_pc_online "{name}" 0 {idlepc}'.format(name=self._name, idlepc=idlepc)) log.info('Router "{name}" [{id}]: idle-PC set to {idlepc}'.format(name=self._name, id=self._id, idlepc=idlepc)) self._idlepc = idlepc @@ -706,8 +686,7 @@ class Router(BaseNode): log.error("Cannot set priority for Dynamips process (PID={}) ".format(pid, e.strerror)) return old_priority - @asyncio.coroutine - def get_idle_pc_prop(self): + async def get_idle_pc_prop(self): """ Gets the idle PC proposals. Takes 1000 measurements and records up to 10 idle PC proposals. 
@@ -716,42 +695,41 @@ class Router(BaseNode): :returns: list of idle PC proposal """ - is_running = yield from self.is_running() + is_running = await self.is_running() was_auto_started = False if not is_running: - yield from self.start() + await self.start() was_auto_started = True - yield from asyncio.sleep(20) # leave time to the router to boot + await asyncio.sleep(20) # leave time to the router to boot log.info('Router "{name}" [{id}] has started calculating Idle-PC values'.format(name=self._name, id=self._id)) old_priority = None if sys.platform.startswith("win"): old_priority = self.set_process_priority_windows(self._hypervisor.process.pid) begin = time.time() - idlepcs = yield from self._hypervisor.send('vm get_idle_pc_prop "{}" 0'.format(self._name)) + idlepcs = await self._hypervisor.send('vm get_idle_pc_prop "{}" 0'.format(self._name)) if old_priority is not None: self.set_process_priority_windows(self._hypervisor.process.pid, old_priority) log.info('Router "{name}" [{id}] has finished calculating Idle-PC values after {time:.4f} seconds'.format(name=self._name, id=self._id, time=time.time() - begin)) if was_auto_started: - yield from self.stop() + await self.stop() return idlepcs - @asyncio.coroutine - def show_idle_pc_prop(self): + async def show_idle_pc_prop(self): """ Dumps the idle PC proposals (previously generated). :returns: list of idle PC proposal """ - is_running = yield from self.is_running() + is_running = await self.is_running() if not is_running: # router is not running raise DynamipsError('Router "{name}" is not running'.format(name=self._name)) - proposals = yield from self._hypervisor.send('vm show_idle_pc_prop "{}" 0'.format(self._name)) + proposals = await self._hypervisor.send('vm show_idle_pc_prop "{}" 0'.format(self._name)) return proposals @property @@ -764,17 +742,16 @@ class Router(BaseNode): return self._idlemax - @asyncio.coroutine - def set_idlemax(self, idlemax): + async def set_idlemax(self, idlemax): """ Sets CPU idle max value :param idlemax: idle max value (integer) """ - is_running = yield from self.is_running() + is_running = await self.is_running() if is_running: # router is running - yield from self._hypervisor.send('vm set_idle_max "{name}" 0 {idlemax}'.format(name=self._name, idlemax=idlemax)) + await self._hypervisor.send('vm set_idle_max "{name}" 0 {idlemax}'.format(name=self._name, idlemax=idlemax)) log.info('Router "{name}" [{id}]: idlemax updated from {old_idlemax} to {new_idlemax}'.format(name=self._name, id=self._id, @@ -793,17 +770,16 @@ class Router(BaseNode): return self._idlesleep - @asyncio.coroutine - def set_idlesleep(self, idlesleep): + async def set_idlesleep(self, idlesleep): """ Sets CPU idle sleep time value. 
:param idlesleep: idle sleep value (integer) """ - is_running = yield from self.is_running() + is_running = await self.is_running() if is_running: # router is running - yield from self._hypervisor.send('vm set_idle_sleep_time "{name}" 0 {idlesleep}'.format(name=self._name, + await self._hypervisor.send('vm set_idle_sleep_time "{name}" 0 {idlesleep}'.format(name=self._name, idlesleep=idlesleep)) log.info('Router "{name}" [{id}]: idlesleep updated from {old_idlesleep} to {new_idlesleep}'.format(name=self._name, @@ -823,15 +799,14 @@ class Router(BaseNode): return self._ghost_file - @asyncio.coroutine - def set_ghost_file(self, ghost_file): + async def set_ghost_file(self, ghost_file): """ Sets ghost RAM file :ghost_file: path to ghost file """ - yield from self._hypervisor.send('vm set_ghost_file "{name}" {ghost_file}'.format(name=self._name, + await self._hypervisor.send('vm set_ghost_file "{name}" {ghost_file}'.format(name=self._name, ghost_file=shlex.quote(ghost_file))) log.info('Router "{name}" [{id}]: ghost file set to {ghost_file}'.format(name=self._name, @@ -861,8 +836,7 @@ class Router(BaseNode): return self._ghost_status - @asyncio.coroutine - def set_ghost_status(self, ghost_status): + async def set_ghost_status(self, ghost_status): """ Sets ghost RAM status @@ -872,7 +846,7 @@ class Router(BaseNode): 2 => Use an existing ghost instance """ - yield from self._hypervisor.send('vm set_ghost_status "{name}" {ghost_status}'.format(name=self._name, + await self._hypervisor.send('vm set_ghost_status "{name}" {ghost_status}'.format(name=self._name, ghost_status=ghost_status)) log.info('Router "{name}" [{id}]: ghost status set to {ghost_status}'.format(name=self._name, @@ -890,8 +864,7 @@ class Router(BaseNode): return self._exec_area - @asyncio.coroutine - def set_exec_area(self, exec_area): + async def set_exec_area(self, exec_area): """ Sets the exec area value. The exec area is a pool of host memory used to store pages @@ -901,7 +874,7 @@ class Router(BaseNode): :param exec_area: exec area value (integer) """ - yield from self._hypervisor.send('vm set_exec_area "{name}" {exec_area}'.format(name=self._name, + await self._hypervisor.send('vm set_exec_area "{name}" {exec_area}'.format(name=self._name, exec_area=exec_area)) log.info('Router "{name}" [{id}]: exec area updated from {old_exec}MB to {new_exec}MB'.format(name=self._name, @@ -920,15 +893,14 @@ class Router(BaseNode): return self._disk0 - @asyncio.coroutine - def set_disk0(self, disk0): + async def set_disk0(self, disk0): """ Sets the size (MB) for PCMCIA disk0. :param disk0: disk0 size (integer) """ - yield from self._hypervisor.send('vm set_disk0 "{name}" {disk0}'.format(name=self._name, disk0=disk0)) + await self._hypervisor.send('vm set_disk0 "{name}" {disk0}'.format(name=self._name, disk0=disk0)) log.info('Router "{name}" [{id}]: disk0 updated from {old_disk0}MB to {new_disk0}MB'.format(name=self._name, id=self._id, @@ -946,15 +918,14 @@ class Router(BaseNode): return self._disk1 - @asyncio.coroutine - def set_disk1(self, disk1): + async def set_disk1(self, disk1): """ Sets the size (MB) for PCMCIA disk1. 
:param disk1: disk1 size (integer) """ - yield from self._hypervisor.send('vm set_disk1 "{name}" {disk1}'.format(name=self._name, disk1=disk1)) + await self._hypervisor.send('vm set_disk1 "{name}" {disk1}'.format(name=self._name, disk1=disk1)) log.info('Router "{name}" [{id}]: disk1 updated from {old_disk1}MB to {new_disk1}MB'.format(name=self._name, id=self._id, @@ -972,8 +943,7 @@ class Router(BaseNode): return self._auto_delete_disks - @asyncio.coroutine - def set_auto_delete_disks(self, auto_delete_disks): + async def set_auto_delete_disks(self, auto_delete_disks): """ Enable/disable use of auto delete disks @@ -986,8 +956,7 @@ class Router(BaseNode): log.info('Router "{name}" [{id}]: auto delete disks disabled'.format(name=self._name, id=self._id)) self._auto_delete_disks = auto_delete_disks - @asyncio.coroutine - def set_console(self, console): + async def set_console(self, console): """ Sets the TCP console port. @@ -995,10 +964,9 @@ class Router(BaseNode): """ self.console = console - yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self.console)) + await self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self.console)) - @asyncio.coroutine - def set_console_type(self, console_type): + async def set_console_type(self, console_type): """ Sets the console type. @@ -1006,7 +974,7 @@ class Router(BaseNode): """ if self.console_type != console_type: - status = yield from self.get_status() + status = await self.get_status() if status == "running": raise DynamipsError('"{name}" must be stopped to change the console type to {console_type}'.format(name=self._name, console_type=console_type)) @@ -1015,10 +983,9 @@ class Router(BaseNode): self.console_type = console_type if self._console and console_type == "telnet": - yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console)) + await self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console)) - @asyncio.coroutine - def set_aux(self, aux): + async def set_aux(self, aux): """ Sets the TCP auxiliary port. @@ -1026,17 +993,16 @@ class Router(BaseNode): """ self.aux = aux - yield from self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux)) + await self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux)) - @asyncio.coroutine - def get_cpu_usage(self, cpu_id=0): + async def get_cpu_usage(self, cpu_id=0): """ Shows cpu usage in seconds, "cpu_id" is ignored. :returns: cpu usage in seconds """ - cpu_usage = yield from self._hypervisor.send('vm cpu_usage "{name}" {cpu_id}'.format(name=self._name, cpu_id=cpu_id)) + cpu_usage = await self._hypervisor.send('vm cpu_usage "{name}" {cpu_id}'.format(name=self._name, cpu_id=cpu_id)) return int(cpu_usage[0]) @property @@ -1049,15 +1015,14 @@ class Router(BaseNode): return self._mac_addr - @asyncio.coroutine - def set_mac_addr(self, mac_addr): + async def set_mac_addr(self, mac_addr): """ Sets the MAC address. 
:param mac_addr: a MAC address (hexadecimal format: hh:hh:hh:hh:hh:hh) """ - yield from self._hypervisor.send('{platform} set_mac_addr "{name}" {mac_addr}'.format(platform=self._platform, + await self._hypervisor.send('{platform} set_mac_addr "{name}" {mac_addr}'.format(platform=self._platform, name=self._name, mac_addr=mac_addr)) @@ -1077,15 +1042,14 @@ class Router(BaseNode): return self._system_id - @asyncio.coroutine - def set_system_id(self, system_id): + async def set_system_id(self, system_id): """ Sets the system ID. :param system_id: a system ID (also called board processor ID) """ - yield from self._hypervisor.send('{platform} set_system_id "{name}" {system_id}'.format(platform=self._platform, + await self._hypervisor.send('{platform} set_system_id "{name}" {system_id}'.format(platform=self._platform, name=self._name, system_id=system_id)) @@ -1095,19 +1059,17 @@ class Router(BaseNode): new_id=system_id)) self._system_id = system_id - @asyncio.coroutine - def get_slot_bindings(self): + async def get_slot_bindings(self): """ Returns slot bindings. :returns: slot bindings (adapter names) list """ - slot_bindings = yield from self._hypervisor.send('vm slot_bindings "{}"'.format(self._name)) + slot_bindings = await self._hypervisor.send('vm slot_bindings "{}"'.format(self._name)) return slot_bindings - @asyncio.coroutine - def slot_add_binding(self, slot_number, adapter): + async def slot_add_binding(self, slot_number, adapter): """ Adds a slot binding (a module into a slot). @@ -1126,7 +1088,7 @@ class Router(BaseNode): slot_number=slot_number, adapter=current_adapter)) - is_running = yield from self.is_running() + is_running = await self.is_running() # Only c7200, c3600 and c3745 (NM-4T only) support new adapter while running if is_running and not ((self._platform == 'c7200' and not str(adapter).startswith('C7200')) @@ -1135,7 +1097,7 @@ class Router(BaseNode): raise DynamipsError('Adapter {adapter} cannot be added while router "{name}" is running'.format(adapter=adapter, name=self._name)) - yield from self._hypervisor.send('vm slot_add_binding "{name}" {slot_number} 0 {adapter}'.format(name=self._name, + await self._hypervisor.send('vm slot_add_binding "{name}" {slot_number} 0 {adapter}'.format(name=self._name, slot_number=slot_number, adapter=adapter)) @@ -1149,15 +1111,14 @@ class Router(BaseNode): # Generate an OIR event if the router is running if is_running: - yield from self._hypervisor.send('vm slot_oir_start "{name}" {slot_number} 0'.format(name=self._name, + await self._hypervisor.send('vm slot_oir_start "{name}" {slot_number} 0'.format(name=self._name, slot_number=slot_number)) log.info('Router "{name}" [{id}]: OIR start event sent to slot {slot_number}'.format(name=self._name, id=self._id, slot_number=slot_number)) - @asyncio.coroutine - def slot_remove_binding(self, slot_number): + async def slot_remove_binding(self, slot_number): """ Removes a slot binding (a module from a slot). 
@@ -1174,7 +1135,7 @@ class Router(BaseNode): raise DynamipsError('No adapter in slot {slot_number} on router "{name}"'.format(name=self._name, slot_number=slot_number)) - is_running = yield from self.is_running() + is_running = await self.is_running() # Only c7200, c3600 and c3745 (NM-4T only) support to remove adapter while running if is_running and not ((self._platform == 'c7200' and not str(adapter).startswith('C7200')) @@ -1186,14 +1147,14 @@ class Router(BaseNode): # Generate an OIR event if the router is running if is_running: - yield from self._hypervisor.send('vm slot_oir_stop "{name}" {slot_number} 0'.format(name=self._name, + await self._hypervisor.send('vm slot_oir_stop "{name}" {slot_number} 0'.format(name=self._name, slot_number=slot_number)) log.info('Router "{name}" [{id}]: OIR stop event sent to slot {slot_number}'.format(name=self._name, id=self._id, slot_number=slot_number)) - yield from self._hypervisor.send('vm slot_remove_binding "{name}" {slot_number} 0'.format(name=self._name, + await self._hypervisor.send('vm slot_remove_binding "{name}" {slot_number} 0'.format(name=self._name, slot_number=slot_number)) log.info('Router "{name}" [{id}]: adapter {adapter} removed from slot {slot_number}'.format(name=self._name, @@ -1202,8 +1163,7 @@ class Router(BaseNode): slot_number=slot_number)) self._slots[slot_number] = None - @asyncio.coroutine - def install_wic(self, wic_slot_number, wic): + async def install_wic(self, wic_slot_number, wic): """ Installs a WIC adapter into this router. @@ -1227,7 +1187,7 @@ class Router(BaseNode): # Dynamips WICs slot IDs start on a multiple of 16 # WIC1 = 16, WIC2 = 32 and WIC3 = 48 internal_wic_slot_number = 16 * (wic_slot_number + 1) - yield from self._hypervisor.send('vm slot_add_binding "{name}" {slot_number} {wic_slot_number} {wic}'.format(name=self._name, + await self._hypervisor.send('vm slot_add_binding "{name}" {slot_number} {wic_slot_number} {wic}'.format(name=self._name, slot_number=slot_number, wic_slot_number=internal_wic_slot_number, wic=wic)) @@ -1239,8 +1199,7 @@ class Router(BaseNode): adapter.install_wic(wic_slot_number, wic) - @asyncio.coroutine - def uninstall_wic(self, wic_slot_number): + async def uninstall_wic(self, wic_slot_number): """ Uninstalls a WIC adapter from this router. @@ -1263,7 +1222,7 @@ class Router(BaseNode): # Dynamips WICs slot IDs start on a multiple of 16 # WIC1 = 16, WIC2 = 32 and WIC3 = 48 internal_wic_slot_number = 16 * (wic_slot_number + 1) - yield from self._hypervisor.send('vm slot_remove_binding "{name}" {slot_number} {wic_slot_number}'.format(name=self._name, + await self._hypervisor.send('vm slot_remove_binding "{name}" {slot_number} {wic_slot_number}'.format(name=self._name, slot_number=slot_number, wic_slot_number=internal_wic_slot_number)) @@ -1273,8 +1232,7 @@ class Router(BaseNode): wic_slot_number=wic_slot_number)) adapter.uninstall_wic(wic_slot_number) - @asyncio.coroutine - def get_slot_nio_bindings(self, slot_number): + async def get_slot_nio_bindings(self, slot_number): """ Returns slot NIO bindings. 
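The WIC hunks above rely on Dynamips' internal numbering, where WIC sub-slots are addressed at multiples of 16. A quick standalone check of the arithmetic used by install_wic/uninstall_wic (not part of the patch):

# wic_slot_number 0, 1, 2 map to 16, 32, 48, which the source comment
# labels WIC1, WIC2 and WIC3.
def internal_wic_slot(wic_slot_number):
    return 16 * (wic_slot_number + 1)

assert [internal_wic_slot(n) for n in range(3)] == [16, 32, 48]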
@@ -1283,12 +1241,11 @@ class Router(BaseNode): :returns: list of NIO bindings """ - nio_bindings = yield from self._hypervisor.send('vm slot_nio_bindings "{name}" {slot_number}'.format(name=self._name, + nio_bindings = await self._hypervisor.send('vm slot_nio_bindings "{name}" {slot_number}'.format(name=self._name, slot_number=slot_number)) return nio_bindings - @asyncio.coroutine - def slot_add_nio_binding(self, slot_number, port_number, nio): + async def slot_add_nio_binding(self, slot_number, port_number, nio): """ Adds a slot NIO binding. @@ -1311,16 +1268,16 @@ class Router(BaseNode): port_number=port_number)) try: - yield from self._hypervisor.send('vm slot_add_nio_binding "{name}" {slot_number} {port_number} {nio}'.format(name=self._name, + await self._hypervisor.send('vm slot_add_nio_binding "{name}" {slot_number} {port_number} {nio}'.format(name=self._name, slot_number=slot_number, port_number=port_number, nio=nio)) except DynamipsError: # in case of error try to remove and add the nio binding - yield from self._hypervisor.send('vm slot_remove_nio_binding "{name}" {slot_number} {port_number}'.format(name=self._name, + await self._hypervisor.send('vm slot_remove_nio_binding "{name}" {slot_number} {port_number}'.format(name=self._name, slot_number=slot_number, port_number=port_number)) - yield from self._hypervisor.send('vm slot_add_nio_binding "{name}" {slot_number} {port_number} {nio}'.format(name=self._name, + await self._hypervisor.send('vm slot_add_nio_binding "{name}" {slot_number} {port_number} {nio}'.format(name=self._name, slot_number=slot_number, port_number=port_number, nio=nio)) @@ -1331,11 +1288,10 @@ class Router(BaseNode): slot_number=slot_number, port_number=port_number)) - yield from self.slot_enable_nio(slot_number, port_number) + await self.slot_enable_nio(slot_number, port_number) adapter.add_nio(port_number, nio) - @asyncio.coroutine - def slot_update_nio_binding(self, slot_number, port_number, nio): + async def slot_update_nio_binding(self, slot_number, port_number, nio): """ Update a slot NIO binding. @@ -1343,10 +1299,9 @@ class Router(BaseNode): :param port_number: port number :param nio: NIO instance to add to the slot/port """ - yield from nio.update() + await nio.update() - @asyncio.coroutine - def slot_remove_nio_binding(self, slot_number, port_number): + async def slot_remove_nio_binding(self, slot_number, port_number): """ Removes a slot NIO binding. @@ -1369,15 +1324,15 @@ class Router(BaseNode): raise DynamipsError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter, port_number=port_number)) - yield from self.slot_disable_nio(slot_number, port_number) - yield from self._hypervisor.send('vm slot_remove_nio_binding "{name}" {slot_number} {port_number}'.format(name=self._name, + await self.slot_disable_nio(slot_number, port_number) + await self._hypervisor.send('vm slot_remove_nio_binding "{name}" {slot_number} {port_number}'.format(name=self._name, slot_number=slot_number, port_number=port_number)) nio = adapter.get_nio(port_number) if nio is None: return - yield from nio.close() + await nio.close() adapter.remove_nio(port_number) log.info('Router "{name}" [{id}]: NIO {nio_name} removed from port {slot_number}/{port_number}'.format(name=self._name, @@ -1388,8 +1343,7 @@ class Router(BaseNode): return nio - @asyncio.coroutine - def slot_enable_nio(self, slot_number, port_number): + async def slot_enable_nio(self, slot_number, port_number): """ Enables a slot NIO binding. 
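slot_add_nio_binding above keeps its recovery path through the conversion: if the first vm slot_add_nio_binding command fails, the binding is removed and added again, now with await instead of yield from. A condensed sketch of that try/except flow, where send() and the local DynamipsError are stand-ins for the real hypervisor API:

class DynamipsError(Exception):
    """Stand-in for the real exception type."""

async def add_nio_binding(send, name, slot_number, port_number, nio):
    """Try to bind the NIO; on failure, drop any stale binding and retry once."""
    add_cmd = 'vm slot_add_nio_binding "{}" {} {} {}'.format(name, slot_number, port_number, nio)
    try:
        await send(add_cmd)
    except DynamipsError:
        # A stale binding may be left over: remove it, then bind again.
        await send('vm slot_remove_nio_binding "{}" {} {}'.format(name, slot_number, port_number))
        await send(add_cmd)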
@@ -1397,9 +1351,9 @@ class Router(BaseNode): :param port_number: port number """ - is_running = yield from self.is_running() + is_running = await self.is_running() if is_running: # running router - yield from self._hypervisor.send('vm slot_enable_nio "{name}" {slot_number} {port_number}'.format(name=self._name, + await self._hypervisor.send('vm slot_enable_nio "{name}" {slot_number} {port_number}'.format(name=self._name, slot_number=slot_number, port_number=port_number)) @@ -1408,8 +1362,7 @@ class Router(BaseNode): slot_number=slot_number, port_number=port_number)) - @asyncio.coroutine - def slot_disable_nio(self, slot_number, port_number): + async def slot_disable_nio(self, slot_number, port_number): """ Disables a slot NIO binding. @@ -1417,9 +1370,9 @@ class Router(BaseNode): :param port_number: port number """ - is_running = yield from self.is_running() + is_running = await self.is_running() if is_running: # running router - yield from self._hypervisor.send('vm slot_disable_nio "{name}" {slot_number} {port_number}'.format(name=self._name, + await self._hypervisor.send('vm slot_disable_nio "{name}" {slot_number} {port_number}'.format(name=self._name, slot_number=slot_number, port_number=port_number)) @@ -1428,8 +1381,7 @@ class Router(BaseNode): slot_number=slot_number, port_number=port_number)) - @asyncio.coroutine - def start_capture(self, slot_number, port_number, output_file, data_link_type="DLT_EN10MB"): + async def start_capture(self, slot_number, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. @@ -1467,8 +1419,8 @@ class Router(BaseNode): raise DynamipsError("Port {port_number} has already a filter applied on {adapter}".format(adapter=adapter, port_number=port_number)) - yield from nio.bind_filter("both", "capture") - yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) + await nio.bind_filter("both", "capture") + await nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) log.info('Router "{name}" [{id}]: starting packet capture on port {slot_number}/{port_number}'.format(name=self._name, id=self._id, @@ -1476,8 +1428,7 @@ class Router(BaseNode): slot_number=slot_number, port_number=port_number)) - @asyncio.coroutine - def stop_capture(self, slot_number, port_number): + async def stop_capture(self, slot_number, port_number): """ Stops a packet capture. @@ -1500,7 +1451,7 @@ class Router(BaseNode): raise DynamipsError("Port {slot_number}/{port_number} is not connected".format(slot_number=slot_number, port_number=port_number)) - yield from nio.unbind_filter("both") + await nio.unbind_filter("both") log.info('Router "{name}" [{id}]: stopping packet capture on port {slot_number}/{port_number}'.format(name=self._name, id=self._id, @@ -1541,8 +1492,7 @@ class Router(BaseNode): """ return os.path.join(self._working_directory, "configs", "i{}_private-config.cfg".format(self._dynamips_id)) - @asyncio.coroutine - def set_name(self, new_name): + async def set_name(self, new_name): """ Renames this router. 
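start_capture and stop_capture above drive the capture through the NIO's filter API: bind a "capture" filter in both directions, configure it with the data-link type and output file, and unbind it to stop. A condensed sketch of that sequence; the nio object and its methods are placeholders for the real NIO class:

async def start_capture(nio, output_file, data_link_type="DLT_EN10MB"):
    # Bind the capture filter in both directions, then point it at the pcap file.
    await nio.bind_filter("both", "capture")
    await nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file))

async def stop_capture(nio):
    # Removing the filter ends the capture.
    await nio.unbind_filter("both")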
@@ -1571,12 +1521,11 @@ class Router(BaseNode): except OSError as e: raise DynamipsError("Could not amend the configuration {}: {}".format(self.private_config_path, e)) - yield from self._hypervisor.send('vm rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) + await self._hypervisor.send('vm rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name)) log.info('Router "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name, id=self._id, new_name=new_name)) self._name = new_name - @asyncio.coroutine - def extract_config(self): + async def extract_config(self): """ Gets the contents of the config files startup-config and private-config from NVRAM. @@ -1585,7 +1534,7 @@ class Router(BaseNode): """ try: - reply = yield from self._hypervisor.send('vm extract_config "{}"'.format(self._name)) + reply = await self._hypervisor.send('vm extract_config "{}"'.format(self._name)) except DynamipsError: # for some reason Dynamips gets frozen when it does not find the magic number in the NVRAM file. return None, None @@ -1594,8 +1543,7 @@ class Router(BaseNode): private_config = reply[1][1:-1] # get private-config and remove single quotes return startup_config, private_config - @asyncio.coroutine - def save_configs(self): + async def save_configs(self): """ Saves the startup-config and private-config to files. """ @@ -1606,7 +1554,7 @@ class Router(BaseNode): except OSError as e: raise DynamipsError("Could could not create configuration directory {}: {}".format(config_path, e)) - startup_config_base64, private_config_base64 = yield from self.extract_config() + startup_config_base64, private_config_base64 = await self.extract_config() if startup_config_base64: startup_config = self.startup_config_path try: @@ -1630,28 +1578,27 @@ class Router(BaseNode): except (binascii.Error, OSError) as e: raise DynamipsError("Could not save the private configuration {}: {}".format(config_path, e)) - def delete(self): + async def delete(self): """ Deletes this VM (including all its files). """ try: - yield from wait_run_in_executor(shutil.rmtree, self._working_directory) + await wait_run_in_executor(shutil.rmtree, self._working_directory) except OSError as e: log.warning("Could not delete file {}".format(e)) self.manager.release_dynamips_id(self._project.id, self._dynamips_id) - @asyncio.coroutine - def clean_delete(self): + async def clean_delete(self): """ Deletes this router & associated files (nvram, disks etc.) """ - yield from self._hypervisor.send('vm clean_delete "{}"'.format(self._name)) + await self._hypervisor.send('vm clean_delete "{}"'.format(self._name)) self._hypervisor.devices.remove(self) try: - yield from wait_run_in_executor(shutil.rmtree, self._working_directory) + await wait_run_in_executor(shutil.rmtree, self._working_directory) except OSError as e: log.warning("Could not delete file {}".format(e)) log.info('Router "{name}" [{id}] has been deleted (including associated files)'.format(name=self._name, id=self._id)) diff --git a/gns3server/compute/iou/__init__.py b/gns3server/compute/iou/__init__.py index 4cba97d7..028736b7 100644 --- a/gns3server/compute/iou/__init__.py +++ b/gns3server/compute/iou/__init__.py @@ -41,20 +41,19 @@ class IOU(BaseManager): super().__init__() self._iou_id_lock = asyncio.Lock() - @asyncio.coroutine - def create_node(self, *args, **kwargs): + async def create_node(self, *args, **kwargs): """ Creates a new IOU VM. 
:returns: IOUVM instance """ - with (yield from self._iou_id_lock): + async with self._iou_id_lock: # wait for a node to be completely created before adding a new one # this is important otherwise we allocate the same application ID # when creating multiple IOU node at the same time application_id = get_next_application_id(self.nodes) - node = yield from super().create_node(*args, application_id=application_id, **kwargs) + node = await super().create_node(*args, application_id=application_id, **kwargs) return node @staticmethod diff --git a/gns3server/compute/iou/iou_vm.py b/gns3server/compute/iou/iou_vm.py index 66f2cd77..b270245a 100644 --- a/gns3server/compute/iou/iou_vm.py +++ b/gns3server/compute/iou/iou_vm.py @@ -101,13 +101,12 @@ class IOUVM(BaseNode): self.save_configs() self.updated() - @asyncio.coroutine - def close(self): + async def close(self): """ Closes this IOU VM. """ - if not (yield from super().close()): + if not (await super().close()): return False adapters = self._ethernet_adapters + self._serial_adapters @@ -117,7 +116,7 @@ class IOUVM(BaseNode): if nio and isinstance(nio, NIOUDP): self.manager.port_manager.release_udp_port(nio.lport, self._project) - yield from self.stop() + await self.stop() @property def path(self): @@ -164,14 +163,13 @@ class IOUVM(BaseNode): else: log.info('IOU "{name}" [{id}]: does not use the default IOU image values'.format(name=self._name, id=self._id)) - @asyncio.coroutine - def update_default_iou_values(self): + async def update_default_iou_values(self): """ Finds the default RAM and NVRAM values for the IOU image. """ try: - output = yield from gns3server.utils.asyncio.subprocess_check_output(self._path, "-h", cwd=self.working_dir, stderr=True) + output = await gns3server.utils.asyncio.subprocess_check_output(self._path, "-h", cwd=self.working_dir, stderr=True) match = re.search("-n \s+Size of nvram in Kb \(default ([0-9]+)KB\)", output) if match: self.nvram = int(match.group(1)) @@ -181,10 +179,9 @@ class IOUVM(BaseNode): except (ValueError, OSError, subprocess.SubprocessError) as e: log.warning("could not find default RAM and NVRAM values for {}: {}".format(os.path.basename(self._path), e)) - @asyncio.coroutine - def create(self): + async def create(self): - yield from self.update_default_iou_values() + await self.update_default_iou_values() def _check_requirements(self): """ @@ -361,14 +358,13 @@ class IOUVM(BaseNode): except OSError as e: raise IOUError("Could not write the iourc file {}: {}".format(path, e)) - @asyncio.coroutine - def _library_check(self): + async def _library_check(self): """ Checks for missing shared library dependencies in the IOU image. """ try: - output = yield from gns3server.utils.asyncio.subprocess_check_output("ldd", self._path) + output = await gns3server.utils.asyncio.subprocess_check_output("ldd", self._path) except (OSError, subprocess.SubprocessError) as e: log.warning("Could not determine the shared library dependencies for {}: {}".format(self._path, e)) return @@ -379,8 +375,7 @@ class IOUVM(BaseNode): raise IOUError("The following shared library dependencies cannot be found for IOU image {}: {}".format(self._path, ", ".join(missing_libs))) - @asyncio.coroutine - def _check_iou_licence(self): + async def _check_iou_licence(self): """ Checks for a valid IOU key in the iourc file (paranoid mode). 
""" @@ -419,7 +414,7 @@ class IOUVM(BaseNode): # in tests or generating one if not hasattr(sys, "_called_from_test"): try: - hostid = (yield from gns3server.utils.asyncio.subprocess_check_output("hostid")).strip() + hostid = (await gns3server.utils.asyncio.subprocess_check_output("hostid")).strip() except FileNotFoundError as e: raise IOUError("Could not find hostid: {}".format(e)) except (OSError, subprocess.SubprocessError) as e: @@ -477,8 +472,7 @@ class IOUVM(BaseNode): except OSError as e: raise IOUError("Cannot write nvram file {}: {}".format(nvram_file, e)) - @asyncio.coroutine - def start(self): + async def start(self): """ Starts the IOU process. """ @@ -486,7 +480,7 @@ class IOUVM(BaseNode): self._check_requirements() if not self.is_running(): - yield from self._library_check() + await self._library_check() try: self._rename_nvram_file() @@ -499,13 +493,13 @@ class IOUVM(BaseNode): if not os.path.isfile(iourc_path): raise IOUError("The iourc path '{}' is not a regular file".format(iourc_path)) - yield from self._check_iou_licence() - yield from self._start_ubridge() + await self._check_iou_licence() + await self._start_ubridge() self._create_netmap_config() if self.use_default_iou_values: # make sure we have the default nvram amount to correctly push the configs - yield from self.update_default_iou_values() + await self.update_default_iou_values() self._push_configs_to_nvram() # check if there is enough RAM to run @@ -518,11 +512,11 @@ class IOUVM(BaseNode): if "IOURC" not in os.environ: env["IOURC"] = iourc_path - command = yield from self._build_command() + command = await self._build_command() try: log.info("Starting IOU: {}".format(command)) self.command_line = ' '.join(command) - self._iou_process = yield from asyncio.create_subprocess_exec( + self._iou_process = await asyncio.create_subprocess_exec( *command, stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE, @@ -544,17 +538,16 @@ class IOUVM(BaseNode): if self.console and self.console_type == "telnet": server = AsyncioTelnetServer(reader=self._iou_process.stdout, writer=self._iou_process.stdin, binary=True, echo=True) try: - self._telnet_server = yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console) + self._telnet_server = await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console) except OSError as e: - yield from self.stop() + await self.stop() raise IOUError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e)) # configure networking support - yield from self._networking() + await self._networking() @locking - @asyncio.coroutine - def _networking(self): + async def _networking(self): """ Configures the IOL bridge in uBridge. 
""" @@ -562,10 +555,10 @@ class IOUVM(BaseNode): bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512) try: # delete any previous bridge if it exists - yield from self._ubridge_send("iol_bridge delete {name}".format(name=bridge_name)) + await self._ubridge_send("iol_bridge delete {name}".format(name=bridge_name)) except UbridgeError: pass - yield from self._ubridge_send("iol_bridge create {name} {bridge_id}".format(name=bridge_name, bridge_id=self.application_id + 512)) + await self._ubridge_send("iol_bridge create {name} {bridge_id}".format(name=bridge_name, bridge_id=self.application_id + 512)) bay_id = 0 for adapter in self._adapters: @@ -573,7 +566,7 @@ class IOUVM(BaseNode): for unit in adapter.ports.keys(): nio = adapter.get_nio(unit) if nio and isinstance(nio, NIOUDP): - yield from self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name, + await self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name, iol_id=self.application_id, bay=bay_id, unit=unit_id, @@ -581,15 +574,15 @@ class IOUVM(BaseNode): rhost=nio.rhost, rport=nio.rport)) if nio.capturing: - yield from self._ubridge_send('iol_bridge start_capture {name} "{output_file}" {data_link_type}'.format(name=bridge_name, + await self._ubridge_send('iol_bridge start_capture {name} "{output_file}" {data_link_type}'.format(name=bridge_name, output_file=nio.pcap_output_file, data_link_type=re.sub("^DLT_", "", nio.pcap_data_link_type))) - yield from self._ubridge_apply_filters(bay_id, unit_id, nio.filters) + await self._ubridge_apply_filters(bay_id, unit_id, nio.filters) unit_id += 1 bay_id += 1 - yield from self._ubridge_send("iol_bridge start {name}".format(name=bridge_name)) + await self._ubridge_send("iol_bridge start {name}".format(name=bridge_name)) def _termination_callback(self, process_name, returncode): """ @@ -624,13 +617,12 @@ class IOUVM(BaseNode): for file_path in glob.glob(os.path.join(glob.escape(self.working_dir), "vlan.dat-*")): shutil.move(file_path, destination) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the IOU process. """ - yield from self._stop_ubridge() + await self._stop_ubridge() if self._nvram_watcher: self._nvram_watcher.close() self._nvram_watcher = None @@ -643,7 +635,7 @@ class IOUVM(BaseNode): self._terminate_process_iou() if self._iou_process.returncode is None: try: - yield from gns3server.utils.asyncio.wait_for_process_termination(self._iou_process, timeout=3) + await gns3server.utils.asyncio.wait_for_process_termination(self._iou_process, timeout=3) except asyncio.TimeoutError: if self._iou_process.returncode is None: log.warning("IOU process {} is still running... killing it".format(self._iou_process.pid)) @@ -671,14 +663,13 @@ class IOUVM(BaseNode): self._started = False self.status = "stopped" - @asyncio.coroutine - def reload(self): + async def reload(self): """ Reloads the IOU process (stop & start). """ - yield from self.stop() - yield from self.start() + await self.stop() + await self.start() def is_running(self): """ @@ -723,8 +714,7 @@ class IOUVM(BaseNode): except OSError as e: raise IOUError("Could not create {}: {}".format(netmap_path, e)) - @asyncio.coroutine - def _build_command(self): + async def _build_command(self): """ Command to start the IOU process. 
(to be passed to subprocess.Popen()) @@ -769,7 +759,7 @@ class IOUVM(BaseNode): # command.extend(["-c", os.path.basename(startup_config_file)]) if self._l1_keepalives: - yield from self._enable_l1_keepalives(command) + await self._enable_l1_keepalives(command) command.extend([str(self.application_id)]) return command @@ -848,8 +838,7 @@ class IOUVM(BaseNode): self._adapters = self._ethernet_adapters + self._serial_adapters - @asyncio.coroutine - def adapter_add_nio_binding(self, adapter_number, port_number, nio): + async def adapter_add_nio_binding(self, adapter_number, port_number, nio): """ Adds a adapter NIO binding. @@ -877,17 +866,16 @@ class IOUVM(BaseNode): if self.ubridge: bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512) - yield from self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name, + await self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name, iol_id=self.application_id, bay=adapter_number, unit=port_number, lport=nio.lport, rhost=nio.rhost, rport=nio.rport)) - yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters) + await self._ubridge_apply_filters(adapter_number, port_number, nio.filters) - @asyncio.coroutine - def adapter_update_nio_binding(self, adapter_number, port_number, nio): + async def adapter_update_nio_binding(self, adapter_number, port_number, nio): """ Update a port NIO binding. @@ -897,10 +885,9 @@ class IOUVM(BaseNode): """ if self.ubridge: - yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters) + await self._ubridge_apply_filters(adapter_number, port_number, nio.filters) - @asyncio.coroutine - def _ubridge_apply_filters(self, adapter_number, port_number, filters): + async def _ubridge_apply_filters(self, adapter_number, port_number, filters): """ Apply filter like rate limiting @@ -913,15 +900,14 @@ class IOUVM(BaseNode): bridge_name=bridge_name, bay=adapter_number, unit=port_number) - yield from self._ubridge_send('iol_bridge reset_packet_filters ' + location) + await self._ubridge_send('iol_bridge reset_packet_filters ' + location) for filter in self._build_filter_list(filters): cmd = 'iol_bridge add_packet_filter {} {}'.format( location, filter) - yield from self._ubridge_send(cmd) + await self._ubridge_send(cmd) - @asyncio.coroutine - def adapter_remove_nio_binding(self, adapter_number, port_number): + async def adapter_remove_nio_binding(self, adapter_number, port_number): """ Removes an adapter NIO binding. @@ -952,7 +938,7 @@ class IOUVM(BaseNode): if self.ubridge: bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512) - yield from self._ubridge_send("iol_bridge delete_nio_udp {name} {bay} {unit}".format(name=bridge_name, + await self._ubridge_send("iol_bridge delete_nio_udp {name} {bay} {unit}".format(name=bridge_name, bay=adapter_number, unit=port_number)) @@ -982,8 +968,7 @@ class IOUVM(BaseNode): else: log.info('IOU "{name}" [{id}]: has deactivated layer 1 keepalive messages'.format(name=self._name, id=self._id)) - @asyncio.coroutine - def _enable_l1_keepalives(self, command): + async def _enable_l1_keepalives(self, command): """ Enables L1 keepalive messages if supported. 
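_ubridge_apply_filters() above first resets the packet filters on the IOL bridge port, then re-adds each filter with one awaited uBridge command per entry. A reduced sketch of that loop; send() and filter_specs are placeholders for _ubridge_send() and the output of _build_filter_list():

async def apply_filters(send, bridge_name, bay, unit, filter_specs):
    """Reset then re-apply packet filters on one IOL bridge port."""
    location = '{} {} {}'.format(bridge_name, bay, unit)
    await send('iol_bridge reset_packet_filters ' + location)
    for spec in filter_specs:
        await send('iol_bridge add_packet_filter {} {}'.format(location, spec))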
@@ -994,7 +979,7 @@ class IOUVM(BaseNode): if "IOURC" not in os.environ: env["IOURC"] = self.iourc_path try: - output = yield from gns3server.utils.asyncio.subprocess_check_output(self._path, "-h", cwd=self.working_dir, env=env, stderr=True) + output = await gns3server.utils.asyncio.subprocess_check_output(self._path, "-h", cwd=self.working_dir, env=env, stderr=True) if re.search("-l\s+Enable Layer 1 keepalive messages", output): command.extend(["-l"]) else: @@ -1226,8 +1211,7 @@ class IOUVM(BaseNode): except (binascii.Error, OSError) as e: raise IOUError("Could not save the private configuration {}: {}".format(config_path, e)) - @asyncio.coroutine - def start_capture(self, adapter_number, port_number, output_file, data_link_type="DLT_EN10MB"): + async def start_capture(self, adapter_number, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. @@ -1265,14 +1249,13 @@ class IOUVM(BaseNode): if self.ubridge: bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512) - yield from self._ubridge_send('iol_bridge start_capture {name} {bay} {unit} "{output_file}" {data_link_type}'.format(name=bridge_name, + await self._ubridge_send('iol_bridge start_capture {name} {bay} {unit} "{output_file}" {data_link_type}'.format(name=bridge_name, bay=adapter_number, unit=port_number, output_file=output_file, data_link_type=re.sub("^DLT_", "", data_link_type))) - @asyncio.coroutine - def stop_capture(self, adapter_number, port_number): + async def stop_capture(self, adapter_number, port_number): """ Stops a packet capture. @@ -1302,6 +1285,6 @@ class IOUVM(BaseNode): port_number=port_number)) if self.ubridge: bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512) - yield from self._ubridge_send('iol_bridge stop_capture {name} {bay} {unit}'.format(name=bridge_name, + await self._ubridge_send('iol_bridge stop_capture {name} {bay} {unit}'.format(name=bridge_name, bay=adapter_number, unit=port_number)) diff --git a/gns3server/compute/project.py b/gns3server/compute/project.py index d67e9adb..1807ec03 100644 --- a/gns3server/compute/project.py +++ b/gns3server/compute/project.py @@ -26,7 +26,7 @@ from uuid import UUID, uuid4 from .port_manager import PortManager from .notification_manager import NotificationManager from ..config import Config -from ..utils.asyncio import wait_run_in_executor, asyncio_ensure_future +from ..utils.asyncio import wait_run_in_executor from ..utils.path import check_path_allowed, get_default_project_directory import logging @@ -282,7 +282,7 @@ class Project: raise aiohttp.web.HTTPNotFound(text="Node ID {} doesn't exist".format(node_id)) - def remove_node(self, node): + async def remove_node(self, node): """ Removes a node from the project. In theory this should be called by the node manager. 
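_enable_l1_keepalives() above only appends -l after confirming the flag in the IOU image's -h output. The same idea reduced to the regex check on an already-captured help string (a sketch; obtaining the output via subprocess_check_output is left out):

import re

def maybe_enable_l1_keepalives(command, help_output):
    """Append "-l" only when the IOU help text advertises the option."""
    if re.search(r"-l\s+Enable Layer 1 keepalive messages", help_output):
        command.append("-l")
        return True
    return False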
@@ -291,11 +291,10 @@ class Project: """ if node in self._nodes: - yield from node.delete() + await node.delete() self._nodes.remove(node) - @asyncio.coroutine - def update(self, variables=None, **kwargs): + async def update(self, variables=None, **kwargs): original_variables = self.variables self.variables = variables @@ -303,10 +302,9 @@ class Project: if original_variables != variables: for node in self.nodes: if hasattr(node, 'update'): - yield from node.update() + await node.update() - @asyncio.coroutine - def close(self): + async def close(self): """ Closes the project, but keep project data on disk """ @@ -317,15 +315,15 @@ class Project: module_nodes_id = set([n.id for n in module.instance().nodes]) # We close the project only for the modules using it if len(module_nodes_id & project_nodes_id): - yield from module.instance().project_closing(self) + await module.instance().project_closing(self) - yield from self._close_and_clean(False) + await self._close_and_clean(False) for module in self.compute(): module_nodes_id = set([n.id for n in module.instance().nodes]) # We close the project only for the modules using it if len(module_nodes_id & project_nodes_id): - yield from module.instance().project_closed(self) + await module.instance().project_closed(self) try: if os.path.exists(self.tmp_working_directory()): @@ -333,8 +331,7 @@ class Project: except OSError: pass - @asyncio.coroutine - def _close_and_clean(self, cleanup): + async def _close_and_clean(self, cleanup): """ Closes the project, and cleanup the disk if cleanup is True @@ -343,10 +340,10 @@ class Project: tasks = [] for node in self._nodes: - tasks.append(asyncio_ensure_future(node.manager.close_node(node.id))) + tasks.append(asyncio.ensure_future(node.manager.close_node(node.id))) if tasks: - done, _ = yield from asyncio.wait(tasks) + done, _ = await asyncio.wait(tasks) for future in done: try: future.result() @@ -356,7 +353,7 @@ class Project: if cleanup and os.path.exists(self.path): self._deleted = True try: - yield from wait_run_in_executor(shutil.rmtree, self.path) + await wait_run_in_executor(shutil.rmtree, self.path) log.info("Project {id} with path '{path}' deleted".format(path=self._path, id=self._id)) except OSError as e: raise aiohttp.web.HTTPInternalServerError(text="Could not delete the project directory: {}".format(e)) @@ -375,17 +372,16 @@ class Project: for port in self._used_udp_ports.copy(): port_manager.release_udp_port(port, self) - @asyncio.coroutine - def delete(self): + async def delete(self): """ Removes project from disk """ for module in self.compute(): - yield from module.instance().project_closing(self) - yield from self._close_and_clean(True) + await module.instance().project_closing(self) + await self._close_and_clean(True) for module in self.compute(): - yield from module.instance().project_closed(self) + await module.instance().project_closed(self) def compute(self): """ @@ -405,8 +401,7 @@ class Project: """ NotificationManager.instance().emit(action, event, project_id=self.id) - @asyncio.coroutine - def list_files(self): + async def list_files(self): """ :returns: Array of files in project without temporary files. 
The files are dictionary {"path": "test.bin", "md5sum": "aaaaa"} """ @@ -421,7 +416,7 @@ class Project: file_info = {"path": path} try: - file_info["md5sum"] = yield from wait_run_in_executor(self._hash_file, os.path.join(dirpath, filename)) + file_info["md5sum"] = await wait_run_in_executor(self._hash_file, os.path.join(dirpath, filename)) except OSError: continue files.append(file_info) diff --git a/gns3server/compute/qemu/__init__.py b/gns3server/compute/qemu/__init__.py index 0b62cd4b..3be2f952 100644 --- a/gns3server/compute/qemu/__init__.py +++ b/gns3server/compute/qemu/__init__.py @@ -41,8 +41,7 @@ class Qemu(BaseManager): _NODE_TYPE = "qemu" @staticmethod - @asyncio.coroutine - def get_kvm_archs(): + async def get_kvm_archs(): """ Gets a list of architectures for which KVM is available on this server. @@ -108,7 +107,7 @@ class Qemu(BaseManager): return paths @staticmethod - def binary_list(archs=None): + async def binary_list(archs=None): """ Gets QEMU binaries list available on the host. @@ -128,11 +127,11 @@ class Qemu(BaseManager): for arch in archs: if f.endswith(arch) or f.endswith("{}.exe".format(arch)) or f.endswith("{}w.exe".format(arch)): qemu_path = os.path.join(path, f) - version = yield from Qemu.get_qemu_version(qemu_path) + version = await Qemu.get_qemu_version(qemu_path) qemus.append({"path": qemu_path, "version": version}) else: qemu_path = os.path.join(path, f) - version = yield from Qemu.get_qemu_version(qemu_path) + version = await Qemu.get_qemu_version(qemu_path) qemus.append({"path": qemu_path, "version": version}) except OSError: @@ -141,7 +140,7 @@ class Qemu(BaseManager): return qemus @staticmethod - def img_binary_list(): + async def img_binary_list(): """ Gets QEMU-img binaries list available on the host. @@ -155,7 +154,7 @@ class Qemu(BaseManager): os.access(os.path.join(path, f), os.X_OK) and \ os.path.isfile(os.path.join(path, f)): qemu_path = os.path.join(path, f) - version = yield from Qemu._get_qemu_img_version(qemu_path) + version = await Qemu._get_qemu_img_version(qemu_path) qemu_imgs.append({"path": qemu_path, "version": version}) except OSError: continue @@ -163,8 +162,7 @@ class Qemu(BaseManager): return qemu_imgs @staticmethod - @asyncio.coroutine - def get_qemu_version(qemu_path): + async def get_qemu_version(qemu_path): """ Gets the Qemu version. @@ -187,7 +185,7 @@ class Qemu(BaseManager): return "" else: try: - output = yield from subprocess_check_output(qemu_path, "-version") + output = await subprocess_check_output(qemu_path, "-version") match = re.search("version\s+([0-9a-z\-\.]+)", output) if match: version = match.group(1) @@ -198,8 +196,7 @@ class Qemu(BaseManager): raise QemuError("Error while looking for the Qemu version: {}".format(e)) @staticmethod - @asyncio.coroutine - def _get_qemu_img_version(qemu_img_path): + async def _get_qemu_img_version(qemu_img_path): """ Gets the Qemu-img version. 
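list_files() above hashes every project file through wait_run_in_executor() so the md5 computation runs in a worker thread instead of blocking the event loop. A minimal sketch of that offloading idea using only the standard library; _md5_file and md5_in_executor are hypothetical helpers, not the project's wait_run_in_executor:

import asyncio
import hashlib

def _md5_file(path, block_size=65536):
    """Blocking helper: hash a file in fixed-size chunks."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(block_size), b""):
            digest.update(block)
    return digest.hexdigest()

async def md5_in_executor(path):
    # Run the blocking hash in the default thread-pool executor.
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(None, _md5_file, path)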
@@ -207,7 +204,7 @@ class Qemu(BaseManager): """ try: - output = yield from subprocess_check_output(qemu_img_path, "--version") + output = await subprocess_check_output(qemu_img_path, "--version") match = re.search("version\s+([0-9a-z\-\.]+)", output) if match: version = match.group(1) @@ -258,8 +255,7 @@ class Qemu(BaseManager): return os.path.join("qemu", "vm-{}".format(legacy_vm_id)) - @asyncio.coroutine - def create_disk(self, qemu_img, path, options): + async def create_disk(self, qemu_img, path, options): """ Create a Qemu disk with qemu-img @@ -290,13 +286,12 @@ class Qemu(BaseManager): command.append(path) command.append("{}M".format(img_size)) - process = yield from asyncio.create_subprocess_exec(*command) - yield from process.wait() + process = await asyncio.create_subprocess_exec(*command) + await process.wait() except (OSError, subprocess.SubprocessError) as e: raise QemuError("Could not create disk image {}:{}".format(path, e)) - @asyncio.coroutine - def resize_disk(self, qemu_img, path, extend): + async def resize_disk(self, qemu_img, path, extend): """ Resize a Qemu disk with qemu-img @@ -314,8 +309,8 @@ class Qemu(BaseManager): if not os.path.exists(path): raise QemuError("Qemu disk '{}' does not exist".format(path)) command = [qemu_img, "resize", path, "+{}M".format(extend)] - process = yield from asyncio.create_subprocess_exec(*command) - yield from process.wait() + process = await asyncio.create_subprocess_exec(*command) + await process.wait() log.info("Qemu disk '{}' extended by {} MB".format(path, extend)) except (OSError, subprocess.SubprocessError) as e: raise QemuError("Could not update disk image {}:{}".format(path, e)) diff --git a/gns3server/compute/qemu/qcow2.py b/gns3server/compute/qemu/qcow2.py index c88389e2..efbb54fe 100644 --- a/gns3server/compute/qemu/qcow2.py +++ b/gns3server/compute/qemu/qcow2.py @@ -87,8 +87,7 @@ class Qcow2: return None return path - @asyncio.coroutine - def rebase(self, qemu_img, base_image): + async def rebase(self, qemu_img, base_image): """ Rebase a linked clone in order to use the correct disk @@ -99,8 +98,8 @@ class Qcow2: if not os.path.exists(base_image): raise FileNotFoundError(base_image) command = [qemu_img, "rebase", "-u", "-b", base_image, self._path] - process = yield from asyncio.create_subprocess_exec(*command) - retcode = yield from process.wait() + process = await asyncio.create_subprocess_exec(*command) + retcode = await process.wait() if retcode != 0: raise Qcow2Error("Could not rebase the image") self._reload() diff --git a/gns3server/compute/qemu/qemu_vm.py b/gns3server/compute/qemu/qemu_vm.py index 783b6fe0..40d31819 100644 --- a/gns3server/compute/qemu/qemu_vm.py +++ b/gns3server/compute/qemu/qemu_vm.py @@ -795,8 +795,7 @@ class QemuVM(BaseNode): kernel_command_line=kernel_command_line)) self._kernel_command_line = kernel_command_line - @asyncio.coroutine - def _set_process_priority(self): + async def _set_process_priority(self): """ Changes the process priority """ @@ -844,8 +843,8 @@ class QemuVM(BaseNode): else: priority = 0 try: - process = yield from asyncio.create_subprocess_exec('renice', '-n', str(priority), '-p', str(self._process.pid)) - yield from process.wait() + process = await asyncio.create_subprocess_exec('renice', '-n', str(priority), '-p', str(self._process.pid)) + await process.wait() except (OSError, subprocess.SubprocessError) as e: log.error('Could not change process priority for QEMU VM "{}": {}'.format(self._name, e)) @@ -881,8 +880,7 @@ class QemuVM(BaseNode): except (OSError, 
subprocess.SubprocessError) as e: raise QemuError("Could not throttle CPU: {}".format(e)) - @asyncio.coroutine - def create(self): + async def create(self): """ Creates QEMU VM and sets proper MD5 hashes """ @@ -890,23 +888,22 @@ class QemuVM(BaseNode): # In case user upload image manually we don't have md5 sums. # We need generate hashes at this point, otherwise they will be generated # at __json__ but not on separate thread. - yield from cancellable_wait_run_in_executor(md5sum, self._hda_disk_image) - yield from cancellable_wait_run_in_executor(md5sum, self._hdb_disk_image) - yield from cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image) - yield from cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image) + await cancellable_wait_run_in_executor(md5sum, self._hda_disk_image) + await cancellable_wait_run_in_executor(md5sum, self._hdb_disk_image) + await cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image) + await cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image) super(QemuVM, self).create() - @asyncio.coroutine - def start(self): + async def start(self): """ Starts this QEMU VM. """ - with (yield from self._execute_lock): + async with self._execute_lock: if self.is_running(): # resume the VM if it is paused - yield from self.resume() + await self.resume() return if self._manager.config.get_section_config("Qemu").getboolean("monitor", True): @@ -927,7 +924,7 @@ class QemuVM(BaseNode): # check if there is enough RAM to run self.check_available_ram(self.ram) - command = yield from self._build_command() + command = await self._build_command() command_string = " ".join(shlex.quote(s) for s in command) try: log.info("Starting QEMU with: {}".format(command_string)) @@ -936,7 +933,7 @@ class QemuVM(BaseNode): with open(self._stdout_file, "w", encoding="utf-8") as fd: fd.write("Start QEMU with {}\n\nExecution log:\n".format(command_string)) self.command_line = ' '.join(command) - self._process = yield from asyncio.create_subprocess_exec(*command, + self._process = await asyncio.create_subprocess_exec(*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir) @@ -948,34 +945,33 @@ class QemuVM(BaseNode): log.error("Could not start QEMU {}: {}\n{}".format(self.qemu_path, e, stdout)) raise QemuError("Could not start QEMU {}: {}\n{}".format(self.qemu_path, e, stdout)) - yield from self._set_process_priority() + await self._set_process_priority() if self._cpu_throttling: self._set_cpu_throttling() if "-enable-kvm" in command_string or "-enable-hax" in command_string: self._hw_virtualization = True - yield from self._start_ubridge() + await self._start_ubridge() set_link_commands = [] for adapter_number, adapter in enumerate(self._ethernet_adapters): nio = adapter.get_nio(0) if nio: - yield from self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number), + await self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number), self._local_udp_tunnels[adapter_number][1], nio) if nio.suspend: set_link_commands.append("set_link gns3-{} off".format(adapter_number)) else: set_link_commands.append("set_link gns3-{} off".format(adapter_number)) - yield from self._control_vm_commands(set_link_commands) + await self._control_vm_commands(set_link_commands) try: - yield from self.start_wrap_console() + await self.start_wrap_console() except OSError as e: raise QemuError("Could not start Telnet QEMU console {}\n".format(e)) - @asyncio.coroutine - def _termination_callback(self, returncode): + async def _termination_callback(self, 
returncode): """ Called when the process has stopped. @@ -984,19 +980,18 @@ class QemuVM(BaseNode): if self.started: log.info("QEMU process has stopped, return code: %d", returncode) - yield from self.stop() + await self.stop() # A return code of 1 seem fine on Windows if returncode != 0 and (returncode != 1 or not sys.platform.startswith("win")): self.project.emit("log.error", {"message": "QEMU process has stopped, return code: {}\n{}".format(returncode, self.read_stdout())}) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops this QEMU VM. """ - yield from self._stop_ubridge() - with (yield from self._execute_lock): + await self._stop_ubridge() + async with self._execute_lock: # stop the QEMU process self._hw_virtualization = False if self.is_running(): @@ -1004,22 +999,22 @@ class QemuVM(BaseNode): try: if self.on_close == "save_vm_state": - yield from self._control_vm("stop") - yield from self._control_vm("savevm GNS3_SAVED_STATE") + await self._control_vm("stop") + await self._control_vm("savevm GNS3_SAVED_STATE") wait_for_savevm = 120 while wait_for_savevm: - yield from asyncio.sleep(1) - status = yield from self._saved_state_option() + await asyncio.sleep(1) + status = await self._saved_state_option() wait_for_savevm -= 1 if status != []: break if self.on_close == "shutdown_signal": - yield from self._control_vm("system_powerdown") - yield from gns3server.utils.asyncio.wait_for_process_termination(self._process, timeout=30) + await self._control_vm("system_powerdown") + await gns3server.utils.asyncio.wait_for_process_termination(self._process, timeout=30) else: self._process.terminate() - yield from gns3server.utils.asyncio.wait_for_process_termination(self._process, timeout=3) + await gns3server.utils.asyncio.wait_for_process_termination(self._process, timeout=3) except ProcessLookupError: pass except asyncio.TimeoutError: @@ -1033,11 +1028,10 @@ class QemuVM(BaseNode): self._process = None self._stop_cpulimit() if self.on_close != "save_vm_state": - yield from self._clear_save_vm_stated() - yield from super().stop() + await self._clear_save_vm_stated() + await super().stop() - @asyncio.coroutine - def _open_qemu_monitor_connection_vm(self, timeout=10): + async def _open_qemu_monitor_connection_vm(self, timeout=10): """ Opens a connection to the QEMU monitor. @@ -1050,10 +1044,10 @@ class QemuVM(BaseNode): last_exception = None reader = writer = None while time.time() - begin < timeout: - yield from asyncio.sleep(0.01) + await asyncio.sleep(0.01) try: log.debug("Connecting to Qemu monitor on {}:{}".format(self._monitor_host, self._monitor)) - reader, writer = yield from asyncio.open_connection(self._monitor_host, self._monitor) + reader, writer = await asyncio.open_connection(self._monitor_host, self._monitor) except (asyncio.TimeoutError, OSError) as e: last_exception = e continue @@ -1067,8 +1061,7 @@ class QemuVM(BaseNode): log.info("Connected to QEMU monitor on {}:{} after {:.4f} seconds".format(self._monitor_host, self._monitor, time.time() - begin)) return reader, writer - @asyncio.coroutine - def _control_vm(self, command, expected=None): + async def _control_vm(self, command, expected=None): """ Executes a command with QEMU monitor when this VM is running. 
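Both start() and stop() above serialize access to the QEMU process through self._execute_lock; the old with (yield from self._execute_lock): form becomes async with self._execute_lock: after the conversion. A minimal sketch of that locking pattern with hypothetical start/stop bodies:

import asyncio

class VMSketch:
    def __init__(self):
        self._execute_lock = asyncio.Lock()
        self._running = False

    async def start(self):
        # Previously: with (yield from self._execute_lock): ...
        async with self._execute_lock:
            self._running = True

    async def stop(self):
        async with self._execute_lock:
            self._running = False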
@@ -1081,7 +1074,7 @@ class QemuVM(BaseNode): result = None if self.is_running() and self._monitor: log.info("Execute QEMU monitor command: {}".format(command)) - reader, writer = yield from self._open_qemu_monitor_connection_vm() + reader, writer = await self._open_qemu_monitor_connection_vm() if reader is None and writer is None: return result @@ -1094,7 +1087,7 @@ class QemuVM(BaseNode): if expected: try: while result is None: - line = yield from reader.readline() + line = await reader.readline() if not line: break for expect in expected: @@ -1106,8 +1099,7 @@ class QemuVM(BaseNode): writer.close() return result - @asyncio.coroutine - def _control_vm_commands(self, commands): + async def _control_vm_commands(self, commands): """ Executes commands with QEMU monitor when this VM is running. @@ -1116,7 +1108,7 @@ class QemuVM(BaseNode): if self.is_running() and self._monitor: - reader, writer = yield from self._open_qemu_monitor_connection_vm() + reader, writer = await self._open_qemu_monitor_connection_vm() if reader is None and writer is None: return @@ -1128,17 +1120,16 @@ class QemuVM(BaseNode): log.warning("Could not write to QEMU monitor: {}".format(e)) writer.close() - @asyncio.coroutine - def close(self): + async def close(self): """ Closes this QEMU VM. """ - if not (yield from super().close()): + if not (await super().close()): return False self.on_close = "power_off" - yield from self.stop() + await self.stop() for adapter in self._ethernet_adapters: if adapter is not None: @@ -1151,8 +1142,7 @@ class QemuVM(BaseNode): self.manager.port_manager.release_udp_port(udp_tunnel[1].lport, self._project) self._local_udp_tunnels = {} - @asyncio.coroutine - def _get_vm_status(self): + async def _get_vm_status(self): """ Returns this VM suspend status. @@ -1162,7 +1152,7 @@ class QemuVM(BaseNode): :returns: status (string) """ - result = yield from self._control_vm("info status", [ + result = await self._control_vm("info status", [ b"debug", b"inmigrate", b"internal-error", b"io-error", b"paused", b"postmigrate", b"prelaunch", b"finish-migrate", b"restore-vm", b"running", b"save-vm", b"shutdown", b"suspended", @@ -1179,49 +1169,45 @@ class QemuVM(BaseNode): self.status = "stopped" return status - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspends this QEMU VM. """ if self.is_running(): - vm_status = yield from self._get_vm_status() + vm_status = await self._get_vm_status() if vm_status is None: raise QemuError("Suspending a QEMU VM is not supported") elif vm_status == "running" or vm_status == "prelaunch": - yield from self._control_vm("stop") + await self._control_vm("stop") self.status = "suspended" log.debug("QEMU VM has been suspended") else: log.info("QEMU VM is not running to be suspended, current status is {}".format(vm_status)) - @asyncio.coroutine - def reload(self): + async def reload(self): """ Reloads this QEMU VM. """ - yield from self._control_vm("system_reset") + await self._control_vm("system_reset") log.debug("QEMU VM has been reset") - @asyncio.coroutine - def resume(self): + async def resume(self): """ Resumes this QEMU VM. 
""" - vm_status = yield from self._get_vm_status() + vm_status = await self._get_vm_status() if vm_status is None: raise QemuError("Resuming a QEMU VM is not supported") elif vm_status == "paused": - yield from self._control_vm("cont") + await self._control_vm("cont") log.debug("QEMU VM has been resumed") else: log.info("QEMU VM is not paused to be resumed, current status is {}".format(vm_status)) - @asyncio.coroutine - def adapter_add_nio_binding(self, adapter_number, nio): + async def adapter_add_nio_binding(self, adapter_number, nio): """ Adds a port NIO binding. @@ -1237,10 +1223,10 @@ class QemuVM(BaseNode): if self.is_running(): try: - yield from self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number), + await self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number), self._local_udp_tunnels[adapter_number][1], nio) - yield from self._control_vm("set_link gns3-{} on".format(adapter_number)) + await self._control_vm("set_link gns3-{} on".format(adapter_number)) except (IndexError, KeyError): raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name, adapter_number=adapter_number)) @@ -1251,8 +1237,7 @@ class QemuVM(BaseNode): nio=nio, adapter_number=adapter_number)) - @asyncio.coroutine - def adapter_update_nio_binding(self, adapter_number, nio): + async def adapter_update_nio_binding(self, adapter_number, nio): """ Update a port NIO binding. @@ -1262,19 +1247,18 @@ class QemuVM(BaseNode): if self.is_running(): try: - yield from self.update_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number), + await self.update_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number), self._local_udp_tunnels[adapter_number][1], nio) if nio.suspend: - yield from self._control_vm("set_link gns3-{} off".format(adapter_number)) + await self._control_vm("set_link gns3-{} off".format(adapter_number)) else: - yield from self._control_vm("set_link gns3-{} on".format(adapter_number)) + await self._control_vm("set_link gns3-{} on".format(adapter_number)) except IndexError: raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name, adapter_number=adapter_number)) - @asyncio.coroutine - def adapter_remove_nio_binding(self, adapter_number): + async def adapter_remove_nio_binding(self, adapter_number): """ Removes a port NIO binding. @@ -1290,8 +1274,8 @@ class QemuVM(BaseNode): adapter_number=adapter_number)) if self.is_running(): - yield from self._control_vm("set_link gns3-{} off".format(adapter_number)) - yield from self._ubridge_send("bridge delete {name}".format(name="QEMU-{}-{}".format(self._id, adapter_number))) + await self._control_vm("set_link gns3-{} off".format(adapter_number)) + await self._ubridge_send("bridge delete {name}".format(name="QEMU-{}-{}".format(self._id, adapter_number))) nio = adapter.get_nio(0) if isinstance(nio, NIOUDP): @@ -1304,8 +1288,7 @@ class QemuVM(BaseNode): adapter_number=adapter_number)) return nio - @asyncio.coroutine - def start_capture(self, adapter_number, output_file): + async def start_capture(self, adapter_number, output_file): """ Starts a packet capture. 
@@ -1330,14 +1313,14 @@ class QemuVM(BaseNode): nio.startPacketCapture(output_file) if self.ubridge: - yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="QEMU-{}-{}".format(self._id, adapter_number), + await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="QEMU-{}-{}".format(self._id, adapter_number), output_file=output_file)) log.info("QEMU VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, adapter_number=adapter_number)) - def stop_capture(self, adapter_number): + async def stop_capture(self, adapter_number): """ Stops a packet capture. @@ -1357,7 +1340,7 @@ class QemuVM(BaseNode): nio.stopPacketCapture() if self.ubridge: - yield from self._ubridge_send('bridge stop_capture {name}'.format(name="QEMU-{}-{}".format(self._id, adapter_number))) + await self._ubridge_send('bridge stop_capture {name}'.format(name="QEMU-{}-{}".format(self._id, adapter_number))) log.info("QEMU VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, @@ -1511,21 +1494,19 @@ class QemuVM(BaseNode): return qemu_img_path - @asyncio.coroutine - def _qemu_img_exec(self, command): + async def _qemu_img_exec(self, command): self._qemu_img_stdout_file = os.path.join(self.working_dir, "qemu-img.log") log.info("logging to {}".format(self._qemu_img_stdout_file)) command_string = " ".join(shlex.quote(s) for s in command) log.info("Executing qemu-img with: {}".format(command_string)) with open(self._qemu_img_stdout_file, "w", encoding="utf-8") as fd: - process = yield from asyncio.create_subprocess_exec(*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir) - retcode = yield from process.wait() + process = await asyncio.create_subprocess_exec(*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir) + retcode = await process.wait() log.info("{} returned with {}".format(self._get_qemu_img(), retcode)) return retcode - @asyncio.coroutine - def _disk_options(self): + async def _disk_options(self): options = [] qemu_img_path = self._get_qemu_img() @@ -1548,16 +1529,16 @@ class QemuVM(BaseNode): else: try: # check for corrupt disk image - retcode = yield from self._qemu_img_exec([qemu_img_path, "check", disk_image]) + retcode = await self._qemu_img_exec([qemu_img_path, "check", disk_image]) if retcode == 3: # image has leaked clusters, but is not corrupted, let's try to fix it log.warning("Qemu image {} has leaked clusters".format(disk_image)) - if (yield from self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", "{}".format(disk_image)])) == 3: + if (await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", "{}".format(disk_image)])) == 3: self.project.emit("log.warning", {"message": "Qemu image '{}' has leaked clusters and could not be fixed".format(disk_image)}) elif retcode == 2: # image is corrupted, let's try to fix it log.warning("Qemu image {} is corrupted".format(disk_image)) - if (yield from self._qemu_img_exec([qemu_img_path, "check", "-r", "all", "{}".format(disk_image)])) == 2: + if (await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", "{}".format(disk_image)])) == 2: self.project.emit("log.warning", {"message": "Qemu image '{}' is corrupted and could not be fixed".format(disk_image)}) except (OSError, subprocess.SubprocessError) as e: stdout = self.read_qemu_img_stdout() @@ -1569,7 +1550,7 @@ class QemuVM(BaseNode): # create the disk try: command = [qemu_img_path, "create", "-o", 
"backing_file={}".format(disk_image), "-f", "qcow2", disk] - retcode = yield from self._qemu_img_exec(command) + retcode = await self._qemu_img_exec(command) if retcode: stdout = self.read_qemu_img_stdout() raise QemuError("Could not create '{}' disk image: qemu-img returned with {}\n{}".format(disk_name, @@ -1582,7 +1563,7 @@ class QemuVM(BaseNode): # The disk exists we check if the clone works try: qcow2 = Qcow2(disk) - yield from qcow2.rebase(qemu_img_path, disk_image) + await qcow2.rebase(qemu_img_path, disk_image) except (Qcow2Error, OSError) as e: raise QemuError("Could not use qcow2 disk image '{}' for {} {}".format(disk_image, disk_name, e)) @@ -1599,8 +1580,7 @@ class QemuVM(BaseNode): return options - @asyncio.coroutine - def resize_disk(self, drive_name, extend): + async def resize_disk(self, drive_name, extend): if self.is_running(): raise QemuError("Cannot resize {} while the VM is running".format(drive_name)) @@ -1613,7 +1593,7 @@ class QemuVM(BaseNode): if not os.path.exists(disk_image_path): raise QemuError("Disk path '{}' does not exist".format(disk_image_path)) qemu_img_path = self._get_qemu_img() - yield from self.manager.resize_disk(qemu_img_path, disk_image_path, extend) + await self.manager.resize_disk(qemu_img_path, disk_image_path, extend) def _cdrom_option(self): @@ -1663,15 +1643,14 @@ class QemuVM(BaseNode): return options - @asyncio.coroutine - def _network_options(self): + async def _network_options(self): network_options = [] network_options.extend(["-net", "none"]) # we do not want any user networking back-end if no adapter is connected. patched_qemu = False if self._legacy_networking: - version = yield from self.manager.get_qemu_version(self.qemu_path) + version = await self.manager.get_qemu_version(self.qemu_path) if version and parse_version(version) < parse_version("1.1.0"): # this is a patched Qemu if version is below 1.1.0 patched_qemu = True @@ -1684,7 +1663,7 @@ class QemuVM(BaseNode): network_options.extend(["-device", "pci-bridge,id=pci-bridge{bridge_id},bus=dmi_pci_bridge{bridge_id},chassis_nr=0x1,addr=0x{bridge_id},shpc=off".format(bridge_id=bridge_id)]) if bridge_id > 1: - qemu_version = yield from self.manager.get_qemu_version(self.qemu_path) + qemu_version = await self.manager.get_qemu_version(self.qemu_path) if qemu_version and parse_version(qemu_version) < parse_version("2.4.0"): raise QemuError("Qemu version 2.4 or later is required to run this VM with a large number of network adapters") @@ -1765,8 +1744,7 @@ class QemuVM(BaseNode): return ["-nographic"] return [] - @asyncio.coroutine - def _run_with_hardware_acceleration(self, qemu_path, options): + async def _run_with_hardware_acceleration(self, qemu_path, options): """ Check if we can run Qemu with hardware acceleration @@ -1806,7 +1784,7 @@ class QemuVM(BaseNode): elif sys.platform.startswith("win"): if require_hardware_accel: # HAXM is only available starting with Qemu version 2.9.0 - version = yield from self.manager.get_qemu_version(self.qemu_path) + version = await self.manager.get_qemu_version(self.qemu_path) if version and parse_version(version) < parse_version("2.9.0"): raise QemuError("HAXM acceleration can only be enable for Qemu version 2.9.0 and above (current version: {})".format(version)) @@ -1824,8 +1802,8 @@ class QemuVM(BaseNode): else: return False elif sys.platform.startswith("darwin"): - process = yield from asyncio.create_subprocess_shell("kextstat | grep com.intel.kext.intelhaxm") - yield from process.wait() + process = await 
asyncio.create_subprocess_shell("kextstat | grep com.intel.kext.intelhaxm") + await process.wait() if process.returncode != 0: if require_hardware_accel: raise QemuError("HAXM acceleration support is not installed on this host (com.intel.kext.intelhaxm extension not loaded)") @@ -1834,8 +1812,7 @@ class QemuVM(BaseNode): return True return False - @asyncio.coroutine - def _clear_save_vm_stated(self, snapshot_name="GNS3_SAVED_STATE"): + async def _clear_save_vm_stated(self, snapshot_name="GNS3_SAVED_STATE"): drives = ["a", "b", "c", "d"] qemu_img_path = self._get_qemu_img() @@ -1851,7 +1828,7 @@ class QemuVM(BaseNode): if not os.path.exists(disk): continue command = [qemu_img_path, "snapshot", "-d", snapshot_name, disk] - retcode = yield from self._qemu_img_exec(command) + retcode = await self._qemu_img_exec(command) if retcode: stdout = self.read_qemu_img_stdout() log.warning("Could not delete saved VM state from disk {}: {}".format(disk, stdout)) @@ -1860,8 +1837,7 @@ class QemuVM(BaseNode): except subprocess.SubprocessError as e: raise QemuError("Error while looking for the Qemu VM saved state snapshot: {}".format(e)) - @asyncio.coroutine - def _saved_state_option(self, snapshot_name="GNS3_SAVED_STATE"): + async def _saved_state_option(self, snapshot_name="GNS3_SAVED_STATE"): drives = ["a", "b", "c", "d"] qemu_img_path = self._get_qemu_img() @@ -1876,7 +1852,7 @@ class QemuVM(BaseNode): disk = disk_image if not os.path.exists(disk): continue - output = yield from subprocess_check_output(qemu_img_path, "info", "--output=json", disk) + output = await subprocess_check_output(qemu_img_path, "info", "--output=json", disk) json_data = json.loads(output) if "snapshots" in json_data: for snapshot in json_data["snapshots"]: @@ -1889,8 +1865,7 @@ class QemuVM(BaseNode): raise QemuError("Error while looking for the Qemu VM saved state snapshot: {}".format(e)) return [] - @asyncio.coroutine - def _build_command(self): + async def _build_command(self): """ Command to start the QEMU process. 
(to be passed to subprocess.Popen()) @@ -1905,10 +1880,10 @@ class QemuVM(BaseNode): command.extend(["-name", self._name]) command.extend(["-m", "{}M".format(self._ram)]) command.extend(["-smp", "cpus={}".format(self._cpus)]) - if (yield from self._run_with_hardware_acceleration(self.qemu_path, self._options)): + if (await self._run_with_hardware_acceleration(self.qemu_path, self._options)): if sys.platform.startswith("linux"): command.extend(["-enable-kvm"]) - version = yield from self.manager.get_qemu_version(self.qemu_path) + version = await self.manager.get_qemu_version(self.qemu_path) # Issue on some combo Intel CPU + KVM + Qemu 2.4.0 # https://github.com/GNS3/gns3-server/issues/685 if version and parse_version(version) >= parse_version("2.4.0") and self.platform == "x86_64": @@ -1918,7 +1893,7 @@ class QemuVM(BaseNode): command.extend(["-boot", "order={}".format(self._boot_priority)]) command.extend(self._bios_option()) command.extend(self._cdrom_option()) - command.extend((yield from self._disk_options())) + command.extend((await self._disk_options())) command.extend(self._linux_boot_options()) if "-uuid" not in additional_options: command.extend(["-uuid", self._id]) @@ -1933,12 +1908,12 @@ class QemuVM(BaseNode): elif self._console_type != "none": raise QemuError("Console type {} is unknown".format(self._console_type)) command.extend(self._monitor_options()) - command.extend((yield from self._network_options())) + command.extend((await self._network_options())) command.extend(self._graphic()) if self.on_close != "save_vm_state": - yield from self._clear_save_vm_stated() + await self._clear_save_vm_stated() else: - command.extend((yield from self._saved_state_option())) + command.extend((await self._saved_state_option())) if additional_options: try: diff --git a/gns3server/compute/traceng/__init__.py b/gns3server/compute/traceng/__init__.py index 14b3b1fe..66d7b627 100644 --- a/gns3server/compute/traceng/__init__.py +++ b/gns3server/compute/traceng/__init__.py @@ -34,12 +34,11 @@ class TraceNG(BaseManager): super().__init__() - @asyncio.coroutine - def create_node(self, *args, **kwargs): + async def create_node(self, *args, **kwargs): """ Creates a new TraceNG VM. :returns: TraceNGVM instance """ - return (yield from super().create_node(*args, **kwargs)) + return (await super().create_node(*args, **kwargs)) diff --git a/gns3server/compute/traceng/traceng_vm.py b/gns3server/compute/traceng/traceng_vm.py index 6539b804..c2755e36 100644 --- a/gns3server/compute/traceng/traceng_vm.py +++ b/gns3server/compute/traceng/traceng_vm.py @@ -68,13 +68,12 @@ class TraceNGVM(BaseNode): def ethernet_adapter(self): return self._ethernet_adapter - @asyncio.coroutine - def close(self): + async def close(self): """ Closes this TraceNG VM. """ - if not (yield from super().close()): + if not (await super().close()): return False nio = self._ethernet_adapter.get_nio(0) @@ -86,15 +85,14 @@ class TraceNGVM(BaseNode): self.manager.port_manager.release_udp_port(self._local_udp_tunnel[1].lport, self._project) self._local_udp_tunnel = None - yield from self._stop_ubridge() + await self._stop_ubridge() if self.is_running(): self._terminate_process() return True - @asyncio.coroutine - def _check_requirements(self): + async def _check_requirements(self): """ Check if TraceNG is available. """ @@ -193,33 +191,32 @@ class TraceNGVM(BaseNode): id=self.id, destination=destination)) - @asyncio.coroutine - def start(self, destination=None): + async def start(self, destination=None): """ Starts the TraceNG process. 
""" if not sys.platform.startswith("win"): raise TraceNGError("Sorry, TraceNG can only run on Windows") - yield from self._check_requirements() + await self._check_requirements() if not self.is_running(): nio = self._ethernet_adapter.get_nio(0) command = self._build_command(destination) - yield from self._stop_ubridge() # make use we start with a fresh uBridge instance + await self._stop_ubridge() # make use we start with a fresh uBridge instance try: log.info("Starting TraceNG: {}".format(command)) flags = 0 if hasattr(subprocess, "CREATE_NEW_CONSOLE"): flags = subprocess.CREATE_NEW_CONSOLE self.command_line = ' '.join(command) - self._process = yield from asyncio.create_subprocess_exec(*command, + self._process = await asyncio.create_subprocess_exec(*command, cwd=self.working_dir, creationflags=flags) monitor_process(self._process, self._termination_callback) - yield from self._start_ubridge() + await self._start_ubridge() if nio: - yield from self.add_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio) + await self.add_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio) log.info("TraceNG instance {} started PID={}".format(self.name, self._process.pid)) self._started = True @@ -243,18 +240,17 @@ class TraceNGVM(BaseNode): if returncode != 0: self.project.emit("log.error", {"message": "TraceNG process has stopped, return code: {}\n".format(returncode)}) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the TraceNG process. """ - yield from self._stop_ubridge() + await self._stop_ubridge() if self.is_running(): self._terminate_process() if self._process.returncode is None: try: - yield from wait_for_process_termination(self._process, timeout=3) + await wait_for_process_termination(self._process, timeout=3) except asyncio.TimeoutError: if self._process.returncode is None: try: @@ -266,16 +262,15 @@ class TraceNGVM(BaseNode): self._process = None self._started = False - yield from super().stop() + await super().stop() - @asyncio.coroutine - def reload(self): + async def reload(self): """ Reloads the TraceNG process (stop & start). """ - yield from self.stop() - yield from self.start(self._destination) + await self.stop() + await self.start(self._destination) def _terminate_process(self): """ @@ -303,8 +298,7 @@ class TraceNGVM(BaseNode): return True return False - @asyncio.coroutine - def port_add_nio_binding(self, port_number, nio): + async def port_add_nio_binding(self, port_number, nio): """ Adds a port NIO binding. 
@@ -317,7 +311,7 @@ class TraceNGVM(BaseNode): port_number=port_number)) if self.is_running(): - yield from self.add_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio) + await self.add_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio) self._ethernet_adapter.add_nio(port_number, nio) log.info('TraceNG "{name}" [{id}]: {nio} added to port {port_number}'.format(name=self._name, @@ -327,16 +321,14 @@ class TraceNGVM(BaseNode): return nio - @asyncio.coroutine - def port_update_nio_binding(self, port_number, nio): + async def port_update_nio_binding(self, port_number, nio): if not self._ethernet_adapter.port_exists(port_number): raise TraceNGError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter, port_number=port_number)) if self.is_running(): - yield from self.update_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio) + await self.update_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio) - @asyncio.coroutine - def port_remove_nio_binding(self, port_number): + async def port_remove_nio_binding(self, port_number): """ Removes a port NIO binding. @@ -350,7 +342,7 @@ class TraceNGVM(BaseNode): port_number=port_number)) if self.is_running(): - yield from self._ubridge_send("bridge delete {name}".format(name="TraceNG-{}".format(self._id))) + await self._ubridge_send("bridge delete {name}".format(name="TraceNG-{}".format(self._id))) nio = self._ethernet_adapter.get_nio(port_number) if isinstance(nio, NIOUDP): @@ -363,8 +355,7 @@ class TraceNGVM(BaseNode): port_number=port_number)) return nio - @asyncio.coroutine - def start_capture(self, port_number, output_file): + async def start_capture(self, port_number, output_file): """ Starts a packet capture. @@ -387,15 +378,14 @@ class TraceNGVM(BaseNode): nio.startPacketCapture(output_file) if self.ubridge: - yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="TraceNG-{}".format(self._id), + await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="TraceNG-{}".format(self._id), output_file=output_file)) log.info("TraceNG '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name, id=self.id, port_number=port_number)) - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. @@ -414,7 +404,7 @@ class TraceNGVM(BaseNode): nio.stopPacketCapture() if self.ubridge: - yield from self._ubridge_send('bridge stop_capture {name}'.format(name="TraceNG-{}".format(self._id))) + await self._ubridge_send('bridge stop_capture {name}'.format(name="TraceNG-{}".format(self._id))) log.info("TraceNG '{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name, id=self.id, diff --git a/gns3server/compute/virtualbox/__init__.py b/gns3server/compute/virtualbox/__init__.py index a676bd1a..daace868 100644 --- a/gns3server/compute/virtualbox/__init__.py +++ b/gns3server/compute/virtualbox/__init__.py @@ -93,13 +93,12 @@ class VirtualBox(BaseManager): self._vboxmanage_path = vboxmanage_path return vboxmanage_path - @asyncio.coroutine - def execute(self, subcommand, args, timeout=60): + async def execute(self, subcommand, args, timeout=60): # We use a lock prevent parallel execution due to strange errors # reported by a user and reproduced by us. 
# https://github.com/GNS3/gns3-gui/issues/261 - with (yield from self._execute_lock): + async with self._execute_lock: vboxmanage_path = self.vboxmanage_path if not vboxmanage_path: vboxmanage_path = self.find_vboxmanage() @@ -111,12 +110,12 @@ class VirtualBox(BaseManager): command_string = " ".join(command) log.info("Executing VBoxManage with command: {}".format(command_string)) try: - process = yield from asyncio.create_subprocess_exec(*command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) + process = await asyncio.create_subprocess_exec(*command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) except (OSError, subprocess.SubprocessError) as e: raise VirtualBoxError("Could not execute VBoxManage: {}".format(e)) try: - stdout_data, stderr_data = yield from asyncio.wait_for(process.communicate(), timeout=timeout) + stdout_data, stderr_data = await asyncio.wait_for(process.communicate(), timeout=timeout) except asyncio.TimeoutError: raise VirtualBoxError("VBoxManage has timed out after {} seconds!".format(timeout)) @@ -126,15 +125,14 @@ class VirtualBox(BaseManager): return stdout_data.decode("utf-8", errors="ignore").splitlines() - @asyncio.coroutine - def _find_inaccessible_hdd_files(self): + async def _find_inaccessible_hdd_files(self): """ Finds inaccessible disk files (to clean up the VirtualBox media manager) """ hdds = [] try: - properties = yield from self.execute("list", ["hdds"]) + properties = await self.execute("list", ["hdds"]) # If VirtualBox is not available we have no inaccessible hdd except VirtualBoxError: return hdds @@ -152,32 +150,30 @@ class VirtualBox(BaseManager): flag_inaccessible = False return reversed(hdds) - @asyncio.coroutine - def project_closed(self, project): + async def project_closed(self, project): """ Called when a project is closed. :param project: Project instance """ - yield from super().project_closed(project) - hdd_files_to_close = yield from self._find_inaccessible_hdd_files() + await super().project_closed(project) + hdd_files_to_close = await self._find_inaccessible_hdd_files() for hdd_file in hdd_files_to_close: log.info("Closing VirtualBox VM disk file {}".format(os.path.basename(hdd_file))) try: - yield from self.execute("closemedium", ["disk", hdd_file]) + await self.execute("closemedium", ["disk", hdd_file]) except VirtualBoxError as e: log.warning("Could not close VirtualBox VM disk file {}: {}".format(os.path.basename(hdd_file), e)) continue - @asyncio.coroutine - def list_vms(self, allow_clone=False): + async def list_vms(self, allow_clone=False): """ Gets VirtualBox VM list. 
""" vbox_vms = [] - result = yield from self.execute("list", ["vms"]) + result = await self.execute("list", ["vms"]) for line in result: if len(line) == 0 or line[0] != '"' or line[-1:] != "}": continue # Broken output (perhaps a carriage return in VM name) @@ -185,10 +181,10 @@ class VirtualBox(BaseManager): vmname = vmname.strip('"') if vmname == "": continue # ignore inaccessible VMs - extra_data = yield from self.execute("getextradata", [vmname, "GNS3/Clone"]) + extra_data = await self.execute("getextradata", [vmname, "GNS3/Clone"]) if allow_clone or len(extra_data) == 0 or not extra_data[0].strip() == "Value: yes": # get the amount of RAM - info_results = yield from self.execute("showvminfo", [vmname, "--machinereadable"]) + info_results = await self.execute("showvminfo", [vmname, "--machinereadable"]) ram = 0 for info in info_results: try: diff --git a/gns3server/compute/virtualbox/virtualbox_vm.py b/gns3server/compute/virtualbox/virtualbox_vm.py index e12bc04b..36342711 100644 --- a/gns3server/compute/virtualbox/virtualbox_vm.py +++ b/gns3server/compute/virtualbox/virtualbox_vm.py @@ -98,10 +98,9 @@ class VirtualBoxVM(BaseNode): def ethernet_adapters(self): return self._ethernet_adapters - @asyncio.coroutine - def _get_system_properties(self): + async def _get_system_properties(self): - properties = yield from self.manager.execute("list", ["systemproperties"]) + properties = await self.manager.execute("list", ["systemproperties"]) for prop in properties: try: name, value = prop.split(':', 1) @@ -109,15 +108,14 @@ class VirtualBoxVM(BaseNode): continue self._system_properties[name.strip()] = value.strip() - @asyncio.coroutine - def _get_vm_state(self): + async def _get_vm_state(self): """ Returns the VM state (e.g. running, paused etc.) :returns: state (string) """ - results = yield from self.manager.execute("showvminfo", [self._vmname, "--machinereadable"]) + results = await self.manager.execute("showvminfo", [self._vmname, "--machinereadable"]) for info in results: if '=' in info: name, value = info.split('=', 1) @@ -125,8 +123,7 @@ class VirtualBoxVM(BaseNode): return value.strip('"') raise VirtualBoxError("Could not get VM state for {}".format(self._vmname)) - @asyncio.coroutine - def _control_vm(self, params): + async def _control_vm(self, params): """ Change setting in this VM when running. @@ -136,11 +133,10 @@ class VirtualBoxVM(BaseNode): """ args = shlex.split(params) - result = yield from self.manager.execute("controlvm", [self._vmname] + args) + result = await self.manager.execute("controlvm", [self._vmname] + args) return result - @asyncio.coroutine - def _modify_vm(self, params): + async def _modify_vm(self, params): """ Change setting in this VM when not running. @@ -148,10 +144,9 @@ class VirtualBoxVM(BaseNode): """ args = shlex.split(params) - yield from self.manager.execute("modifyvm", [self._vmname] + args) + await self.manager.execute("modifyvm", [self._vmname] + args) - @asyncio.coroutine - def _check_duplicate_linked_clone(self): + async def _check_duplicate_linked_clone(self): """ Without linked clone two VM using the same image can't run at the same time. 
@@ -175,14 +170,13 @@ class VirtualBoxVM(BaseNode): if not found: return trial += 1 - yield from asyncio.sleep(1) + await asyncio.sleep(1) - @asyncio.coroutine - def create(self): + async def create(self): if not self.linked_clone: - yield from self._check_duplicate_linked_clone() + await self._check_duplicate_linked_clone() - yield from self._get_system_properties() + await self._get_system_properties() if "API version" not in self._system_properties: raise VirtualBoxError("Can't access to VirtualBox API version:\n{}".format(self._system_properties)) if parse_version(self._system_properties["API version"]) < parse_version("4_3"): @@ -192,15 +186,15 @@ class VirtualBoxVM(BaseNode): if self.linked_clone: if self.id and os.path.isdir(os.path.join(self.working_dir, self._vmname)): self._patch_vm_uuid() - yield from self.manager.execute("registervm", [self._linked_vbox_file()]) - yield from self._reattach_linked_hdds() + await self.manager.execute("registervm", [self._linked_vbox_file()]) + await self._reattach_linked_hdds() else: - yield from self._create_linked_clone() + await self._create_linked_clone() if self._adapters: - yield from self.set_adapters(self._adapters) + await self.set_adapters(self._adapters) - vm_info = yield from self._get_vm_info() + vm_info = await self._get_vm_info() if "memory" in vm_info: self._ram = int(vm_info["memory"]) @@ -234,22 +228,20 @@ class VirtualBoxVM(BaseNode): machine.set("uuid", "{" + self.id + "}") tree.write(self._linked_vbox_file()) - @asyncio.coroutine - def check_hw_virtualization(self): + async def check_hw_virtualization(self): """ Returns either hardware virtualization is activated or not. :returns: boolean """ - vm_info = yield from self._get_vm_info() + vm_info = await self._get_vm_info() if "hwvirtex" in vm_info and vm_info["hwvirtex"] == "on": return True return False @locking - @asyncio.coroutine - def start(self): + async def start(self): """ Starts this VirtualBox VM. 
""" @@ -258,17 +250,17 @@ class VirtualBoxVM(BaseNode): return # resume the VM if it is paused - vm_state = yield from self._get_vm_state() + vm_state = await self._get_vm_state() if vm_state == "paused": - yield from self.resume() + await self.resume() return # VM must be powered off to start it if vm_state != "poweroff": raise VirtualBoxError("VirtualBox VM not powered off") - yield from self._set_network_options() - yield from self._set_serial_console() + await self._set_network_options() + await self._set_serial_console() # check if there is enough RAM to run self.check_available_ram(self.ram) @@ -276,92 +268,90 @@ class VirtualBoxVM(BaseNode): args = [self._vmname] if self._headless: args.extend(["--type", "headless"]) - result = yield from self.manager.execute("startvm", args) + result = await self.manager.execute("startvm", args) self.status = "started" log.info("VirtualBox VM '{name}' [{id}] started".format(name=self.name, id=self.id)) log.debug("Start result: {}".format(result)) # add a guest property to let the VM know about the GNS3 name - yield from self.manager.execute("guestproperty", ["set", self._vmname, "NameInGNS3", self.name]) + await self.manager.execute("guestproperty", ["set", self._vmname, "NameInGNS3", self.name]) # add a guest property to let the VM know about the GNS3 project directory - yield from self.manager.execute("guestproperty", ["set", self._vmname, "ProjectDirInGNS3", self.working_dir]) + await self.manager.execute("guestproperty", ["set", self._vmname, "ProjectDirInGNS3", self.working_dir]) - yield from self._start_ubridge() + await self._start_ubridge() for adapter_number in range(0, self._adapters): nio = self._ethernet_adapters[adapter_number].get_nio(0) if nio: - yield from self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number), + await self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number), self._local_udp_tunnels[adapter_number][1], nio) - yield from self._start_console() + await self._start_console() - if (yield from self.check_hw_virtualization()): + if (await self.check_hw_virtualization()): self._hw_virtualization = True @locking - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops this VirtualBox VM. 
""" self._hw_virtualization = False - yield from self._stop_ubridge() - yield from self._stop_remote_console() - vm_state = yield from self._get_vm_state() + await self._stop_ubridge() + await self._stop_remote_console() + vm_state = await self._get_vm_state() if vm_state == "running" or vm_state == "paused" or vm_state == "stuck": if self.on_close == "save_vm_state": - result = yield from self._control_vm("savestate") + result = await self._control_vm("savestate") self.status = "stopped" log.debug("Stop result: {}".format(result)) elif self.on_close == "shutdown_signal": # use ACPI to shutdown the VM - result = yield from self._control_vm("acpipowerbutton") + result = await self._control_vm("acpipowerbutton") trial = 0 while True: - vm_state = yield from self._get_vm_state() + vm_state = await self._get_vm_state() if vm_state == "poweroff": break - yield from asyncio.sleep(1) + await asyncio.sleep(1) trial += 1 if trial >= 120: - yield from self._control_vm("poweroff") + await self._control_vm("poweroff") break self.status = "stopped" log.debug("ACPI shutdown result: {}".format(result)) else: # power off the VM - result = yield from self._control_vm("poweroff") + result = await self._control_vm("poweroff") self.status = "stopped" log.debug("Stop result: {}".format(result)) log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id)) - yield from asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM + await asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM try: # deactivate the first serial port - yield from self._modify_vm("--uart1 off") + await self._modify_vm("--uart1 off") except VirtualBoxError as e: log.warning("Could not deactivate the first serial port: {}".format(e)) for adapter_number in range(0, self._adapters): nio = self._ethernet_adapters[adapter_number].get_nio(0) if nio: - yield from self._modify_vm("--nictrace{} off".format(adapter_number + 1)) - yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) - yield from self._modify_vm("--nic{} null".format(adapter_number + 1)) - yield from super().stop() + await self._modify_vm("--nictrace{} off".format(adapter_number + 1)) + await self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) + await self._modify_vm("--nic{} null".format(adapter_number + 1)) + await super().stop() - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspends this VirtualBox VM. """ - vm_state = yield from self._get_vm_state() + vm_state = await self._get_vm_state() if vm_state == "running": - yield from self._control_vm("pause") + await self._control_vm("pause") self.status = "suspended" log.info("VirtualBox VM '{name}' [{id}] suspended".format(name=self.name, id=self.id)) else: @@ -369,31 +359,28 @@ class VirtualBoxVM(BaseNode): id=self.id, state=vm_state)) - @asyncio.coroutine - def resume(self): + async def resume(self): """ Resumes this VirtualBox VM. """ - yield from self._control_vm("resume") + await self._control_vm("resume") self.status = "started" log.info("VirtualBox VM '{name}' [{id}] resumed".format(name=self.name, id=self.id)) - @asyncio.coroutine - def reload(self): + async def reload(self): """ Reloads this VirtualBox VM. 
""" - result = yield from self._control_vm("reset") + result = await self._control_vm("reset") log.info("VirtualBox VM '{name}' [{id}] reloaded".format(name=self.name, id=self.id)) log.debug("Reload result: {}".format(result)) - @asyncio.coroutine - def _get_all_hdd_files(self): + async def _get_all_hdd_files(self): hdds = [] - properties = yield from self.manager.execute("list", ["hdds"]) + properties = await self.manager.execute("list", ["hdds"]) for prop in properties: try: name, value = prop.split(':', 1) @@ -403,8 +390,7 @@ class VirtualBoxVM(BaseNode): hdds.append(value.strip()) return hdds - @asyncio.coroutine - def _reattach_linked_hdds(self): + async def _reattach_linked_hdds(self): """ Reattach linked cloned hard disks. """ @@ -428,7 +414,7 @@ class VirtualBoxVM(BaseNode): medium=hdd_file)) try: - yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium "{}"'.format(hdd_info["controller"], + await self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium "{}"'.format(hdd_info["controller"], hdd_info["port"], hdd_info["device"], hdd_file)) @@ -443,8 +429,7 @@ class VirtualBoxVM(BaseNode): error=e)) continue - @asyncio.coroutine - def save_linked_hdds_info(self): + async def save_linked_hdds_info(self): """ Save linked cloned hard disks information. @@ -454,8 +439,8 @@ class VirtualBoxVM(BaseNode): hdd_table = [] if self.linked_clone: if os.path.exists(self.working_dir): - hdd_files = yield from self._get_all_hdd_files() - vm_info = yield from self._get_vm_info() + hdd_files = await self._get_all_hdd_files() + vm_info = await self._get_vm_info() for entry, value in vm_info.items(): match = re.search("^([\s\w]+)\-(\d)\-(\d)$", entry) # match Controller-PortNumber-DeviceNumber entry if match: @@ -489,8 +474,7 @@ class VirtualBoxVM(BaseNode): return hdd_table - @asyncio.coroutine - def close(self): + async def close(self): """ Closes this VirtualBox VM. 
""" @@ -499,7 +483,7 @@ class VirtualBoxVM(BaseNode): # VM is already closed return - if not (yield from super().close()): + if not (await super().close()): return False log.debug("VirtualBox VM '{name}' [{id}] is closing".format(name=self.name, id=self.id)) @@ -519,10 +503,10 @@ class VirtualBoxVM(BaseNode): self._local_udp_tunnels = {} self.on_close = "power_off" - yield from self.stop() + await self.stop() if self.linked_clone: - hdd_table = yield from self.save_linked_hdds_info() + hdd_table = await self.save_linked_hdds_info() for hdd in hdd_table.copy(): log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name, id=self.id, @@ -530,7 +514,7 @@ class VirtualBoxVM(BaseNode): port=hdd["port"], device=hdd["device"])) try: - yield from self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(hdd["controller"], + await self._storage_attach('--storagectl "{}" --port {} --device {} --type hdd --medium none'.format(hdd["controller"], hdd["port"], hdd["device"])) except VirtualBoxError as e: @@ -543,7 +527,7 @@ class VirtualBoxVM(BaseNode): continue log.info("VirtualBox VM '{name}' [{id}] unregistering".format(name=self.name, id=self.id)) - yield from self.manager.execute("unregistervm", [self._name]) + await self.manager.execute("unregistervm", [self._name]) log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id)) self._closed = True @@ -603,8 +587,7 @@ class VirtualBoxVM(BaseNode): return self._ram - @asyncio.coroutine - def set_ram(self, ram): + async def set_ram(self, ram): """ Set the amount of RAM allocated to this VirtualBox VM. @@ -614,7 +597,7 @@ class VirtualBoxVM(BaseNode): if ram == 0: return - yield from self._modify_vm('--memory {}'.format(ram)) + await self._modify_vm('--memory {}'.format(ram)) log.info("VirtualBox VM '{name}' [{id}] has set amount of RAM to {ram}".format(name=self.name, id=self.id, ram=ram)) self._ram = ram @@ -629,8 +612,7 @@ class VirtualBoxVM(BaseNode): return self._vmname - @asyncio.coroutine - def set_vmname(self, vmname): + async def set_vmname(self, vmname): """ Renames the VirtualBox VM. @@ -644,10 +626,10 @@ class VirtualBoxVM(BaseNode): if self.status == "started": raise VirtualBoxError("You can't change the name of running VM {}".format(self._name)) # We can't rename a VM to name that already exists - vms = yield from self.manager.list_vms(allow_clone=True) + vms = await self.manager.list_vms(allow_clone=True) if vmname in [vm["vmname"] for vm in vms]: raise VirtualBoxError("You can't change the name to {} it's already use in VirtualBox".format(vmname)) - yield from self._modify_vm('--name "{}"'.format(vmname)) + await self._modify_vm('--name "{}"'.format(vmname)) log.info("VirtualBox VM '{name}' [{id}] has set the VM name to '{vmname}'".format(name=self.name, id=self.id, vmname=vmname)) self._vmname = vmname @@ -662,8 +644,7 @@ class VirtualBoxVM(BaseNode): return self._adapters - @asyncio.coroutine - def set_adapters(self, adapters): + async def set_adapters(self, adapters): """ Sets the number of Ethernet adapters for this VirtualBox VM instance. 
@@ -671,7 +652,7 @@ class VirtualBoxVM(BaseNode): """ # check for the maximum adapters supported by the VM - vm_info = yield from self._get_vm_info() + vm_info = await self._get_vm_info() chipset = "piix3" # default chipset for VirtualBox VMs self._maximum_adapters = 8 # default maximum network adapter count for PIIX3 chipset if "chipset" in vm_info: @@ -749,8 +730,7 @@ class VirtualBoxVM(BaseNode): id=self.id, adapter_type=adapter_type)) - @asyncio.coroutine - def _get_vm_info(self): + async def _get_vm_info(self): """ Returns this VM info. @@ -758,7 +738,7 @@ class VirtualBoxVM(BaseNode): """ vm_info = {} - results = yield from self.manager.execute("showvminfo", [self._vmname, "--machinereadable"]) + results = await self.manager.execute("showvminfo", [self._vmname, "--machinereadable"]) for info in results: try: name, value = info.split('=', 1) @@ -784,22 +764,20 @@ class VirtualBoxVM(BaseNode): raise VirtualBoxError("Could not create the VirtualBox pipe directory: {}".format(e)) return pipe_name - @asyncio.coroutine - def _set_serial_console(self): + async def _set_serial_console(self): """ Configures the first serial port to allow a serial console connection. """ # activate the first serial port - yield from self._modify_vm("--uart1 0x3F8 4") + await self._modify_vm("--uart1 0x3F8 4") # set server mode with a pipe on the first serial port pipe_name = self._get_pipe_name() args = [self._vmname, "--uartmode1", "server", pipe_name] - yield from self.manager.execute("modifyvm", args) + await self.manager.execute("modifyvm", args) - @asyncio.coroutine - def _storage_attach(self, params): + async def _storage_attach(self, params): """ Change storage medium in this VM. @@ -807,10 +785,9 @@ class VirtualBoxVM(BaseNode): """ args = shlex.split(params) - yield from self.manager.execute("storageattach", [self._vmname] + args) + await self.manager.execute("storageattach", [self._vmname] + args) - @asyncio.coroutine - def _get_nic_attachements(self, maximum_adapters): + async def _get_nic_attachements(self, maximum_adapters): """ Returns NIC attachements. @@ -819,7 +796,7 @@ class VirtualBoxVM(BaseNode): """ nics = [] - vm_info = yield from self._get_vm_info() + vm_info = await self._get_vm_info() for adapter_number in range(0, maximum_adapters): entry = "nic{}".format(adapter_number + 1) if entry in vm_info: @@ -829,22 +806,21 @@ class VirtualBoxVM(BaseNode): nics.append(None) return nics - @asyncio.coroutine - def _set_network_options(self): + async def _set_network_options(self): """ Configures network options. """ - nic_attachments = yield from self._get_nic_attachements(self._maximum_adapters) + nic_attachments = await self._get_nic_attachements(self._maximum_adapters) for adapter_number in range(0, self._adapters): attachment = nic_attachments[adapter_number] if attachment == "null": # disconnect the cable if no backend is attached. - yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) + await self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) if attachment == "none": # set the backend to null to avoid a difference in the number of interfaces in the Guest. 
- yield from self._modify_vm("--nic{} null".format(adapter_number + 1)) - yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) + await self._modify_vm("--nic{} null".format(adapter_number + 1)) + await self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) # use a local UDP tunnel to connect to uBridge instead if adapter_number not in self._local_udp_tunnels: @@ -855,7 +831,7 @@ class VirtualBoxVM(BaseNode): if not self._use_any_adapter and attachment in ("nat", "bridged", "intnet", "hostonly", "natnetwork"): continue - yield from self._modify_vm("--nictrace{} off".format(adapter_number + 1)) + await self._modify_vm("--nictrace{} off".format(adapter_number + 1)) custom_adapter = self._get_custom_adapter_settings(adapter_number) adapter_type = custom_adapter.get("adapter_type", self._adapter_type) @@ -874,45 +850,44 @@ class VirtualBoxVM(BaseNode): if adapter_type == "Paravirtualized Network (virtio-net)": vbox_adapter_type = "virtio" args = [self._vmname, "--nictype{}".format(adapter_number + 1), vbox_adapter_type] - yield from self.manager.execute("modifyvm", args) + await self.manager.execute("modifyvm", args) if isinstance(nio, NIOUDP): log.debug("setting UDP params on adapter {}".format(adapter_number)) - yield from self._modify_vm("--nic{} generic".format(adapter_number + 1)) - yield from self._modify_vm("--nicgenericdrv{} UDPTunnel".format(adapter_number + 1)) - yield from self._modify_vm("--nicproperty{} sport={}".format(adapter_number + 1, nio.lport)) - yield from self._modify_vm("--nicproperty{} dest={}".format(adapter_number + 1, nio.rhost)) - yield from self._modify_vm("--nicproperty{} dport={}".format(adapter_number + 1, nio.rport)) + await self._modify_vm("--nic{} generic".format(adapter_number + 1)) + await self._modify_vm("--nicgenericdrv{} UDPTunnel".format(adapter_number + 1)) + await self._modify_vm("--nicproperty{} sport={}".format(adapter_number + 1, nio.lport)) + await self._modify_vm("--nicproperty{} dest={}".format(adapter_number + 1, nio.rhost)) + await self._modify_vm("--nicproperty{} dport={}".format(adapter_number + 1, nio.rport)) if nio.suspend: - yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) + await self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) else: - yield from self._modify_vm("--cableconnected{} on".format(adapter_number + 1)) + await self._modify_vm("--cableconnected{} on".format(adapter_number + 1)) if nio.capturing: - yield from self._modify_vm("--nictrace{} on".format(adapter_number + 1)) - yield from self._modify_vm('--nictracefile{} "{}"'.format(adapter_number + 1, nio.pcap_output_file)) + await self._modify_vm("--nictrace{} on".format(adapter_number + 1)) + await self._modify_vm('--nictracefile{} "{}"'.format(adapter_number + 1, nio.pcap_output_file)) if not self._ethernet_adapters[adapter_number].get_nio(0): - yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) + await self._modify_vm("--cableconnected{} off".format(adapter_number + 1)) for adapter_number in range(self._adapters, self._maximum_adapters): log.debug("disabling remaining adapter {}".format(adapter_number)) - yield from self._modify_vm("--nic{} none".format(adapter_number + 1)) + await self._modify_vm("--nic{} none".format(adapter_number + 1)) - @asyncio.coroutine - def _create_linked_clone(self): + async def _create_linked_clone(self): """ Creates a new linked clone. 
""" gns3_snapshot_exists = False - vm_info = yield from self._get_vm_info() + vm_info = await self._get_vm_info() for entry, value in vm_info.items(): if entry.startswith("SnapshotName") and value == "GNS3 Linked Base for clones": gns3_snapshot_exists = True if not gns3_snapshot_exists: - result = yield from self.manager.execute("snapshot", [self._vmname, "take", "GNS3 Linked Base for clones"]) + result = await self.manager.execute("snapshot", [self._vmname, "take", "GNS3 Linked Base for clones"]) log.debug("GNS3 snapshot created: {}".format(result)) args = [self._vmname, @@ -926,17 +901,17 @@ class VirtualBoxVM(BaseNode): self.working_dir, "--register"] - result = yield from self.manager.execute("clonevm", args) + result = await self.manager.execute("clonevm", args) log.debug("VirtualBox VM: {} cloned".format(result)) self._vmname = self._name - yield from self.manager.execute("setextradata", [self._vmname, "GNS3/Clone", "yes"]) + await self.manager.execute("setextradata", [self._vmname, "GNS3/Clone", "yes"]) # We create a reset snapshot in order to simplify life of user who want to rollback their VM # Warning: Do not document this it's seem buggy we keep it because Raizo students use it. try: args = [self._vmname, "take", "reset"] - result = yield from self.manager.execute("snapshot", args) + result = await self.manager.execute("snapshot", args) log.debug("Snapshot 'reset' created: {}".format(result)) # It seem sometimes this failed due to internal race condition of Vbox # we have no real explanation of this. @@ -945,8 +920,7 @@ class VirtualBoxVM(BaseNode): os.makedirs(os.path.join(self.working_dir, self._vmname), exist_ok=True) - @asyncio.coroutine - def _start_console(self): + async def _start_console(self): """ Starts remote console support for this VM. """ @@ -954,7 +928,7 @@ class VirtualBoxVM(BaseNode): if self.console and self.console_type == "telnet": pipe_name = self._get_pipe_name() try: - self._remote_pipe = yield from asyncio_open_serial(pipe_name) + self._remote_pipe = await asyncio_open_serial(pipe_name) except OSError as e: raise VirtualBoxError("Could not open serial pipe '{}': {}".format(pipe_name, e)) server = AsyncioTelnetServer(reader=self._remote_pipe, @@ -962,18 +936,17 @@ class VirtualBoxVM(BaseNode): binary=True, echo=True) try: - self._telnet_server = yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console) + self._telnet_server = await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console) except OSError as e: self.project.emit("log.warning", {"message": "Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e)}) - @asyncio.coroutine - def _stop_remote_console(self): + async def _stop_remote_console(self): """ Stops remote console support for this VM. """ if self._telnet_server: self._telnet_server.close() - yield from self._telnet_server.wait_closed() + await self._telnet_server.wait_closed() self._remote_pipe.close() self._telnet_server = None @@ -990,8 +963,7 @@ class VirtualBoxVM(BaseNode): super(VirtualBoxVM, VirtualBoxVM).console_type.__set__(self, new_console_type) - @asyncio.coroutine - def adapter_add_nio_binding(self, adapter_number, nio): + async def adapter_add_nio_binding(self, adapter_number, nio): """ Adds an adapter NIO binding. 
@@ -1006,7 +978,7 @@ class VirtualBoxVM(BaseNode): adapter_number=adapter_number)) # check if trying to connect to a nat, bridged, host-only or any other special adapter - nic_attachments = yield from self._get_nic_attachements(self._maximum_adapters) + nic_attachments = await self._get_nic_attachements(self._maximum_adapters) attachment = nic_attachments[adapter_number] if attachment in ("nat", "bridged", "intnet", "hostonly", "natnetwork"): if not self._use_any_adapter: @@ -1018,21 +990,21 @@ class VirtualBoxVM(BaseNode): # dynamically configure an UDP tunnel attachment if the VM is already running local_nio = self._local_udp_tunnels[adapter_number][0] if local_nio and isinstance(local_nio, NIOUDP): - yield from self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1)) - yield from self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, local_nio.lport)) - yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, local_nio.rhost)) - yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, local_nio.rport)) - yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1)) + await self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1)) + await self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, local_nio.lport)) + await self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, local_nio.rhost)) + await self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, local_nio.rport)) + await self._control_vm("setlinkstate{} on".format(adapter_number + 1)) if self.is_running(): try: - yield from self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number), + await self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number), self._local_udp_tunnels[adapter_number][1], nio) except KeyError: raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name, adapter_number=adapter_number)) - yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1)) + await self._control_vm("setlinkstate{} on".format(adapter_number + 1)) adapter.add_nio(0, nio) log.info("VirtualBox VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name, @@ -1040,8 +1012,7 @@ class VirtualBoxVM(BaseNode): nio=nio, adapter_number=adapter_number)) - @asyncio.coroutine - def adapter_update_nio_binding(self, adapter_number, nio): + async def adapter_update_nio_binding(self, adapter_number, nio): """ Update a port NIO binding. 
@@ -1051,21 +1022,20 @@ class VirtualBoxVM(BaseNode): if self.is_running(): try: - yield from self.update_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number), + await self.update_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number), self._local_udp_tunnels[adapter_number][1], nio) if nio.suspend: - yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1)) + await self._control_vm("setlinkstate{} off".format(adapter_number + 1)) else: - yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1)) + await self._control_vm("setlinkstate{} on".format(adapter_number + 1)) except IndexError: raise VirtualBoxError('Adapter {adapter_number} does not exist on VirtualBox VM "{name}"'.format( name=self._name, adapter_number=adapter_number )) - @asyncio.coroutine - def adapter_remove_nio_binding(self, adapter_number): + async def adapter_remove_nio_binding(self, adapter_number): """ Removes an adapter NIO binding. @@ -1081,10 +1051,10 @@ class VirtualBoxVM(BaseNode): adapter_number=adapter_number)) if self.is_running(): - yield from self._ubridge_send("bridge delete {name}".format(name="VBOX-{}-{}".format(self._id, adapter_number))) - vm_state = yield from self._get_vm_state() + await self._ubridge_send("bridge delete {name}".format(name="VBOX-{}-{}".format(self._id, adapter_number))) + vm_state = await self._get_vm_state() if vm_state == "running": - yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1)) + await self._control_vm("setlinkstate{} off".format(adapter_number + 1)) nio = adapter.get_nio(0) if isinstance(nio, NIOUDP): @@ -1103,8 +1073,7 @@ class VirtualBoxVM(BaseNode): """ return self.ubridge is not None - @asyncio.coroutine - def start_capture(self, adapter_number, output_file): + async def start_capture(self, adapter_number, output_file): """ Starts a packet capture. @@ -1129,14 +1098,14 @@ class VirtualBoxVM(BaseNode): nio.startPacketCapture(output_file) if self.ubridge: - yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VBOX-{}-{}".format(self._id, adapter_number), + await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VBOX-{}-{}".format(self._id, adapter_number), output_file=output_file)) log.info("VirtualBox VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, adapter_number=adapter_number)) - def stop_capture(self, adapter_number): + async def stop_capture(self, adapter_number): """ Stops a packet capture. 
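start_capture() and stop_capture() are converted the same way for QEMU, TraceNG and VirtualBox: the method becomes a coroutine and the bridge command is awaited on the uBridge hypervisor. Sketched in isolation, with a hypothetical ubridge_send() standing in for the real hypervisor channel:

import asyncio

async def ubridge_send(command):
    print("ubridge <-", command)               # placeholder for the real send
    await asyncio.sleep(0)

async def start_capture(bridge_name, output_file):
    await ubridge_send('bridge start_capture {} "{}"'.format(bridge_name, output_file))

async def stop_capture(bridge_name):
    await ubridge_send("bridge stop_capture {}".format(bridge_name))
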
@@ -1157,7 +1126,7 @@ class VirtualBoxVM(BaseNode): nio.stopPacketCapture() if self.ubridge: - yield from self._ubridge_send('bridge stop_capture {name}'.format(name="VBOX-{}-{}".format(self._id, adapter_number))) + await self._ubridge_send('bridge stop_capture {name}'.format(name="VBOX-{}-{}".format(self._id, adapter_number))) log.info("VirtualBox VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, diff --git a/gns3server/compute/vmware/__init__.py b/gns3server/compute/vmware/__init__.py index b404ebfa..c474f562 100644 --- a/gns3server/compute/vmware/__init__.py +++ b/gns3server/compute/vmware/__init__.py @@ -32,7 +32,6 @@ import shlex from collections import OrderedDict from gns3server.utils.interfaces import interfaces from gns3server.utils.asyncio import subprocess_check_output -from gns3server.utils.asyncio import asyncio_ensure_future from gns3server.utils import parse_version log = logging.getLogger(__name__) @@ -40,7 +39,6 @@ log = logging.getLogger(__name__) from gns3server.compute.base_manager import BaseManager from gns3server.compute.vmware.vmware_vm import VMwareVM from gns3server.compute.vmware.vmware_error import VMwareError -from gns3server.utils.asyncio import asyncio_ensure_future class VMware(BaseManager): @@ -142,8 +140,7 @@ class VMware(BaseManager): version = match.group(1) return version - @asyncio.coroutine - def _check_vmware_player_requirements(self, player_version): + async def _check_vmware_player_requirements(self, player_version): """ Check minimum requirements to use VMware Player. @@ -159,17 +156,16 @@ class VMware(BaseManager): if player_version < 6: raise VMwareError("Using VMware Player requires version 6 or above") elif player_version == 6: - yield from self.check_vmrun_version(minimum_required_version="1.13.0") + await self.check_vmrun_version(minimum_required_version="1.13.0") elif player_version == 7: - yield from self.check_vmrun_version(minimum_required_version="1.14.0") + await self.check_vmrun_version(minimum_required_version="1.14.0") elif player_version >= 12: - yield from self.check_vmrun_version(minimum_required_version="1.15.0") + await self.check_vmrun_version(minimum_required_version="1.15.0") elif player_version >= 14: - yield from self.check_vmrun_version(minimum_required_version="1.17.0") + await self.check_vmrun_version(minimum_required_version="1.17.0") self._host_type = "player" - @asyncio.coroutine - def _check_vmware_workstation_requirements(self, ws_version): + async def _check_vmware_workstation_requirements(self, ws_version): """ Check minimum requirements to use VMware Workstation. 
@@ -185,17 +181,16 @@ class VMware(BaseManager): if ws_version < 10: raise VMwareError("Using VMware Workstation requires version 10 or above") elif ws_version == 10: - yield from self.check_vmrun_version(minimum_required_version="1.13.0") + await self.check_vmrun_version(minimum_required_version="1.13.0") elif ws_version == 11: - yield from self.check_vmrun_version(minimum_required_version="1.14.0") + await self.check_vmrun_version(minimum_required_version="1.14.0") elif ws_version >= 12: - yield from self.check_vmrun_version(minimum_required_version="1.15.0") + await self.check_vmrun_version(minimum_required_version="1.15.0") elif ws_version >= 14: - yield from self.check_vmrun_version(minimum_required_version="1.17.0") + await self.check_vmrun_version(minimum_required_version="1.17.0") self._host_type = "ws" - @asyncio.coroutine - def check_vmware_version(self): + async def check_vmware_version(self): """ Check VMware version """ @@ -207,13 +202,13 @@ class VMware(BaseManager): player_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Player") if player_version: log.debug("VMware Player version {} detected".format(player_version)) - yield from self._check_vmware_player_requirements(player_version) + await self._check_vmware_player_requirements(player_version) else: log.warning("Could not find VMware version") self._host_type = "ws" else: log.debug("VMware Workstation version {} detected".format(ws_version)) - yield from self._check_vmware_workstation_requirements(ws_version) + await self._check_vmware_workstation_requirements(ws_version) else: if sys.platform.startswith("darwin"): if not os.path.isdir("/Applications/VMware Fusion.app"): @@ -226,20 +221,20 @@ class VMware(BaseManager): raise VMwareError("VMware is not installed (vmware or vmplayer executable could not be found in $PATH)") try: - output = yield from subprocess_check_output(vmware_path, "-v") + output = await subprocess_check_output(vmware_path, "-v") match = re.search("VMware Workstation ([0-9]+)\.", output) version = None if match: # VMware Workstation has been detected version = match.group(1) log.debug("VMware Workstation version {} detected".format(version)) - yield from self._check_vmware_workstation_requirements(version) + await self._check_vmware_workstation_requirements(version) match = re.search("VMware Player ([0-9]+)\.", output) if match: # VMware Player has been detected version = match.group(1) log.debug("VMware Player version {} detected".format(version)) - yield from self._check_vmware_player_requirements(version) + await self._check_vmware_player_requirements(version) if version is None: log.warning("Could not find VMware version. Output of VMware: {}".format(output)) raise VMwareError("Could not find VMware version. 
Output of VMware: {}".format(output)) @@ -365,28 +360,26 @@ class VMware(BaseManager): return self._host_type - @asyncio.coroutine - def execute(self, subcommand, args, timeout=120, log_level=logging.INFO): + async def execute(self, subcommand, args, timeout=120, log_level=logging.INFO): trial = 2 while True: try: - return (yield from self._execute(subcommand, args, timeout=timeout, log_level=log_level)) + return (await self._execute(subcommand, args, timeout=timeout, log_level=log_level)) except VMwareError as e: # We can fail to detect that it's VMware player instead of Workstation (due to marketing change Player is now Player Workstation) if self.host_type == "ws" and "VIX_SERVICEPROVIDER_VMWARE_WORKSTATION" in str(e): self._host_type = "player" - return (yield from self._execute(subcommand, args, timeout=timeout, log_level=log_level)) + return (await self._execute(subcommand, args, timeout=timeout, log_level=log_level)) else: if trial <= 0: raise e trial -= 1 - yield from asyncio.sleep(0.5) + await asyncio.sleep(0.5) - @asyncio.coroutine - def _execute(self, subcommand, args, timeout=120, log_level=logging.INFO): + async def _execute(self, subcommand, args, timeout=120, log_level=logging.INFO): if self.host_type is None: - yield from self.check_vmware_version() + await self.check_vmware_version() vmrun_path = self.vmrun_path if not vmrun_path: @@ -397,12 +390,12 @@ class VMware(BaseManager): command_string = " ".join([shlex.quote(c) for c in command]) log.log(log_level, "Executing vmrun with command: {}".format(command_string)) try: - process = yield from asyncio.create_subprocess_exec(*command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) + process = await asyncio.create_subprocess_exec(*command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) except (OSError, subprocess.SubprocessError) as e: raise VMwareError("Could not execute vmrun: {}".format(e)) try: - stdout_data, _ = yield from asyncio.wait_for(process.communicate(), timeout=timeout) + stdout_data, _ = await asyncio.wait_for(process.communicate(), timeout=timeout) except asyncio.TimeoutError: raise VMwareError("vmrun has timed out after {} seconds!\nTry to run {} in a terminal to see more details.\n\nMake sure GNS3 and VMware run under the same user and whitelist vmrun.exe in your antivirus.".format(timeout, command_string)) @@ -413,8 +406,7 @@ class VMware(BaseManager): return stdout_data.decode("utf-8", errors="ignore").splitlines() - @asyncio.coroutine - def check_vmrun_version(self, minimum_required_version="1.13.0"): + async def check_vmrun_version(self, minimum_required_version="1.13.0"): """ Checks the vmrun version. @@ -431,7 +423,7 @@ class VMware(BaseManager): vmrun_path = self.find_vmrun() try: - output = yield from subprocess_check_output(vmrun_path) + output = await subprocess_check_output(vmrun_path) match = re.search("vmrun version ([0-9\.]+)", output) version = None if match: @@ -446,15 +438,14 @@ class VMware(BaseManager): log.error("Error while looking for the VMware vmrun version: {}".format(e)) raise VMwareError("Error while looking for the VMware vmrun version: {}".format(e)) - @asyncio.coroutine - def remove_from_vmware_inventory(self, vmx_path): + async def remove_from_vmware_inventory(self, vmx_path): """ Removes a linked clone from the VMware inventory file. 
:param vmx_path: path of the linked clone VMX file """ - with (yield from self._vmware_inventory_lock): + async with self._vmware_inventory_lock: inventory_path = self.get_vmware_inventory_path() if os.path.exists(inventory_path): try: @@ -677,14 +668,13 @@ class VMware(BaseManager): else: return [os.path.expanduser("~/vmware")] - @asyncio.coroutine - def list_vms(self): + async def list_vms(self): """ Gets VMware VM list. """ # check for the right VMware version - yield from self.check_vmware_version() + await self.check_vmware_version() vmware_vms = [] inventory_path = self.get_vmware_inventory_path() if os.path.exists(inventory_path) and self.host_type != "player": @@ -746,4 +736,4 @@ if __name__ == '__main__': loop = asyncio.get_event_loop() vmware = VMware.instance() print("=> Check version") - loop.run_until_complete(asyncio_ensure_future(vmware.check_vmware_version())) + loop.run_until_complete(asyncio.ensure_future(vmware.check_vmware_version())) diff --git a/gns3server/compute/vmware/vmware_vm.py b/gns3server/compute/vmware/vmware_vm.py index 9edb8042..41410cb3 100644 --- a/gns3server/compute/vmware/vmware_vm.py +++ b/gns3server/compute/vmware/vmware_vm.py @@ -95,12 +95,11 @@ class VMwareVM(BaseNode): return self._vmnets @locking - @asyncio.coroutine - def _control_vm(self, subcommand, *additional_args): + async def _control_vm(self, subcommand, *additional_args): args = [self._vmx_path] args.extend(additional_args) - result = yield from self.manager.execute(subcommand, args) + result = await self.manager.execute(subcommand, args) log.debug("Control VM '{}' result: {}".format(subcommand, result)) return result @@ -124,16 +123,14 @@ class VMwareVM(BaseNode): except OSError as e: raise VMwareError('Could not write VMware VMX file "{}": {}'.format(self._vmx_path, e)) - @asyncio.coroutine - def is_running(self): + async def is_running(self): - result = yield from self.manager.execute("list", []) + result = await self.manager.execute("list", []) if self._vmx_path in result: return True return False - @asyncio.coroutine - def _check_duplicate_linked_clone(self): + async def _check_duplicate_linked_clone(self): """ Without linked clone two VM using the same image can't run at the same time. @@ -157,17 +154,16 @@ class VMwareVM(BaseNode): if not found: return trial += 1 - yield from asyncio.sleep(1) + await asyncio.sleep(1) - @asyncio.coroutine - def create(self): + async def create(self): """ Creates this VM and handle linked clones. 
""" if not self.linked_clone: - yield from self._check_duplicate_linked_clone() + await self._check_duplicate_linked_clone() - yield from self.manager.check_vmrun_version() + await self.manager.check_vmrun_version() if self.linked_clone and not os.path.exists(os.path.join(self.working_dir, os.path.basename(self._vmx_path))): if self.manager.host_type == "player": raise VMwareError("Linked clones are not supported by VMware Player") @@ -187,11 +183,11 @@ class VMwareVM(BaseNode): break if not gns3_snapshot_exists: log.info("Creating snapshot '{}'".format(base_snapshot_name)) - yield from self._control_vm("snapshot", base_snapshot_name) + await self._control_vm("snapshot", base_snapshot_name) # create the linked clone based on the base snapshot new_vmx_path = os.path.join(self.working_dir, self.name + ".vmx") - yield from self._control_vm("clone", + await self._control_vm("clone", new_vmx_path, "linked", "-snapshot={}".format(base_snapshot_name), @@ -323,8 +319,7 @@ class VMwareVM(BaseNode): raise VMwareError("vnet {} not in VMX file".format(vnet)) return vnet - @asyncio.coroutine - def _add_ubridge_connection(self, nio, adapter_number): + async def _add_ubridge_connection(self, nio, adapter_number): """ Creates a connection in uBridge. @@ -333,30 +328,29 @@ class VMwareVM(BaseNode): """ vnet = self._get_vnet(adapter_number) - yield from self._ubridge_send("bridge create {name}".format(name=vnet)) + await self._ubridge_send("bridge create {name}".format(name=vnet)) vmnet_interface = os.path.basename(self._vmx_pairs[vnet]) if sys.platform.startswith("darwin"): # special case on OSX, we cannot bind VMnet interfaces using the libpcap - yield from self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=vnet, interface=vmnet_interface)) + await self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=vnet, interface=vmnet_interface)) else: block_host_traffic = self.manager.config.get_section_config("VMware").getboolean("block_host_traffic", False) - yield from self._add_ubridge_ethernet_connection(vnet, vmnet_interface, block_host_traffic) + await self._add_ubridge_ethernet_connection(vnet, vmnet_interface, block_host_traffic) if isinstance(nio, NIOUDP): - yield from self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=vnet, + await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=vnet, lport=nio.lport, rhost=nio.rhost, rport=nio.rport)) if nio.capturing: - yield from self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=vnet, pcap_file=nio.pcap_output_file)) + await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=vnet, pcap_file=nio.pcap_output_file)) - yield from self._ubridge_send('bridge start {name}'.format(name=vnet)) - yield from self._ubridge_apply_filters(vnet, nio.filters) + await self._ubridge_send('bridge start {name}'.format(name=vnet)) + await self._ubridge_apply_filters(vnet, nio.filters) - @asyncio.coroutine - def _update_ubridge_connection(self, adapter_number, nio): + async def _update_ubridge_connection(self, adapter_number, nio): """ Update a connection in uBridge. 
@@ -367,10 +361,9 @@ class VMwareVM(BaseNode): bridge_name = self._get_vnet(adapter_number) except VMwareError: return # vnet not yet available - yield from self._ubridge_apply_filters(bridge_name, nio.filters) + await self._ubridge_apply_filters(bridge_name, nio.filters) - @asyncio.coroutine - def _delete_ubridge_connection(self, adapter_number): + async def _delete_ubridge_connection(self, adapter_number): """ Deletes a connection in uBridge. @@ -380,10 +373,9 @@ class VMwareVM(BaseNode): vnet = "ethernet{}.vnet".format(adapter_number) if vnet not in self._vmx_pairs: raise VMwareError("vnet {} not in VMX file".format(vnet)) - yield from self._ubridge_send("bridge delete {name}".format(name=vnet)) + await self._ubridge_send("bridge delete {name}".format(name=vnet)) - @asyncio.coroutine - def _start_ubridge_capture(self, adapter_number, output_file): + async def _start_ubridge_capture(self, adapter_number, output_file): """ Start a packet capture in uBridge. @@ -396,11 +388,10 @@ class VMwareVM(BaseNode): raise VMwareError("vnet {} not in VMX file".format(vnet)) if not self._ubridge_hypervisor: raise VMwareError("Cannot start the packet capture: uBridge is not running") - yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=vnet, + await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=vnet, output_file=output_file)) - @asyncio.coroutine - def _stop_ubridge_capture(self, adapter_number): + async def _stop_ubridge_capture(self, adapter_number): """ Stop a packet capture in uBridge. @@ -412,7 +403,7 @@ class VMwareVM(BaseNode): raise VMwareError("vnet {} not in VMX file".format(vnet)) if not self._ubridge_hypervisor: raise VMwareError("Cannot stop the packet capture: uBridge is not running") - yield from self._ubridge_send("bridge stop_capture {name}".format(name=vnet)) + await self._ubridge_send("bridge stop_capture {name}".format(name=vnet)) def check_hw_virtualization(self): """ @@ -426,8 +417,7 @@ class VMwareVM(BaseNode): return True return False - @asyncio.coroutine - def start(self): + async def start(self): """ Starts this VMware VM. 
""" @@ -435,14 +425,14 @@ class VMwareVM(BaseNode): if self.status == "started": return - if (yield from self.is_running()): + if (await self.is_running()): raise VMwareError("The VM is already running in VMware") ubridge_path = self.ubridge_path if not ubridge_path or not os.path.isfile(ubridge_path): raise VMwareError("ubridge is necessary to start a VMware VM") - yield from self._start_ubridge() + await self._start_ubridge() self._read_vmx_file() # check if there is enough RAM to run if "memsize" in self._vmx_pairs: @@ -452,20 +442,20 @@ class VMwareVM(BaseNode): self._write_vmx_file() if self._headless: - yield from self._control_vm("start", "nogui") + await self._control_vm("start", "nogui") else: - yield from self._control_vm("start") + await self._control_vm("start") try: if self._ubridge_hypervisor: for adapter_number in range(0, self._adapters): nio = self._ethernet_adapters[adapter_number].get_nio(0) if nio: - yield from self._add_ubridge_connection(nio, adapter_number) + await self._add_ubridge_connection(nio, adapter_number) - yield from self._start_console() + await self._start_console() except VMwareError: - yield from self.stop() + await self.stop() raise if self._get_vmx_setting("vhv.enable", "TRUE"): @@ -475,25 +465,24 @@ class VMwareVM(BaseNode): self.status = "started" log.info("VMware VM '{name}' [{id}] started".format(name=self.name, id=self.id)) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops this VMware VM. """ self._hw_virtualization = False - yield from self._stop_remote_console() - yield from self._stop_ubridge() + await self._stop_remote_console() + await self._stop_ubridge() try: - if (yield from self.is_running()): + if (await self.is_running()): if self.on_close == "save_vm_state": - yield from self._control_vm("suspend") + await self._control_vm("suspend") elif self.on_close == "shutdown_signal": # use ACPI to shutdown the VM - yield from self._control_vm("stop", "soft") + await self._control_vm("stop", "soft") else: - yield from self._control_vm("stop") + await self._control_vm("stop") finally: self._started = False self.status = "stopped" @@ -519,49 +508,45 @@ class VMwareVM(BaseNode): self._vmx_pairs["ethernet{}.startconnected".format(adapter_number)] = "TRUE" self._write_vmx_file() - yield from super().stop() + await super().stop() log.info("VMware VM '{name}' [{id}] stopped".format(name=self.name, id=self.id)) - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspends this VMware VM. """ if self.manager.host_type != "ws": raise VMwareError("Pausing a VM is only supported by VMware Workstation") - yield from self._control_vm("pause") + await self._control_vm("pause") self.status = "suspended" log.info("VMware VM '{name}' [{id}] paused".format(name=self.name, id=self.id)) - @asyncio.coroutine - def resume(self): + async def resume(self): """ Resumes this VMware VM. """ if self.manager.host_type != "ws": raise VMwareError("Unpausing a VM is only supported by VMware Workstation") - yield from self._control_vm("unpause") + await self._control_vm("unpause") self.status = "started" log.info("VMware VM '{name}' [{id}] resumed".format(name=self.name, id=self.id)) - @asyncio.coroutine - def reload(self): + async def reload(self): """ Reloads this VMware VM. """ - yield from self._control_vm("reset") + await self._control_vm("reset") log.info("VMware VM '{name}' [{id}] reloaded".format(name=self.name, id=self.id)) - @asyncio.coroutine - def close(self): + async def close(self): """ Closes this VMware VM. 
""" - if not (yield from super().close()): + if not (await super().close()): return False for adapter in self._ethernet_adapters.values(): @@ -571,12 +556,12 @@ class VMwareVM(BaseNode): self.manager.port_manager.release_udp_port(nio.lport, self._project) try: self.on_close = "power_off" - yield from self.stop() + await self.stop() except VMwareError: pass if self.linked_clone: - yield from self.manager.remove_from_vmware_inventory(self._vmx_path) + await self.manager.remove_from_vmware_inventory(self._vmx_path) @property def headless(self): @@ -722,8 +707,7 @@ class VMwareVM(BaseNode): log.info("VMware VM '{name}' [{id}] is not allowed to use any adapter".format(name=self.name, id=self.id)) self._use_any_adapter = use_any_adapter - @asyncio.coroutine - def adapter_add_nio_binding(self, adapter_number, nio): + async def adapter_add_nio_binding(self, adapter_number, nio): """ Adds an adapter NIO binding. @@ -743,7 +727,7 @@ class VMwareVM(BaseNode): # check for the connection type connection_type = "ethernet{}.connectiontype".format(adapter_number) if not self._use_any_adapter and connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("nat", "bridged", "hostonly"): - if (yield from self.is_running()): + if (await self.is_running()): raise VMwareError("Attachment '{attachment}' is configured on network adapter {adapter_number}. " "Please stop VMware VM '{name}' to link to this adapter and allow GNS3 to change the attachment type.".format(attachment=self._vmx_pairs[connection_type], adapter_number=adapter_number, @@ -757,15 +741,14 @@ class VMwareVM(BaseNode): adapter.add_nio(0, nio) if self._started and self._ubridge_hypervisor: - yield from self._add_ubridge_connection(nio, adapter_number) + await self._add_ubridge_connection(nio, adapter_number) log.info("VMware VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name, id=self.id, nio=nio, adapter_number=adapter_number)) - @asyncio.coroutine - def adapter_update_nio_binding(self, adapter_number, nio): + async def adapter_update_nio_binding(self, adapter_number, nio): """ Update a port NIO binding. @@ -775,15 +758,14 @@ class VMwareVM(BaseNode): if self._ubridge_hypervisor: try: - yield from self._update_ubridge_connection(adapter_number, nio) + await self._update_ubridge_connection(adapter_number, nio) except IndexError: raise VMwareError('Adapter {adapter_number} does not exist on VMware VM "{name}"'.format( name=self._name, adapter_number=adapter_number )) - @asyncio.coroutine - def adapter_remove_nio_binding(self, adapter_number): + async def adapter_remove_nio_binding(self, adapter_number): """ Removes an adapter NIO binding. @@ -803,7 +785,7 @@ class VMwareVM(BaseNode): self.manager.port_manager.release_udp_port(nio.lport, self._project) adapter.remove_nio(0) if self._started and self._ubridge_hypervisor: - yield from self._delete_ubridge_connection(adapter_number) + await self._delete_ubridge_connection(adapter_number) log.info("VMware VM '{name}' [{id}]: {nio} removed from adapter {adapter_number}".format(name=self.name, id=self.id, @@ -842,8 +824,7 @@ class VMwareVM(BaseNode): "serial0.startconnected": "TRUE"} self._vmx_pairs.update(serial_port) - @asyncio.coroutine - def _start_console(self): + async def _start_console(self): """ Starts remote console support for this VM. 
""" @@ -851,7 +832,7 @@ class VMwareVM(BaseNode): if self.console and self.console_type == "telnet": pipe_name = self._get_pipe_name() try: - self._remote_pipe = yield from asyncio_open_serial(self._get_pipe_name()) + self._remote_pipe = await asyncio_open_serial(self._get_pipe_name()) except OSError as e: raise VMwareError("Could not open serial pipe '{}': {}".format(pipe_name, e)) server = AsyncioTelnetServer(reader=self._remote_pipe, @@ -859,18 +840,17 @@ class VMwareVM(BaseNode): binary=True, echo=True) try: - self._telnet_server = yield from asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console) + self._telnet_server = await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console) except OSError as e: self.project.emit("log.warning", {"message": "Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e)}) - @asyncio.coroutine - def _stop_remote_console(self): + async def _stop_remote_console(self): """ Stops remote console support for this VM. """ if self._telnet_server: self._telnet_server.close() - yield from self._telnet_server.wait_closed() + await self._telnet_server.wait_closed() self._remote_pipe.close() self._telnet_server = None @@ -887,8 +867,7 @@ class VMwareVM(BaseNode): super(VMwareVM, VMwareVM).console_type.__set__(self, new_console_type) - @asyncio.coroutine - def start_capture(self, adapter_number, output_file): + async def start_capture(self, adapter_number, output_file): """ Starts a packet capture. @@ -913,13 +892,13 @@ class VMwareVM(BaseNode): nio.startPacketCapture(output_file) if self._started: - yield from self._start_ubridge_capture(adapter_number, output_file) + await self._start_ubridge_capture(adapter_number, output_file) log.info("VMware VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, adapter_number=adapter_number)) - def stop_capture(self, adapter_number): + async def stop_capture(self, adapter_number): """ Stops a packet capture. @@ -940,7 +919,7 @@ class VMwareVM(BaseNode): nio.stopPacketCapture() if self._started: - yield from self._stop_ubridge_capture(adapter_number) + await self._stop_ubridge_capture(adapter_number) log.info("VMware VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name, id=self.id, diff --git a/gns3server/compute/vpcs/__init__.py b/gns3server/compute/vpcs/__init__.py index acd0087a..d1ee9abd 100644 --- a/gns3server/compute/vpcs/__init__.py +++ b/gns3server/compute/vpcs/__init__.py @@ -37,15 +37,14 @@ class VPCS(BaseManager): self._free_mac_ids = {} self._used_mac_ids = {} - @asyncio.coroutine - def create_node(self, *args, **kwargs): + async def create_node(self, *args, **kwargs): """ Creates a new VPCS VM. :returns: VPCSVM instance """ - node = yield from super().create_node(*args, **kwargs) + node = await super().create_node(*args, **kwargs) self._free_mac_ids.setdefault(node.project.id, list(range(0, 255))) try: self._used_mac_ids[node.id] = self._free_mac_ids[node.project.id].pop(0) @@ -53,8 +52,7 @@ class VPCS(BaseManager): raise VPCSError("Cannot create a new VPCS VM (limit of 255 VMs reached on this host)") return node - @asyncio.coroutine - def close_node(self, node_id, *args, **kwargs): + async def close_node(self, node_id, *args, **kwargs): """ Closes a VPCS VM. 
@@ -66,7 +64,7 @@ class VPCS(BaseManager): i = self._used_mac_ids[node_id] self._free_mac_ids[node.project.id].insert(0, i) del self._used_mac_ids[node_id] - yield from super().close_node(node_id, *args, **kwargs) + await super().close_node(node_id, *args, **kwargs) return node def get_mac_id(self, node_id): diff --git a/gns3server/compute/vpcs/vpcs_vm.py b/gns3server/compute/vpcs/vpcs_vm.py index ef6ee79c..003bec15 100644 --- a/gns3server/compute/vpcs/vpcs_vm.py +++ b/gns3server/compute/vpcs/vpcs_vm.py @@ -77,13 +77,12 @@ class VPCSVM(BaseNode): def ethernet_adapter(self): return self._ethernet_adapter - @asyncio.coroutine - def close(self): + async def close(self): """ Closes this VPCS VM. """ - if not (yield from super().close()): + if not (await super().close()): return False nio = self._ethernet_adapter.get_nio(0) @@ -95,15 +94,14 @@ class VPCSVM(BaseNode): self.manager.port_manager.release_udp_port(self._local_udp_tunnel[1].lport, self._project) self._local_udp_tunnel = None - yield from self._stop_ubridge() + await self._stop_ubridge() if self.is_running(): self._terminate_process() return True - @asyncio.coroutine - def _check_requirements(self): + async def _check_requirements(self): """ Check if VPCS is available with the correct version. """ @@ -121,7 +119,7 @@ class VPCSVM(BaseNode): if not os.access(path, os.X_OK): raise VPCSError("VPCS program '{}' is not executable".format(path)) - yield from self._check_vpcs_version() + await self._check_vpcs_version() def __json__(self): @@ -200,13 +198,12 @@ class VPCSVM(BaseNode): except OSError as e: raise VPCSError('Cannot write the startup script file "{}": {}'.format(startup_script_path, e)) - @asyncio.coroutine - def _check_vpcs_version(self): + async def _check_vpcs_version(self): """ Checks if the VPCS executable version is >= 0.8b or == 0.6.1. """ try: - output = yield from subprocess_check_output(self._vpcs_path(), "-v", cwd=self.working_dir) + output = await subprocess_check_output(self._vpcs_path(), "-v", cwd=self.working_dir) match = re.search("Welcome to Virtual PC Simulator, version ([0-9a-z\.]+)", output) if match: version = match.group(1) @@ -218,13 +215,12 @@ class VPCSVM(BaseNode): except (OSError, subprocess.SubprocessError) as e: raise VPCSError("Error while looking for the VPCS version: {}".format(e)) - @asyncio.coroutine - def start(self): + async def start(self): """ Starts the VPCS process. 
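_check_vpcs_version() still goes through the project's subprocess_check_output helper; only the call site changes to await. With plain asyncio the same version probe could be sketched as below, using echo as a stand-in for the vpcs binary:

    import asyncio
    import re

    async def get_tool_version(executable, *args):
        # Run the tool, capture its output, and pull a version string out of it.
        process = await asyncio.create_subprocess_exec(
            executable, *args,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT)
        output, _ = await process.communicate()
        match = re.search(r"version ([0-9a-z.]+)", output.decode())
        return match.group(1) if match else None

    version = asyncio.get_event_loop().run_until_complete(
        get_tool_version("echo", "Welcome to Virtual PC Simulator, version 0.8.1"))
    print(version)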
""" - yield from self._check_requirements() + await self._check_requirements() if not self.is_running(): nio = self._ethernet_adapter.get_nio(0) command = self._build_command() @@ -237,18 +233,18 @@ class VPCSVM(BaseNode): flags = subprocess.CREATE_NEW_PROCESS_GROUP with open(self._vpcs_stdout_file, "w", encoding="utf-8") as fd: self.command_line = ' '.join(command) - self._process = yield from asyncio.create_subprocess_exec(*command, + self._process = await asyncio.create_subprocess_exec(*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir, creationflags=flags) monitor_process(self._process, self._termination_callback) - yield from self._start_ubridge() + await self._start_ubridge() if nio: - yield from self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) + await self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) - yield from self.start_wrap_console() + await self.start_wrap_console() log.info("VPCS instance {} started PID={}".format(self.name, self._process.pid)) self._started = True @@ -273,18 +269,17 @@ class VPCSVM(BaseNode): if returncode != 0: self.project.emit("log.error", {"message": "VPCS process has stopped, return code: {}\n{}".format(returncode, self.read_vpcs_stdout())}) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the VPCS process. """ - yield from self._stop_ubridge() + await self._stop_ubridge() if self.is_running(): self._terminate_process() if self._process.returncode is None: try: - yield from wait_for_process_termination(self._process, timeout=3) + await wait_for_process_termination(self._process, timeout=3) except asyncio.TimeoutError: if self._process.returncode is None: try: @@ -296,16 +291,15 @@ class VPCSVM(BaseNode): self._process = None self._started = False - yield from super().stop() + await super().stop() - @asyncio.coroutine - def reload(self): + async def reload(self): """ Reloads the VPCS process (stop & start). """ - yield from self.stop() - yield from self.start() + await self.stop() + await self.start() def _terminate_process(self): """ @@ -364,8 +358,7 @@ class VPCSVM(BaseNode): super(VPCSVM, VPCSVM).console_type.__set__(self, new_console_type) - @asyncio.coroutine - def port_add_nio_binding(self, port_number, nio): + async def port_add_nio_binding(self, port_number, nio): """ Adds a port NIO binding. 
@@ -378,7 +371,7 @@ class VPCSVM(BaseNode): port_number=port_number)) if self.is_running(): - yield from self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) + await self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) self._ethernet_adapter.add_nio(port_number, nio) log.info('VPCS "{name}" [{id}]: {nio} added to port {port_number}'.format(name=self._name, @@ -388,16 +381,14 @@ class VPCSVM(BaseNode): return nio - @asyncio.coroutine - def port_update_nio_binding(self, port_number, nio): + async def port_update_nio_binding(self, port_number, nio): if not self._ethernet_adapter.port_exists(port_number): raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter, port_number=port_number)) if self.is_running(): - yield from self.update_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) + await self.update_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) - @asyncio.coroutine - def port_remove_nio_binding(self, port_number): + async def port_remove_nio_binding(self, port_number): """ Removes a port NIO binding. @@ -411,7 +402,7 @@ class VPCSVM(BaseNode): port_number=port_number)) if self.is_running(): - yield from self._ubridge_send("bridge delete {name}".format(name="VPCS-{}".format(self._id))) + await self._ubridge_send("bridge delete {name}".format(name="VPCS-{}".format(self._id))) nio = self._ethernet_adapter.get_nio(port_number) if isinstance(nio, NIOUDP): @@ -424,8 +415,7 @@ class VPCSVM(BaseNode): port_number=port_number)) return nio - @asyncio.coroutine - def start_capture(self, port_number, output_file): + async def start_capture(self, port_number, output_file): """ Starts a packet capture. @@ -448,15 +438,14 @@ class VPCSVM(BaseNode): nio.startPacketCapture(output_file) if self.ubridge: - yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VPCS-{}".format(self._id), + await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VPCS-{}".format(self._id), output_file=output_file)) log.info("VPCS '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name, id=self.id, port_number=port_number)) - @asyncio.coroutine - def stop_capture(self, port_number): + async def stop_capture(self, port_number): """ Stops a packet capture. 
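download_appliance_templates() above is the same aiohttp client logic with every yield from turned into await: send a GET with an If-None-Match header, short-circuit on 304, otherwise read the JSON body and remember the new ETag. A minimal sketch of that flow (the URL is only an example; any JSON endpoint would do):

    import asyncio
    import aiohttp

    async def fetch_if_changed(url, etag=None):
        headers = {}
        if etag:
            headers["If-None-Match"] = etag       # let the server answer 304 if unchanged
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as response:
                if response.status == 304:
                    return None, etag             # cached copy is still current
                data = await response.json()
                return data, response.headers.get("ETag")

    data, etag = asyncio.get_event_loop().run_until_complete(
        fetch_if_changed("https://api.github.com/repos/GNS3/gns3-registry"))
    print(etag)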
@@ -475,7 +464,7 @@ class VPCSVM(BaseNode): nio.stopPacketCapture() if self.ubridge: - yield from self._ubridge_send('bridge stop_capture {name}'.format(name="VPCS-{}".format(self._id))) + await self._ubridge_send('bridge stop_capture {name}'.format(name="VPCS-{}".format(self._id))) log.info("VPCS '{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name, id=self.id, diff --git a/gns3server/controller/__init__.py b/gns3server/controller/__init__.py index f2acb27e..fb7f2a60 100644 --- a/gns3server/controller/__init__.py +++ b/gns3server/controller/__init__.py @@ -65,8 +65,7 @@ class Controller: log.info("Load controller configuration file {}".format(self._config_file)) @locking - @asyncio.coroutine - def download_appliance_templates(self): + async def download_appliance_templates(self): session = aiohttp.ClientSession() try: @@ -74,7 +73,7 @@ class Controller: if self._appliance_templates_etag: log.info("Checking if appliance templates are up-to-date (ETag {})".format(self._appliance_templates_etag)) headers["If-None-Match"] = self._appliance_templates_etag - response = yield from session.get('https://api.github.com/repos/GNS3/gns3-registry/contents/appliances', headers=headers) + response = await session.get('https://api.github.com/repos/GNS3/gns3-registry/contents/appliances', headers=headers) if response.status == 304: log.info("Appliance templates are already up-to-date (ETag {})".format(self._appliance_templates_etag)) return @@ -84,19 +83,19 @@ class Controller: if etag: self._appliance_templates_etag = etag self.save() - json_data = yield from response.json() + json_data = await response.json() response.close() appliances_dir = get_resource('appliances') for appliance in json_data: if appliance["type"] == "file": appliance_name = appliance["name"] log.info("Download appliance template file from '{}'".format(appliance["download_url"])) - response = yield from session.get(appliance["download_url"]) + response = await session.get(appliance["download_url"]) if response.status != 200: log.warning("Could not download '{}' due to HTTP error code {}".format(appliance["download_url"], response.status)) continue try: - appliance_data = yield from response.read() + appliance_data = await response.read() except asyncio.TimeoutError: log.warning("Timeout while downloading '{}'".format(appliance["download_url"])) continue @@ -215,8 +214,7 @@ class Controller: for b in builtins: self._appliances[b.id] = b - @asyncio.coroutine - def start(self): + async def start(self): log.info("Controller is starting") self.load_base_files() @@ -235,9 +233,9 @@ class Controller: if name == "gns3vm": name = "Main server" - computes = yield from self._load_controller_settings() + computes = await self._load_controller_settings() try: - self._local_server = yield from self.add_compute(compute_id="local", + self._local_server = await self.add_compute(compute_id="local", name=name, protocol=server_config.get("protocol", "http"), host=host, @@ -251,15 +249,15 @@ class Controller: sys.exit(1) for c in computes: try: - yield from self.add_compute(**c) + await self.add_compute(**c) except (aiohttp.web.HTTPError, KeyError): pass # Skip not available servers at loading - yield from self.load_projects() + await self.load_projects() try: - yield from self.gns3vm.auto_start_vm() + await self.gns3vm.auto_start_vm() except GNS3VMError as e: log.warning(str(e)) - yield from self._project_auto_open() + await self._project_auto_open() def _update_config(self): """ @@ -271,19 +269,18 @@ class Controller: 
self._local_server.user = server_config.get("user") self._local_server.password = server_config.get("password") - @asyncio.coroutine - def stop(self): + async def stop(self): log.info("Controller is Stopping") for project in self._projects.values(): - yield from project.close() + await project.close() for compute in self._computes.values(): try: - yield from compute.close() + await compute.close() # We don't care if a compute is down at this step except (ComputeError, aiohttp.web.HTTPError, OSError): pass - yield from self.gns3vm.exit_vm() + await self.gns3vm.exit_vm() self._computes = {} self._projects = {} @@ -321,15 +318,14 @@ class Controller: except OSError as e: log.error("Cannnot write configuration file '{}': {}".format(self._config_file, e)) - @asyncio.coroutine - def _load_controller_settings(self): + async def _load_controller_settings(self): """ Reload the controller configuration from disk """ try: if not os.path.exists(self._config_file): - yield from self._import_gns3_gui_conf() + await self._import_gns3_gui_conf() self.save() with open(self._config_file) as f: data = json.load(f) @@ -350,8 +346,7 @@ class Controller: self.load_appliances() return data.get("computes", []) - @asyncio.coroutine - def load_projects(self): + async def load_projects(self): """ Preload the list of projects from disk """ @@ -366,7 +361,7 @@ class Controller: for file in os.listdir(project_dir): if file.endswith(".gns3"): try: - yield from self.load_project(os.path.join(project_dir, file), load=False) + await self.load_project(os.path.join(project_dir, file), load=False) except (aiohttp.web.HTTPConflict, NotImplementedError): pass # Skip not compatible projects except OSError as e: @@ -417,8 +412,7 @@ class Controller: os.makedirs(appliances_path, exist_ok=True) return appliances_path - @asyncio.coroutine - def _import_gns3_gui_conf(self): + async def _import_gns3_gui_conf(self): """ Import old config from GNS3 GUI """ @@ -430,7 +424,7 @@ class Controller: server_settings = data.get("Servers", {}) for remote in server_settings.get("remote_servers", []): try: - yield from self.add_compute( + await self.add_compute( host=remote.get("host", "localhost"), port=remote.get("port", 3080), protocol=remote.get("protocol", "http"), @@ -488,8 +482,7 @@ class Controller: self.load_appliances() self.notification.controller_emit("settings.updated", val) - @asyncio.coroutine - def add_compute(self, compute_id=None, name=None, force=False, connect=True, **kwargs): + async def add_compute(self, compute_id=None, name=None, force=False, connect=True, **kwargs): """ Add a server to the dictionary of compute servers controlled by this controller @@ -519,24 +512,23 @@ class Controller: self._computes[compute.id] = compute self.save() if connect: - yield from compute.connect() + await compute.connect() self.notification.controller_emit("compute.created", compute.__json__()) return compute else: if connect: - yield from self._computes[compute_id].connect() + await self._computes[compute_id].connect() self.notification.controller_emit("compute.updated", self._computes[compute_id].__json__()) return self._computes[compute_id] - @asyncio.coroutine - def close_compute_projects(self, compute): + async def close_compute_projects(self, compute): """ Close projects running on a compute """ for project in self._projects.values(): if compute in project.computes: - yield from project.close() + await project.close() def compute_has_open_project(self, compute): """ @@ -550,8 +542,7 @@ class Controller: return True return False - 
@asyncio.coroutine - def delete_compute(self, compute_id): + async def delete_compute(self, compute_id): """ Delete a compute node. Project using this compute will be close @@ -562,8 +553,8 @@ class Controller: compute = self.get_compute(compute_id) except aiohttp.web.HTTPNotFound: return - yield from self.close_compute_projects(compute) - yield from compute.close() + await self.close_compute_projects(compute) + await compute.close() del self._computes[compute_id] self.save() self.notification.controller_emit("compute.deleted", compute.__json__()) @@ -603,8 +594,7 @@ class Controller: return compute_id in self._computes - @asyncio.coroutine - def add_project(self, project_id=None, name=None, path=None, **kwargs): + async def add_project(self, project_id=None, name=None, path=None, **kwargs): """ Creates a project or returns an existing project @@ -635,8 +625,7 @@ class Controller: except KeyError: raise aiohttp.web.HTTPNotFound(text="Project ID {} doesn't exist".format(project_id)) - @asyncio.coroutine - def get_loaded_project(self, project_id): + async def get_loaded_project(self, project_id): """ Returns a project or raise a 404 error. @@ -644,7 +633,7 @@ class Controller: """ project = self.get_project(project_id) - yield from project.wait_loaded() + await project.wait_loaded() return project def remove_project(self, project): @@ -652,8 +641,7 @@ class Controller: if project.id in self._projects: del self._projects[project.id] - @asyncio.coroutine - def load_project(self, path, load=True): + async def load_project(self, path, load=True): """ Load a project from a .gns3 @@ -670,20 +658,19 @@ class Controller: if topo_data["project_id"] in self._projects: project = self._projects[topo_data["project_id"]] else: - project = yield from self.add_project(path=os.path.dirname(path), status="closed", filename=os.path.basename(path), **topo_data) + project = await self.add_project(path=os.path.dirname(path), status="closed", filename=os.path.basename(path), **topo_data) if load or project.auto_open: - yield from project.open() + await project.open() return project - @asyncio.coroutine - def _project_auto_open(self): + async def _project_auto_open(self): """ Auto open the project with auto open enable """ for project in self._projects.values(): if project.auto_open: - yield from project.open() + await project.open() def get_free_project_name(self, base_name): """ @@ -747,8 +734,7 @@ class Controller: Controller._instance = Controller() return Controller._instance - @asyncio.coroutine - def autoidlepc(self, compute_id, platform, image, ram): + async def autoidlepc(self, compute_id, platform, image, ram): """ Compute and IDLE PC value for an image @@ -761,17 +747,16 @@ class Controller: compute = self.get_compute(compute_id) for project in list(self._projects.values()): if project.name == "AUTOIDLEPC": - yield from project.delete() + await project.delete() self.remove_project(project) - project = yield from self.add_project(name="AUTOIDLEPC") - node = yield from project.add_node(compute, "AUTOIDLEPC", str(uuid.uuid4()), node_type="dynamips", platform=platform, image=image, ram=ram) - res = yield from node.dynamips_auto_idlepc() - yield from project.delete() + project = await self.add_project(name="AUTOIDLEPC") + node = await project.add_node(compute, "AUTOIDLEPC", str(uuid.uuid4()), node_type="dynamips", platform=platform, image=image, ram=ram) + res = await node.dynamips_auto_idlepc() + await project.delete() self.remove_project(project) return res - @asyncio.coroutine - def compute_ports(self, 
compute_id): + async def compute_ports(self, compute_id): """ Get the ports used by a compute. @@ -779,5 +764,5 @@ class Controller: """ compute = self.get_compute(compute_id) - response = yield from compute.get("/network/ports") + response = await compute.get("/network/ports") return response.json diff --git a/gns3server/controller/compute.py b/gns3server/controller/compute.py index da6b999a..61532a68 100644 --- a/gns3server/controller/compute.py +++ b/gns3server/controller/compute.py @@ -27,7 +27,7 @@ from operator import itemgetter from ..utils import parse_version from ..utils.images import list_images -from ..utils.asyncio import locking, asyncio_ensure_future +from ..utils.asyncio import locking from ..controller.controller_error import ControllerError from ..version import __version__, __version_info__ @@ -146,18 +146,16 @@ class Compute: """ self._last_error = msg - @asyncio.coroutine - def interfaces(self): + async def interfaces(self): """ Get the list of network on compute """ if not self._interfaces_cache: - response = yield from self.get("/network/interfaces") + response = await self.get("/network/interfaces") self._interfaces_cache = response.json return self._interfaces_cache - @asyncio.coroutine - def update(self, **kwargs): + async def update(self, **kwargs): for kw in kwargs: if kw not in ("user", "password"): setattr(self, kw, kwargs[kw]) @@ -170,13 +168,12 @@ class Compute: self._controller.notification.controller_emit("compute.updated", self.__json__()) self._controller.save() - @asyncio.coroutine - def close(self): + async def close(self): self._connected = False if self._http_session: self._http_session.close() if self._ws: - yield from self._ws.close() + await self._ws.close() self._ws = None self._closed = True @@ -314,8 +311,7 @@ class Compute: "last_error": self._last_error } - @asyncio.coroutine - def download_file(self, project, path): + async def download_file(self, project, path): """ Read file of a project and download it @@ -325,13 +321,12 @@ class Compute: """ url = self._getUrl("/projects/{}/files/{}".format(project.id, path)) - response = yield from self._session().request("GET", url, auth=self._auth) + response = await self._session().request("GET", url, auth=self._auth) if response.status == 404: raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path)) return response - @asyncio.coroutine - def download_image(self, image_type, image): + async def download_image(self, image_type, image): """ Read file of a project and download it @@ -341,13 +336,12 @@ class Compute: """ url = self._getUrl("/{}/images/{}".format(image_type, image)) - response = yield from self._session().request("GET", url, auth=self._auth) + response = await self._session().request("GET", url, auth=self._auth) if response.status == 404: raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(image)) return response - @asyncio.coroutine - def stream_file(self, project, path, timeout=None): + async def stream_file(self, project, path, timeout=None): """ Read file of a project and stream it @@ -372,7 +366,7 @@ class Compute: self._response.close() url = self._getUrl("/projects/{}/stream/{}".format(project.id, path)) - response = yield from self._session().request("GET", url, auth=self._auth, timeout=timeout) + response = await self._session().request("GET", url, auth=self._auth, timeout=timeout) if response.status == 404: raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path)) elif response.status == 403: @@ -383,34 +377,31 @@ class 
Compute: path)) return StreamResponse(response) - @asyncio.coroutine - def http_query(self, method, path, data=None, dont_connect=False, **kwargs): + async def http_query(self, method, path, data=None, dont_connect=False, **kwargs): """ :param dont_connect: If true do not reconnect if not connected """ if not self._connected and not dont_connect: if self._id == "vm" and not self._controller.gns3vm.running: - yield from self._controller.gns3vm.start() - yield from self.connect() + await self._controller.gns3vm.start() + await self.connect() if not self._connected and not dont_connect: raise ComputeError("Cannot connect to compute '{}' with request {} {}".format(self._name, method, path)) - response = yield from self._run_http_query(method, path, data=data, **kwargs) + response = await self._run_http_query(method, path, data=data, **kwargs) return response - @asyncio.coroutine - def _try_reconnect(self): + async def _try_reconnect(self): """ We catch error during reconnect """ try: - yield from self.connect() + await self.connect() except aiohttp.web.HTTPConflict: pass @locking - @asyncio.coroutine - def connect(self): + async def connect(self): """ Check if remote server is accessible """ @@ -418,7 +409,7 @@ class Compute: if not self._connected and not self._closed and self.host: try: log.info("Connecting to compute '{}'".format(self._id)) - response = yield from self._run_http_query("GET", "/capabilities") + response = await self._run_http_query("GET", "/capabilities") except ComputeError as e: log.warning("Cannot connect to compute '{}': {}".format(self._id, e)) # Try to reconnect after 2 seconds if server unavailable only if not during tests (otherwise we create a ressource usage bomb) @@ -430,8 +421,8 @@ class Compute: # After 5 failure we close the project using the compute to avoid sync issues if self._connection_failure == 10: log.error("Could not connect to compute '{}' after multiple attempts: {}".format(self._id, e)) - yield from self._controller.close_compute_projects(self) - asyncio.get_event_loop().call_later(2, lambda: asyncio_ensure_future(self._try_reconnect())) + await self._controller.close_compute_projects(self) + asyncio.get_event_loop().call_later(2, lambda: asyncio.ensure_future(self._try_reconnect())) return except aiohttp.web.HTTPNotFound: raise aiohttp.web.HTTPConflict(text="The server {} is not a GNS3 server or it's a 1.X server".format(self._id)) @@ -479,18 +470,17 @@ class Compute: self._last_error = None self._controller.notification.controller_emit("compute.updated", self.__json__()) - @asyncio.coroutine - def _connect_notification(self): + async def _connect_notification(self): """ Connect to the notification stream """ try: - self._ws = yield from self._session().ws_connect(self._getUrl("/notifications/ws"), auth=self._auth) + self._ws = await self._session().ws_connect(self._getUrl("/notifications/ws"), auth=self._auth) except (aiohttp.WSServerHandshakeError, aiohttp.ClientResponseError): self._ws = None while self._ws is not None: try: - response = yield from self._ws.receive() + response = await self._ws.receive() except aiohttp.WSServerHandshakeError: self._ws = None break @@ -505,13 +495,13 @@ class Compute: self._memory_usage_percent = event["memory_usage_percent"] self._controller.notification.controller_emit("compute.updated", self.__json__()) else: - yield from self._controller.notification.dispatch(action, event, compute_id=self.id) + await self._controller.notification.dispatch(action, event, compute_id=self.id) if self._ws: - yield from 
self._ws.close() + await self._ws.close() # Try to reconnect after 1 seconds if server unavailable only if not during tests (otherwise we create a ressources usage bomb) if not hasattr(sys, "_called_from_test") or not sys._called_from_test: - asyncio.get_event_loop().call_later(1, lambda: asyncio_ensure_future(self.connect())) + asyncio.get_event_loop().call_later(1, lambda: asyncio.ensure_future(self.connect())) self._ws = None self._cpu_usage_percent = None self._memory_usage_percent = None @@ -536,8 +526,7 @@ class Compute: """ Returns URL for specific path at Compute""" return self._getUrl(path) - @asyncio.coroutine - def _run_http_query(self, method, path, data=None, timeout=20, raw=False): + async def _run_http_query(self, method, path, data=None, timeout=20, raw=False): with Timeout(timeout): url = self._getUrl(path) headers = {} @@ -562,13 +551,13 @@ class Compute: data = json.dumps(data).encode("utf-8") try: log.debug("Attempting request to compute: {method} {url} {headers}".format(method=method, url=url, headers=headers)) - response = yield from self._session().request(method, url, headers=headers, data=data, auth=self._auth, chunked=chunked, timeout=timeout) + response = await self._session().request(method, url, headers=headers, data=data, auth=self._auth, chunked=chunked, timeout=timeout) except asyncio.TimeoutError: raise ComputeError("Timeout error for {} call to {} after {}s".format(method, url, timeout)) except (aiohttp.ClientError, aiohttp.ServerDisconnectedError, ValueError, KeyError, socket.gaierror) as e: # aiohttp 2.3.1 raises socket.gaierror when cannot find host raise ComputeError(str(e)) - body = yield from response.read() + body = await response.read() if body and not raw: body = body.decode() @@ -617,46 +606,40 @@ class Compute: response.body = b"" return response - @asyncio.coroutine - def get(self, path, **kwargs): - return (yield from self.http_query("GET", path, **kwargs)) + async def get(self, path, **kwargs): + return (await self.http_query("GET", path, **kwargs)) - @asyncio.coroutine - def post(self, path, data={}, **kwargs): - response = yield from self.http_query("POST", path, data, **kwargs) + async def post(self, path, data={}, **kwargs): + response = await self.http_query("POST", path, data, **kwargs) return response - @asyncio.coroutine - def put(self, path, data={}, **kwargs): - response = yield from self.http_query("PUT", path, data, **kwargs) + async def put(self, path, data={}, **kwargs): + response = await self.http_query("PUT", path, data, **kwargs) return response - @asyncio.coroutine - def delete(self, path, **kwargs): - return (yield from self.http_query("DELETE", path, **kwargs)) + async def delete(self, path, **kwargs): + return (await self.http_query("DELETE", path, **kwargs)) - @asyncio.coroutine - def forward(self, method, type, path, data=None): + async def forward(self, method, type, path, data=None): """ Forward a call to the emulator on compute """ try: action = "/{}/{}".format(type, path) - res = yield from self.http_query(method, action, data=data, timeout=None) + res = await self.http_query(method, action, data=data, timeout=None) except aiohttp.ServerDisconnectedError: log.error("Connection lost to %s during %s %s", self._id, method, action) raise aiohttp.web.HTTPGatewayTimeout() return res.json - @asyncio.coroutine - def images(self, type): + async def images(self, type): """ Return the list of images available for this type on controller and on the compute node. 
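The reconnect logic above cannot await inside loop.call_later(), which only accepts plain callbacks, so the coroutine is wrapped in asyncio.ensure_future at fire time (the old asyncio_ensure_future shim is gone). The idiom in isolation:

    import asyncio

    async def try_reconnect():
        print("reconnecting...")

    def schedule_reconnect(delay=1):
        # call_later takes a synchronous callback; the lambda turns the
        # coroutine into a Task only when the timer fires.
        asyncio.get_event_loop().call_later(delay, lambda: asyncio.ensure_future(try_reconnect()))

    async def main():
        schedule_reconnect(0.1)
        await asyncio.sleep(0.2)   # keep the loop alive long enough for the timer

    asyncio.get_event_loop().run_until_complete(main())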
""" images = [] - res = yield from self.http_query("GET", "/{}/images".format(type), timeout=None) + res = await self.http_query("GET", "/{}/images".format(type), timeout=None) images = res.json try: @@ -671,17 +654,15 @@ class Compute: raise ComputeError("Cannot list images: {}".format(str(e))) return images - @asyncio.coroutine - def list_files(self, project): + async def list_files(self, project): """ List files in the project on computes """ path = "/projects/{}/files".format(project.id) - res = yield from self.http_query("GET", path, timeout=None) + res = await self.http_query("GET", path, timeout=None) return res.json - @asyncio.coroutine - def get_ip_on_same_subnet(self, other_compute): + async def get_ip_on_same_subnet(self, other_compute): """ Try to find the best ip for communication from one compute to another @@ -695,8 +676,8 @@ class Compute: if (self.host_ip not in ('0.0.0.0', '127.0.0.1') and other_compute.host_ip not in ('0.0.0.0', '127.0.0.1')): return (self.host_ip, other_compute.host_ip) - this_compute_interfaces = yield from self.interfaces() - other_compute_interfaces = yield from other_compute.interfaces() + this_compute_interfaces = await self.interfaces() + other_compute_interfaces = await other_compute.interfaces() # Sort interface to put the compute host in first position # we guess that if user specified this host it could have a reason (VMware Nat / Host only interface) diff --git a/gns3server/controller/drawing.py b/gns3server/controller/drawing.py index 581bc5d8..e20573f8 100644 --- a/gns3server/controller/drawing.py +++ b/gns3server/controller/drawing.py @@ -165,8 +165,7 @@ class Drawing: def rotation(self, val): self._rotation = val - @asyncio.coroutine - def update(self, **kwargs): + async def update(self, **kwargs): """ Update the drawing diff --git a/gns3server/controller/export_project.py b/gns3server/controller/export_project.py index c8725a5e..00baf332 100644 --- a/gns3server/controller/export_project.py +++ b/gns3server/controller/export_project.py @@ -30,8 +30,7 @@ import logging log = logging.getLogger(__name__) -@asyncio.coroutine -def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False): +async def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False): """ Export a project to a zip file. 
@@ -61,7 +60,7 @@ def export_project(project, temporary_dir, include_images=False, keep_compute_id # First we process the .gns3 in order to be sure we don't have an error for file in os.listdir(project._path): if file.endswith(".gns3"): - yield from _patch_project_file(project, os.path.join(project._path, file), zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir) + await _patch_project_file(project, os.path.join(project._path, file), zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir) # Export the local files for root, dirs, files in os.walk(project._path, topdown=True): @@ -86,15 +85,15 @@ def export_project(project, temporary_dir, include_images=False, keep_compute_id downloaded_files = set() for compute in project.computes: if compute.id != "local": - compute_files = yield from compute.list_files(project) + compute_files = await compute.list_files(project) for compute_file in compute_files: if _is_exportable(compute_file["path"]): (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir) f = open(fd, "wb", closefd=True) - response = yield from compute.download_file(project, compute_file["path"]) + response = await compute.download_file(project, compute_file["path"]) while True: try: - data = yield from response.content.read(1024) + data = await response.content.read(1024) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading file '{}' from remote compute server {}:{}".format(compute_file["path"], compute.host, compute.port)) if not data: @@ -154,8 +153,7 @@ def _is_exportable(path): return True -@asyncio.coroutine -def _patch_project_file(project, path, zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir): +async def _patch_project_file(project, path, zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir): """ Patch a project file (.gns3) to export a project. The .gns3 file is renamed to project.gns3 @@ -219,7 +217,7 @@ def _patch_project_file(project, path, zstream, include_images, keep_compute_id, for i in images if i['compute_id'] != 'local']) for compute_id, image_type, image in remote_images: - yield from _export_remote_images(project, compute_id, image_type, image, zstream, temporary_dir) + await _export_remote_images(project, compute_id, image_type, image, zstream, temporary_dir) zstream.writestr("project.gns3", json.dumps(topology).encode()) return images @@ -253,8 +251,7 @@ def _export_local_image(image, zstream): return -@asyncio.coroutine -def _export_remote_images(project, compute_id, image_type, image, project_zipfile, temporary_dir): +async def _export_remote_images(project, compute_id, image_type, image, project_zipfile, temporary_dir): """ Export specific image from remote compute. """ @@ -268,14 +265,14 @@ def _export_remote_images(project, compute_id, image_type, image, project_zipfil (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir) f = open(fd, "wb", closefd=True) - response = yield from compute.download_image(image_type, image) + response = await compute.download_image(image_type, image) if response.status != 200: raise aiohttp.web.HTTPConflict(text="Cannot export image from '{}' compute. 
Compute returned status code {}.".format(compute_id, response.status)) while True: try: - data = yield from response.content.read(1024) + data = await response.content.read(1024) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading image '{}' from remote compute server {}:{}".format(image, compute.host, compute.port)) if not data: diff --git a/gns3server/controller/gns3vm/__init__.py b/gns3server/controller/gns3vm/__init__.py index 44fcf950..6ed9c3a1 100644 --- a/gns3server/controller/gns3vm/__init__.py +++ b/gns3server/controller/gns3vm/__init__.py @@ -21,7 +21,7 @@ import asyncio import aiohttp import ipaddress -from ...utils.asyncio import locking, asyncio_ensure_future +from ...utils.asyncio import locking from .vmware_gns3_vm import VMwareGNS3VM from .virtualbox_gns3_vm import VirtualBoxGNS3VM from .hyperv_gns3_vm import HyperVGNS3VM @@ -204,8 +204,7 @@ class GNS3VM: self._settings.update(val) - @asyncio.coroutine - def update_settings(self, settings): + async def update_settings(self, settings): """ Update settings and will restart the VM if require """ @@ -213,15 +212,15 @@ class GNS3VM: new_settings = copy.copy(self._settings) new_settings.update(settings) if self.settings != new_settings: - yield from self._stop() + await self._stop() self._settings = settings self._controller.save() if self.enable: - yield from self.start() + await self.start() else: # When user fix something on his system and try again if self.enable and not self.current_engine().running: - yield from self.start() + await self.start() def _get_engine(self, engine): """ @@ -248,8 +247,7 @@ class GNS3VM: def __json__(self): return self._settings - @asyncio.coroutine - def list(self, engine): + async def list(self, engine): """ List VMS for an engine """ @@ -257,7 +255,7 @@ class GNS3VM: engine = self._get_engine(engine) vms = [] try: - for vm in (yield from engine.list()): + for vm in (await engine.list()): vms.append({"vmname": vm["vmname"]}) except GNS3VMError as e: # We raise error only if user activated the GNS3 VM @@ -266,19 +264,18 @@ class GNS3VM: raise e return vms - @asyncio.coroutine - def auto_start_vm(self): + async def auto_start_vm(self): """ Auto start the GNS3 VM if require """ if self.enable: try: - yield from self.start() + await self.start() except GNS3VMError as e: # User will receive the error later when they will try to use the node try: - compute = yield from self._controller.add_compute(compute_id="vm", + compute = await self._controller.add_compute(compute_id="vm", name="GNS3 VM ({})".format(self.current_engine().vmname), host=None, force=True) @@ -288,21 +285,19 @@ class GNS3VM: pass log.error("Cannot start the GNS3 VM: {}".format(e)) - @asyncio.coroutine - def exit_vm(self): + async def exit_vm(self): if self.enable: try: if self._settings["when_exit"] == "stop": - yield from self._stop() + await self._stop() elif self._settings["when_exit"] == "suspend": - yield from self._suspend() + await self._suspend() except GNS3VMError as e: log.warning(str(e)) @locking - @asyncio.coroutine - def start(self): + async def start(self): """ Start the GNS3 VM """ @@ -316,22 +311,22 @@ class GNS3VM: engine.ram = self._settings["ram"] engine.vcpus = self._settings["vcpus"] engine.headless = self._settings["headless"] - compute = yield from self._controller.add_compute(compute_id="vm", + compute = await self._controller.add_compute(compute_id="vm", name="GNS3 VM is starting ({})".format(engine.vmname), host=None, force=True, connect=False) try: - yield from 
engine.start() + await engine.start() except Exception as e: - yield from self._controller.delete_compute("vm") + await self._controller.delete_compute("vm") log.error("Cannot start the GNS3 VM: {}".format(str(e))) - yield from compute.update(name="GNS3 VM ({})".format(engine.vmname)) + await compute.update(name="GNS3 VM ({})".format(engine.vmname)) compute.set_last_error(str(e)) raise e - yield from compute.connect() # we can connect now that the VM has started - yield from compute.update(name="GNS3 VM ({})".format(engine.vmname), + await compute.connect() # we can connect now that the VM has started + await compute.update(name="GNS3 VM ({})".format(engine.vmname), protocol=self.protocol, host=self.ip_address, port=self.port, @@ -340,16 +335,15 @@ class GNS3VM: # check if the VM is in the same subnet as the local server, start 10 seconds later to give # some time for the compute in the VM to be ready for requests - asyncio.get_event_loop().call_later(10, lambda: asyncio_ensure_future(self._check_network(compute))) + asyncio.get_event_loop().call_later(10, lambda: asyncio.ensure_future(self._check_network(compute))) - @asyncio.coroutine - def _check_network(self, compute): + async def _check_network(self, compute): """ Check that the VM is in the same subnet as the local server """ try: - vm_interfaces = yield from compute.interfaces() + vm_interfaces = await compute.interfaces() vm_interface_netmask = None for interface in vm_interfaces: if interface["ip_address"] == self.ip_address: @@ -360,7 +354,7 @@ class GNS3VM: for compute_id in self._controller.computes: if compute_id == "local": compute = self._controller.get_compute(compute_id) - interfaces = yield from compute.interfaces() + interfaces = await compute.interfaces() netmask = None for interface in interfaces: if interface["ip_address"] == compute.host_ip: @@ -378,27 +372,25 @@ class GNS3VM: log.warning("Could not check the VM is in the same subnet as the local server: {}".format(e.text)) @locking - @asyncio.coroutine - def _suspend(self): + async def _suspend(self): """ Suspend the GNS3 VM """ engine = self.current_engine() if "vm" in self._controller.computes: - yield from self._controller.delete_compute("vm") + await self._controller.delete_compute("vm") if engine.running: log.info("Suspend the GNS3 VM") - yield from engine.suspend() + await engine.suspend() @locking - @asyncio.coroutine - def _stop(self): + async def _stop(self): """ Stop the GNS3 VM """ engine = self.current_engine() if "vm" in self._controller.computes: - yield from self._controller.delete_compute("vm") + await self._controller.delete_compute("vm") if engine.running: log.info("Stop the GNS3 VM") - yield from engine.stop() + await engine.stop() diff --git a/gns3server/controller/gns3vm/base_gns3_vm.py b/gns3server/controller/gns3vm/base_gns3_vm.py index a277182c..64305573 100644 --- a/gns3server/controller/gns3vm/base_gns3_vm.py +++ b/gns3server/controller/gns3vm/base_gns3_vm.py @@ -254,32 +254,28 @@ class BaseGNS3VM: return self._engine - @asyncio.coroutine - def list(self): + async def list(self): """ List all VMs """ raise NotImplementedError - @asyncio.coroutine - def start(self): + async def start(self): """ Starts the GNS3 VM. """ raise NotImplementedError - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspend the GNS3 VM. """ raise NotImplementedError - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the GNS3 VM. 
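Several of these coroutines (start, connect, _suspend, _stop, download_appliance_templates) keep their @locking decorator from gns3server/utils/asyncio, which serializes concurrent calls. The helper itself is not shown in this hunk, so the following is only a guess at its general shape: a per-instance asyncio.Lock held around the wrapped coroutine.

    import asyncio
    import functools

    def locking(coro):
        # Illustrative sketch only -- the real decorator lives in
        # gns3server/utils/asyncio and may differ in detail.
        @functools.wraps(coro)
        async def wrapper(self, *args, **kwargs):
            lock_name = "__{}_lock".format(coro.__name__)
            if not hasattr(self, lock_name):
                setattr(self, lock_name, asyncio.Lock())
            async with getattr(self, lock_name):
                return await coro(self, *args, **kwargs)
        return wrapper

    class VM:
        @locking
        async def stop(self):
            await asyncio.sleep(0.1)
            print("only one stop() runs at a time")

    asyncio.get_event_loop().run_until_complete(VM().stop())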
""" diff --git a/gns3server/controller/gns3vm/hyperv_gns3_vm.py b/gns3server/controller/gns3vm/hyperv_gns3_vm.py index 5cc27dac..61515eb5 100644 --- a/gns3server/controller/gns3vm/hyperv_gns3_vm.py +++ b/gns3server/controller/gns3vm/hyperv_gns3_vm.py @@ -164,8 +164,7 @@ class HyperVGNS3VM(BaseGNS3VM): except Exception as e: raise GNS3VMError("Could not set to {} and RAM amount set to {}: {}".format(vcpus, ram, e)) - @asyncio.coroutine - def list(self): + async def list(self): """ List all Hyper-V VMs """ @@ -189,8 +188,7 @@ class HyperVGNS3VM(BaseGNS3VM): return wmi.WMI(moniker=path.replace('\\', '/')) - @asyncio.coroutine - def _set_state(self, state): + async def _set_state(self, state): """ Set the desired state of the VM """ @@ -203,15 +201,14 @@ class HyperVGNS3VM(BaseGNS3VM): if ret == HyperVGNS3VM._WMI_JOB_STATUS_STARTED: job = self._get_wmi_obj(job_path) while job.JobState == HyperVGNS3VM._WMI_JOB_STATE_RUNNING: - yield from asyncio.sleep(0.1) + await asyncio.sleep(0.1) job = self._get_wmi_obj(job_path) if job.JobState != HyperVGNS3VM._WMI_JOB_STATE_COMPLETED: raise GNS3VMError("Error while changing state: {}".format(job.ErrorSummaryDescription)) elif ret != 0 or ret != 32775: raise GNS3VMError("Failed to change state to {}".format(state)) - @asyncio.coroutine - def start(self): + async def start(self): """ Starts the GNS3 VM. """ @@ -228,7 +225,7 @@ class HyperVGNS3VM(BaseGNS3VM): # start the VM try: - yield from self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_ENABLED) + await self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_ENABLED) except GNS3VMError as e: raise GNS3VMError("Failed to start the GNS3 VM: {}".format(e)) log.info("GNS3 VM has been started") @@ -259,32 +256,30 @@ class HyperVGNS3VM(BaseGNS3VM): break elif trial == 0: raise GNS3VMError("Could not find guest IP address for {}".format(self.vmname)) - yield from asyncio.sleep(1) + await asyncio.sleep(1) self.ip_address = guest_ip_address log.info("GNS3 VM IP address set to {}".format(guest_ip_address)) self.running = True - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspends the GNS3 VM. """ try: - yield from self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_PAUSED) + await self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_PAUSED) except GNS3VMError as e: raise GNS3VMError("Failed to suspend the GNS3 VM: {}".format(e)) log.info("GNS3 VM has been suspended") self.running = False - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the GNS3 VM. """ try: - yield from self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_SHUTDOWN) + await self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_SHUTDOWN) except GNS3VMError as e: raise GNS3VMError("Failed to stop the GNS3 VM: {}".format(e)) log.info("GNS3 VM has been stopped") diff --git a/gns3server/controller/gns3vm/remote_gns3_vm.py b/gns3server/controller/gns3vm/remote_gns3_vm.py index 70d827a5..8ef533e4 100644 --- a/gns3server/controller/gns3vm/remote_gns3_vm.py +++ b/gns3server/controller/gns3vm/remote_gns3_vm.py @@ -32,8 +32,7 @@ class RemoteGNS3VM(BaseGNS3VM): self._engine = "remote" super().__init__(controller) - @asyncio.coroutine - def list(self): + async def list(self): """ List all VMs """ @@ -45,8 +44,7 @@ class RemoteGNS3VM(BaseGNS3VM): res.append({"vmname": compute.name}) return res - @asyncio.coroutine - def start(self): + async def start(self): """ Starts the GNS3 VM. 
""" @@ -65,15 +63,13 @@ class RemoteGNS3VM(BaseGNS3VM): return raise GNS3VMError("Can't start the GNS3 VM remote VM {} not found".format(self.vmname)) - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspend do nothing for remote server """ self.running = False - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the GNS3 VM. """ diff --git a/gns3server/controller/gns3vm/virtualbox_gns3_vm.py b/gns3server/controller/gns3vm/virtualbox_gns3_vm.py index ef1fba52..8ef060e5 100644 --- a/gns3server/controller/gns3vm/virtualbox_gns3_vm.py +++ b/gns3server/controller/gns3vm/virtualbox_gns3_vm.py @@ -40,24 +40,22 @@ class VirtualBoxGNS3VM(BaseGNS3VM): super().__init__(controller) self._virtualbox_manager = VirtualBox() - @asyncio.coroutine - def _execute(self, subcommand, args, timeout=60): + async def _execute(self, subcommand, args, timeout=60): try: - result = yield from self._virtualbox_manager.execute(subcommand, args, timeout) + result = await self._virtualbox_manager.execute(subcommand, args, timeout) return ("\n".join(result)) except VirtualBoxError as e: raise GNS3VMError("Error while executing VBoxManage command: {}".format(e)) - @asyncio.coroutine - def _get_state(self): + async def _get_state(self): """ Returns the VM state (e.g. running, paused etc.) :returns: state (string) """ - result = yield from self._execute("showvminfo", [self._vmname, "--machinereadable"]) + result = await self._execute("showvminfo", [self._vmname, "--machinereadable"]) for info in result.splitlines(): if '=' in info: name, value = info.split('=', 1) @@ -65,15 +63,14 @@ class VirtualBoxGNS3VM(BaseGNS3VM): return value.strip('"') return "unknown" - @asyncio.coroutine - def _look_for_interface(self, network_backend): + async def _look_for_interface(self, network_backend): """ Look for an interface with a specific network backend. :returns: interface number or -1 if none is found """ - result = yield from self._execute("showvminfo", [self._vmname, "--machinereadable"]) + result = await self._execute("showvminfo", [self._vmname, "--machinereadable"]) interface = -1 for info in result.splitlines(): if '=' in info: @@ -86,15 +83,14 @@ class VirtualBoxGNS3VM(BaseGNS3VM): continue return interface - @asyncio.coroutine - def _look_for_vboxnet(self, interface_number): + async def _look_for_vboxnet(self, interface_number): """ Look for the VirtualBox network name associated with a host only interface. :returns: None or vboxnet name """ - result = yield from self._execute("showvminfo", [self._vmname, "--machinereadable"]) + result = await self._execute("showvminfo", [self._vmname, "--machinereadable"]) for info in result.splitlines(): if '=' in info: name, value = info.split('=', 1) @@ -102,8 +98,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM): return value.strip('"') return None - @asyncio.coroutine - def _check_dhcp_server(self, vboxnet): + async def _check_dhcp_server(self, vboxnet): """ Check if the DHCP server associated with a vboxnet is enabled. 
@@ -111,7 +106,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM): :returns: boolean """ - properties = yield from self._execute("list", ["dhcpservers"]) + properties = await self._execute("list", ["dhcpservers"]) flag_dhcp_server_found = False for prop in properties.splitlines(): try: @@ -125,8 +120,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM): return True return False - @asyncio.coroutine - def _check_vboxnet_exists(self, vboxnet): + async def _check_vboxnet_exists(self, vboxnet): """ Check if the vboxnet interface exists @@ -134,7 +128,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM): :returns: boolean """ - properties = yield from self._execute("list", ["hostonlyifs"]) + properties = await self._execute("list", ["hostonlyifs"]) for prop in properties.splitlines(): try: name, value = prop.split(':', 1) @@ -144,13 +138,12 @@ class VirtualBoxGNS3VM(BaseGNS3VM): return True return False - @asyncio.coroutine - def _find_first_available_vboxnet(self): + async def _find_first_available_vboxnet(self): """ Find the first available vboxnet. """ - properties = yield from self._execute("list", ["hostonlyifs"]) + properties = await self._execute("list", ["hostonlyifs"]) for prop in properties.splitlines(): try: name, value = prop.split(':', 1) @@ -160,15 +153,14 @@ class VirtualBoxGNS3VM(BaseGNS3VM): return value.strip() return None - @asyncio.coroutine - def _check_vbox_port_forwarding(self): + async def _check_vbox_port_forwarding(self): """ Checks if the NAT port forwarding rule exists. :returns: boolean """ - result = yield from self._execute("showvminfo", [self._vmname, "--machinereadable"]) + result = await self._execute("showvminfo", [self._vmname, "--machinereadable"]) for info in result.splitlines(): if '=' in info: name, value = info.split('=', 1) @@ -176,66 +168,64 @@ class VirtualBoxGNS3VM(BaseGNS3VM): return True return False - @asyncio.coroutine - def list(self): + async def list(self): """ List all VirtualBox VMs """ - return (yield from self._virtualbox_manager.list_vms()) + return (await self._virtualbox_manager.list_vms()) - @asyncio.coroutine - def start(self): + async def start(self): """ Start the GNS3 VM. """ # get a NAT interface number - nat_interface_number = yield from self._look_for_interface("nat") + nat_interface_number = await self._look_for_interface("nat") if nat_interface_number < 0: raise GNS3VMError('VM "{}" must have a NAT interface configured in order to start'.format(self.vmname)) - hostonly_interface_number = yield from self._look_for_interface("hostonly") + hostonly_interface_number = await self._look_for_interface("hostonly") if hostonly_interface_number < 0: raise GNS3VMError('VM "{}" must have a host-only interface configured in order to start'.format(self.vmname)) - vboxnet = yield from self._look_for_vboxnet(hostonly_interface_number) + vboxnet = await self._look_for_vboxnet(hostonly_interface_number) if vboxnet is None: raise GNS3VMError('A VirtualBox host-only network could not be found on network adapter {} for "{}"'.format(hostonly_interface_number, self._vmname)) - if not (yield from self._check_vboxnet_exists(vboxnet)): + if not (await self._check_vboxnet_exists(vboxnet)): if sys.platform.startswith("win") and vboxnet == "vboxnet0": # The GNS3 VM is configured with vboxnet0 by default which is not available # on Windows. Try to patch this with the first available vboxnet we find. 
- first_available_vboxnet = yield from self._find_first_available_vboxnet() + first_available_vboxnet = await self._find_first_available_vboxnet() if first_available_vboxnet is None: raise GNS3VMError('Please add a VirtualBox host-only network with DHCP enabled and attached it to network adapter {} for "{}"'.format(hostonly_interface_number, self._vmname)) - yield from self.set_hostonly_network(hostonly_interface_number, first_available_vboxnet) + await self.set_hostonly_network(hostonly_interface_number, first_available_vboxnet) vboxnet = first_available_vboxnet else: raise GNS3VMError('VirtualBox host-only network "{}" does not exist, please make the sure the network adapter {} configuration is valid for "{}"'.format(vboxnet, hostonly_interface_number, self._vmname)) - if not (yield from self._check_dhcp_server(vboxnet)): + if not (await self._check_dhcp_server(vboxnet)): raise GNS3VMError('DHCP must be enabled on VirtualBox host-only network "{}"'.format(vboxnet)) - vm_state = yield from self._get_state() + vm_state = await self._get_state() log.info('"{}" state is {}'.format(self._vmname, vm_state)) if vm_state == "poweroff": - yield from self.set_vcpus(self.vcpus) - yield from self.set_ram(self.ram) + await self.set_vcpus(self.vcpus) + await self.set_ram(self.ram) if vm_state in ("poweroff", "saved"): # start the VM if it is not running args = [self._vmname] if self._headless: args.extend(["--type", "headless"]) - yield from self._execute("startvm", args) + await self._execute("startvm", args) elif vm_state == "paused": args = [self._vmname, "resume"] - yield from self._execute("controlvm", args) + await self._execute("controlvm", args) ip_address = "127.0.0.1" try: # get a random port on localhost @@ -246,23 +236,22 @@ class VirtualBoxGNS3VM(BaseGNS3VM): except OSError as e: raise GNS3VMError("Error while getting random port: {}".format(e)) - if (yield from self._check_vbox_port_forwarding()): + if (await self._check_vbox_port_forwarding()): # delete the GNS3VM NAT port forwarding rule if it exists log.info("Removing GNS3VM NAT port forwarding rule from interface {}".format(nat_interface_number)) - yield from self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "delete", "GNS3VM"]) + await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "delete", "GNS3VM"]) # add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to port 3080 in the VM log.info("Adding GNS3VM NAT port forwarding rule with port {} to interface {}".format(api_port, nat_interface_number)) - yield from self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), + await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "GNS3VM,tcp,{},{},,3080".format(ip_address, api_port)]) - self.ip_address = yield from self._get_ip(hostonly_interface_number, api_port) + self.ip_address = await self._get_ip(hostonly_interface_number, api_port) self.port = 3080 log.info("GNS3 VM has been started with IP {}".format(self.ip_address)) self.running = True - @asyncio.coroutine - def _get_ip(self, hostonly_interface_number, api_port): + async def _get_ip(self, hostonly_interface_number, api_port): """ Get the IP from VirtualBox. 
@@ -276,14 +265,14 @@ class VirtualBoxGNS3VM(BaseGNS3VM): session = aiohttp.ClientSession() try: resp = None - resp = yield from session.get('http://127.0.0.1:{}/v2/compute/network/interfaces'.format(api_port)) + resp = await session.get('http://127.0.0.1:{}/v2/compute/network/interfaces'.format(api_port)) except (OSError, aiohttp.ClientError, TimeoutError, asyncio.TimeoutError): pass if resp: if resp.status < 300: try: - json_data = yield from resp.json() + json_data = await resp.json() except ValueError: pass resp.close() @@ -296,35 +285,33 @@ class VirtualBoxGNS3VM(BaseGNS3VM): if "ip_address" in interface and len(interface["ip_address"]) > 0: return interface["ip_address"] remaining_try -= 1 - yield from asyncio.sleep(1) + await asyncio.sleep(1) raise GNS3VMError("Could not get the GNS3 VM ip make sure the VM receive an IP from VirtualBox") - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspend the GNS3 VM. """ - yield from self._execute("controlvm", [self._vmname, "savestate"], timeout=3) + await self._execute("controlvm", [self._vmname, "savestate"], timeout=3) log.info("GNS3 VM has been suspend") self.running = False - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the GNS3 VM. """ - vm_state = yield from self._get_state() + vm_state = await self._get_state() if vm_state == "poweroff": self.running = False return - yield from self._execute("controlvm", [self._vmname, "acpipowerbutton"], timeout=3) + await self._execute("controlvm", [self._vmname, "acpipowerbutton"], timeout=3) trial = 120 while True: try: - vm_state = yield from self._get_state() + vm_state = await self._get_state() # During a small amount of time the command will fail except GNS3VMError: vm_state = "running" @@ -332,37 +319,34 @@ class VirtualBoxGNS3VM(BaseGNS3VM): break trial -= 1 if trial == 0: - yield from self._execute("controlvm", [self._vmname, "poweroff"], timeout=3) + await self._execute("controlvm", [self._vmname, "poweroff"], timeout=3) break - yield from asyncio.sleep(1) + await asyncio.sleep(1) log.info("GNS3 VM has been stopped") self.running = False - @asyncio.coroutine - def set_vcpus(self, vcpus): + async def set_vcpus(self, vcpus): """ Set the number of vCPU cores for the GNS3 VM. :param vcpus: number of vCPU cores """ - yield from self._execute("modifyvm", [self._vmname, "--cpus", str(vcpus)], timeout=3) + await self._execute("modifyvm", [self._vmname, "--cpus", str(vcpus)], timeout=3) log.info("GNS3 VM vCPU count set to {}".format(vcpus)) - @asyncio.coroutine - def set_ram(self, ram): + async def set_ram(self, ram): """ Set the RAM amount for the GNS3 VM. :param ram: amount of memory """ - yield from self._execute("modifyvm", [self._vmname, "--memory", str(ram)], timeout=3) + await self._execute("modifyvm", [self._vmname, "--memory", str(ram)], timeout=3) log.info("GNS3 VM RAM amount set to {}".format(ram)) - @asyncio.coroutine - def set_hostonly_network(self, adapter_number, hostonly_network_name): + async def set_hostonly_network(self, adapter_number, hostonly_network_name): """ Set a VirtualBox host-only network on a network adapter for the GNS3 VM. 
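The stop() conversion in the hunk above keeps the original shutdown logic intact and only changes how it awaits: poll the VM state once per second, and fall back to a hard power-off if the guest never reaches "poweroff". A minimal standalone sketch of that loop, with placeholder callables rather than the real VBoxManage wrappers:

import asyncio

async def wait_for_poweroff(get_state, force_poweroff, trials=120):
    """Illustrative version of the polling loop in VirtualBoxGNS3VM.stop()."""
    while True:
        try:
            state = await get_state()
        except Exception:        # the real code catches GNS3VMError
            state = "running"    # the query can fail briefly during shutdown
        if state == "poweroff":
            return
        trials -= 1
        if trials == 0:
            await force_poweroff()   # equivalent of "controlvm <vm> poweroff"
            return
        await asyncio.sleep(1)

async def _demo():
    states = iter(["running", "running", "poweroff"])

    async def get_state():
        return next(states)

    async def force_poweroff():
        print("forced power off")

    await wait_for_poweroff(get_state, force_poweroff, trials=5)
    print("VM is stopped")

asyncio.get_event_loop().run_until_complete(_demo())
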
@@ -370,7 +354,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM): :param hostonly_network_name: name of the VirtualBox host-only network """ - yield from self._execute("modifyvm", [self._vmname, "--hostonlyadapter{}".format(adapter_number), hostonly_network_name], timeout=3) + await self._execute("modifyvm", [self._vmname, "--hostonlyadapter{}".format(adapter_number), hostonly_network_name], timeout=3) log.info('VirtualBox host-only network "{}" set on network adapter {} for "{}"'.format(hostonly_network_name, adapter_number, self._vmname)) diff --git a/gns3server/controller/gns3vm/vmware_gns3_vm.py b/gns3server/controller/gns3vm/vmware_gns3_vm.py index c14f0732..9976000a 100644 --- a/gns3server/controller/gns3vm/vmware_gns3_vm.py +++ b/gns3server/controller/gns3vm/vmware_gns3_vm.py @@ -43,24 +43,21 @@ class VMwareGNS3VM(BaseGNS3VM): def vmx_path(self): return self._vmx_path - @asyncio.coroutine - def _execute(self, subcommand, args, timeout=60, log_level=logging.INFO): + async def _execute(self, subcommand, args, timeout=60, log_level=logging.INFO): try: - result = yield from self._vmware_manager.execute(subcommand, args, timeout, log_level=log_level) + result = await self._vmware_manager.execute(subcommand, args, timeout, log_level=log_level) return (''.join(result)) except VMwareError as e: raise GNS3VMError("Error while executing VMware command: {}".format(e)) - @asyncio.coroutine - def _is_running(self): - result = yield from self._vmware_manager.execute("list", []) + async def _is_running(self): + result = await self._vmware_manager.execute("list", []) if self._vmx_path in result: return True return False - @asyncio.coroutine - def _set_vcpus_ram(self, vcpus, ram): + async def _set_vcpus_ram(self, vcpus, ram): """ Set the number of vCPU cores and amount of RAM for the GNS3 VM. @@ -85,8 +82,7 @@ class VMwareGNS3VM(BaseGNS3VM): except OSError as e: raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e)) - @asyncio.coroutine - def _set_extra_options(self): + async def _set_extra_options(self): try: """ Due to bug/change in VMWare 14 we're not able to pass Hardware Virtualization in GNS3VM. @@ -109,23 +105,21 @@ class VMwareGNS3VM(BaseGNS3VM): except OSError as e: raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e)) - @asyncio.coroutine - def list(self): + async def list(self): """ List all VMware VMs """ try: - return (yield from self._vmware_manager.list_vms()) + return (await self._vmware_manager.list_vms()) except VMwareError as e: raise GNS3VMError("Could not list VMware VMs: {}".format(str(e))) - @asyncio.coroutine - def start(self): + async def start(self): """ Starts the GNS3 VM. 
""" - vms = yield from self.list() + vms = await self.list() for vm in vms: if vm["vmname"] == self.vmname: self._vmx_path = vm["vmx_path"] @@ -138,25 +132,25 @@ class VMwareGNS3VM(BaseGNS3VM): raise GNS3VMError("VMware VMX file {} doesn't exist".format(self._vmx_path)) # check if the VMware guest tools are installed - vmware_tools_state = yield from self._execute("checkToolsState", [self._vmx_path]) + vmware_tools_state = await self._execute("checkToolsState", [self._vmx_path]) if vmware_tools_state not in ("installed", "running"): raise GNS3VMError("VMware tools are not installed in {}".format(self.vmname)) try: - running = yield from self._is_running() + running = await self._is_running() except VMwareError as e: raise GNS3VMError("Could not list VMware VMs: {}".format(str(e))) if not running: log.info("Update GNS3 VM settings") # set the number of vCPUs and amount of RAM - yield from self._set_vcpus_ram(self.vcpus, self.ram) - yield from self._set_extra_options() + await self._set_vcpus_ram(self.vcpus, self.ram) + await self._set_extra_options() # start the VM args = [self._vmx_path] if self._headless: args.extend(["nogui"]) - yield from self._execute("start", args) + await self._execute("start", args) log.info("GNS3 VM has been started") # get the guest IP address (first adapter only) @@ -164,7 +158,7 @@ class VMwareGNS3VM(BaseGNS3VM): guest_ip_address = "" log.info("Waiting for GNS3 VM IP") while True: - guest_ip_address = yield from self._execute("readVariable", [self._vmx_path, "guestVar", "gns3.eth0"], timeout=120, log_level=logging.DEBUG) + guest_ip_address = await self._execute("readVariable", [self._vmx_path, "guestVar", "gns3.eth0"], timeout=120, log_level=logging.DEBUG) guest_ip_address = guest_ip_address.strip() if len(guest_ip_address) != 0: break @@ -172,15 +166,14 @@ class VMwareGNS3VM(BaseGNS3VM): # If ip not found fallback on old method if trial == 0: log.warning("No IP found for the VM via readVariable fallback to getGuestIPAddress") - guest_ip_address = yield from self._execute("getGuestIPAddress", [self._vmx_path, "-wait"], timeout=120) + guest_ip_address = await self._execute("getGuestIPAddress", [self._vmx_path, "-wait"], timeout=120) break - yield from asyncio.sleep(1) + await asyncio.sleep(1) self.ip_address = guest_ip_address log.info("GNS3 VM IP address set to {}".format(guest_ip_address)) self.running = True - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspend the GNS3 VM. """ @@ -188,14 +181,13 @@ class VMwareGNS3VM(BaseGNS3VM): if self._vmx_path is None: raise GNS3VMError("No VMX path configured, can't suspend the VM") try: - yield from self._execute("suspend", [self._vmx_path]) + await self._execute("suspend", [self._vmx_path]) except GNS3VMError as e: log.warning("Error when suspending the VM: {}".format(str(e))) log.info("GNS3 VM has been suspended") self.running = False - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the GNS3 VM. 
""" @@ -203,7 +195,7 @@ class VMwareGNS3VM(BaseGNS3VM): if self._vmx_path is None: raise GNS3VMError("No VMX path configured, can't stop the VM") try: - yield from self._execute("stop", [self._vmx_path, "soft"]) + await self._execute("stop", [self._vmx_path, "soft"]) except GNS3VMError as e: log.warning("Error when stopping the VM: {}".format(str(e))) log.info("GNS3 VM has been stopped") diff --git a/gns3server/controller/import_project.py b/gns3server/controller/import_project.py index 3cb9ec8d..54763136 100644 --- a/gns3server/controller/import_project.py +++ b/gns3server/controller/import_project.py @@ -33,8 +33,7 @@ Handle the import of project from a .gns3project """ -@asyncio.coroutine -def import_project(controller, project_id, stream, location=None, name=None, keep_compute_id=False): +async def import_project(controller, project_id, stream, location=None, name=None, keep_compute_id=False): """ Import a project contain in a zip file @@ -87,7 +86,7 @@ def import_project(controller, project_id, stream, location=None, name=None, kee try: with zipfile.ZipFile(stream) as zip_file: - yield from wait_run_in_executor(zip_file.extractall, path) + await wait_run_in_executor(zip_file.extractall, path) except zipfile.BadZipFile: raise aiohttp.web.HTTPConflict(text="Cannot extract files from GNS3 project (invalid zip)") @@ -138,9 +137,9 @@ def import_project(controller, project_id, stream, location=None, name=None, kee # Project created on the remote GNS3 VM? if node["compute_id"] not in compute_created: compute = controller.get_compute(node["compute_id"]) - yield from compute.post("/projects", data={"name": project_name, "project_id": project_id,}) + await compute.post("/projects", data={"name": project_name, "project_id": project_id,}) compute_created.add(node["compute_id"]) - yield from _move_files_to_compute(compute, project_id, path, os.path.join("project-files", node["node_type"], node["node_id"])) + await _move_files_to_compute(compute, project_id, path, os.path.join("project-files", node["node_type"], node["node_id"])) # And we dump the updated.gns3 dot_gns3_path = os.path.join(path, project_name + ".gns3") @@ -153,7 +152,7 @@ def import_project(controller, project_id, stream, location=None, name=None, kee if os.path.exists(os.path.join(path, "images")): _import_images(controller, path) - project = yield from controller.load_project(dot_gns3_path, load=False) + project = await controller.load_project(dot_gns3_path, load=False) return project @@ -176,8 +175,7 @@ def _move_node_file(path, old_id, new_id): shutil.move(node_dir, os.path.join(module_dir, new_id)) -@asyncio.coroutine -def _move_files_to_compute(compute, project_id, directory, files_path): +async def _move_files_to_compute(compute, project_id, directory, files_path): """ Move files to a remote compute """ @@ -188,12 +186,11 @@ def _move_files_to_compute(compute, project_id, directory, files_path): for filename in filenames: path = os.path.join(dirpath, filename) dst = os.path.relpath(path, directory) - yield from _upload_file(compute, project_id, path, dst) - yield from wait_run_in_executor(shutil.rmtree, os.path.join(directory, files_path)) + await _upload_file(compute, project_id, path, dst) + await wait_run_in_executor(shutil.rmtree, os.path.join(directory, files_path)) -@asyncio.coroutine -def _upload_file(compute, project_id, file_path, path): +async def _upload_file(compute, project_id, file_path, path): """ Upload a file to a remote project @@ -203,7 +200,7 @@ def _upload_file(compute, project_id, file_path, path): path 
= "/projects/{}/files/{}".format(project_id, path.replace("\\", "/")) with open(file_path, "rb") as f: - yield from compute.http_query("POST", path, f, timeout=None) + await compute.http_query("POST", path, f, timeout=None) def _import_images(controller, path): diff --git a/gns3server/controller/link.py b/gns3server/controller/link.py index 90ecc10c..226b4a7b 100644 --- a/gns3server/controller/link.py +++ b/gns3server/controller/link.py @@ -22,8 +22,6 @@ import html import asyncio import aiohttp -from gns3server.utils.asyncio import asyncio_ensure_future - import logging log = logging.getLogger(__name__) @@ -151,8 +149,7 @@ class Link: return {"frequency_drop": [-1]} return self._filters - @asyncio.coroutine - def update_filters(self, filters): + async def update_filters(self, filters): """ Modify the filters list. @@ -173,15 +170,14 @@ class Link: if new_filters != self.filters: self._filters = new_filters if self._created: - yield from self.update() + await self.update() self._project.controller.notification.project_emit("link.updated", self.__json__()) self._project.dump() - @asyncio.coroutine - def update_suspend(self, value): + async def update_suspend(self, value): if value != self._suspended: self._suspended = value - yield from self.update() + await self.update() self._project.controller.notification.project_emit("link.updated", self.__json__()) self._project.dump() @@ -192,8 +188,7 @@ class Link: """ return self._created - @asyncio.coroutine - def add_node(self, node, adapter_number, port_number, label=None, dump=True): + async def add_node(self, node, adapter_number, port_number, label=None, dump=True): """ Add a node to the link @@ -241,7 +236,7 @@ class Link: }) if len(self._nodes) == 2: - yield from self.create() + await self.create() for n in self._nodes: n["node"].add_link(self) n["port"].link = self @@ -251,8 +246,7 @@ class Link: if dump: self._project.dump() - @asyncio.coroutine - def update_nodes(self, nodes): + async def update_nodes(self, nodes): for node_data in nodes: node = self._project.get_node(node_data["node_id"]) for port in self._nodes: @@ -263,23 +257,20 @@ class Link: self._project.controller.notification.project_emit("link.updated", self.__json__()) self._project.dump() - @asyncio.coroutine - def create(self): + async def create(self): """ Create the link """ raise NotImplementedError - @asyncio.coroutine - def update(self): + async def update(self): """ Update a link """ raise NotImplementedError - @asyncio.coroutine - def delete(self): + async def delete(self): """ Delete the link """ @@ -289,8 +280,7 @@ class Link: n["port"].link = None n["node"].remove_link(self) - @asyncio.coroutine - def start_capture(self, data_link_type="DLT_EN10MB", capture_file_name=None): + async def start_capture(self, data_link_type="DLT_EN10MB", capture_file_name=None): """ Start capture on the link @@ -299,11 +289,10 @@ class Link: self._capturing = True self._capture_file_name = capture_file_name - self._streaming_pcap = asyncio_ensure_future(self._start_streaming_pcap()) + self._streaming_pcap = asyncio.ensure_future(self._start_streaming_pcap()) self._project.controller.notification.project_emit("link.updated", self.__json__()) - @asyncio.coroutine - def _start_streaming_pcap(self): + async def _start_streaming_pcap(self): """ Dump a pcap file on disk """ @@ -315,7 +304,7 @@ class Link: raise aiohttp.web.HTTPConflict(text="Could not delete old capture file '{}': {}".format(self.capture_file_path, e)) try: - stream_content = yield from self.read_pcap_from_source() + 
stream_content = await self.read_pcap_from_source() except aiohttp.web.HTTPException as e: error_msg = "Could not stream PCAP file: error {}: {}".format(e.status, e.text) log.error(error_msg) @@ -328,7 +317,7 @@ class Link: with open(self.capture_file_path, "wb") as f: while self._capturing: # We read 1 bytes by 1 otherwise the remaining data is not read if the traffic stops - data = yield from stream.read(1) + data = await stream.read(1) if data: f.write(data) # Flush to disk otherwise the live is not really live @@ -338,8 +327,7 @@ class Link: except OSError as e: raise aiohttp.web.HTTPConflict(text="Could not write capture file '{}': {}".format(self.capture_file_path, e)) - @asyncio.coroutine - def stop_capture(self): + async def stop_capture(self): """ Stop capture on the link """ @@ -347,16 +335,14 @@ class Link: self._capturing = False self._project.controller.notification.project_emit("link.updated", self.__json__()) - @asyncio.coroutine - def _read_pcap_from_source(self): + async def _read_pcap_from_source(self): """ Return a FileStream of the Pcap from the compute server """ raise NotImplementedError - @asyncio.coroutine - def node_updated(self, node): + async def node_updated(self, node): """ Called when a node member of the link is updated """ diff --git a/gns3server/controller/node.py b/gns3server/controller/node.py index 776eb2f1..642d07af 100644 --- a/gns3server/controller/node.py +++ b/gns3server/controller/node.py @@ -343,8 +343,7 @@ class Node: def links(self): return self._links - @asyncio.coroutine - def create(self): + async def create(self): """ Create the node on the compute server """ @@ -358,21 +357,20 @@ class Node: trial = 0 while trial != 6: try: - response = yield from self._compute.post("/projects/{}/{}/nodes".format(self._project.id, self._node_type), data=data, timeout=timeout) + response = await self._compute.post("/projects/{}/{}/nodes".format(self._project.id, self._node_type), data=data, timeout=timeout) except ComputeConflict as e: if e.response.get("exception") == "ImageMissingError": - res = yield from self._upload_missing_image(self._node_type, e.response["image"]) + res = await self._upload_missing_image(self._node_type, e.response["image"]) if not res: raise e else: raise e else: - yield from self.parse_node_response(response.json) + await self.parse_node_response(response.json) return True trial += 1 - @asyncio.coroutine - def update(self, **kwargs): + async def update(self, **kwargs): """ Update the node on the compute server @@ -403,15 +401,14 @@ class Node: self._list_ports() if update_compute: data = self._node_data(properties=compute_properties) - response = yield from self.put(None, data=data) - yield from self.parse_node_response(response.json) + response = await self.put(None, data=data) + await self.parse_node_response(response.json) elif old_json != self.__json__(): # We send notif only if object has changed self.project.controller.notification.project_emit("node.updated", self.__json__()) self.project.dump() - @asyncio.coroutine - def parse_node_response(self, response): + async def parse_node_response(self, response): """ Update the object with the remote node object """ @@ -439,7 +436,7 @@ class Node: self._properties[key] = value self._list_ports() for link in self._links: - yield from link.node_updated(self) + await link.node_updated(self) def _node_data(self, properties=None): """ @@ -477,12 +474,10 @@ class Node: del data[key] return data - @asyncio.coroutine - def destroy(self): - yield from self.delete() + async def 
destroy(self): + await self.delete() - @asyncio.coroutine - def start(self, data=None): + async def start(self, data=None): """ Start a node """ @@ -493,57 +488,52 @@ class Node: licence = self._project.controller.settings["IOU"]["iourc_content"] except KeyError: raise aiohttp.web.HTTPConflict(text="IOU licence is not configured") - yield from self.post("/start", timeout=240, data={"iourc_content": licence}) + await self.post("/start", timeout=240, data={"iourc_content": licence}) else: - yield from self.post("/start", data=data, timeout=240) + await self.post("/start", data=data, timeout=240) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when starting {}".format(self._name)) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stop a node """ try: - yield from self.post("/stop", timeout=240, dont_connect=True) + await self.post("/stop", timeout=240, dont_connect=True) # We don't care if a node is down at this step except (ComputeError, aiohttp.ClientError, aiohttp.web.HTTPError): pass except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when stopping {}".format(self._name)) - @asyncio.coroutine - def suspend(self): + async def suspend(self): """ Suspend a node """ try: - yield from self.post("/suspend", timeout=240) + await self.post("/suspend", timeout=240) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when reloading {}".format(self._name)) - @asyncio.coroutine - def reload(self): + async def reload(self): """ Suspend a node """ try: - yield from self.post("/reload", timeout=240) + await self.post("/reload", timeout=240) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when reloading {}".format(self._name)) - @asyncio.coroutine - def post(self, path, data=None, **kwargs): + async def post(self, path, data=None, **kwargs): """ HTTP post on the node """ if data: - return (yield from self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), data=data, **kwargs)) + return (await self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), data=data, **kwargs)) else: - return (yield from self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), **kwargs)) + return (await self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), **kwargs)) - @asyncio.coroutine - def put(self, path, data=None, **kwargs): + async def put(self, path, data=None, **kwargs): """ HTTP post on the node """ @@ -552,22 +542,20 @@ class Node: else: path = "/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path) if data: - return (yield from self._compute.put(path, data=data, **kwargs)) + return (await self._compute.put(path, data=data, **kwargs)) else: - return (yield from self._compute.put(path, **kwargs)) + return (await self._compute.put(path, **kwargs)) - @asyncio.coroutine - def delete(self, path=None, **kwargs): + async def delete(self, path=None, **kwargs): """ HTTP post on the node """ if path is None: - return (yield from self._compute.delete("/projects/{}/{}/nodes/{}".format(self._project.id, self._node_type, self._id), **kwargs)) + return (await self._compute.delete("/projects/{}/{}/nodes/{}".format(self._project.id, self._node_type, self._id), **kwargs)) else: - return (yield from 
self._compute.delete("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), **kwargs)) + return (await self._compute.delete("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), **kwargs)) - @asyncio.coroutine - def _upload_missing_image(self, type, img): + async def _upload_missing_image(self, type, img): """ Search an image on local computer and upload it to remote compute if the image exists @@ -578,26 +566,24 @@ class Node: self.project.controller.notification.project_emit("log.info", {"message": "Uploading missing image {}".format(img)}) try: with open(image, 'rb') as f: - yield from self._compute.post("/{}/images/{}".format(self._node_type, os.path.basename(img)), data=f, timeout=None) + await self._compute.post("/{}/images/{}".format(self._node_type, os.path.basename(img)), data=f, timeout=None) except OSError as e: raise aiohttp.web.HTTPConflict(text="Can't upload {}: {}".format(image, str(e))) self.project.controller.notification.project_emit("log.info", {"message": "Upload finished for {}".format(img)}) return True return False - @asyncio.coroutine - def dynamips_auto_idlepc(self): + async def dynamips_auto_idlepc(self): """ Compute the idle PC for a dynamips node """ - return (yield from self._compute.get("/projects/{}/{}/nodes/{}/auto_idlepc".format(self._project.id, self._node_type, self._id), timeout=240)).json + return (await self._compute.get("/projects/{}/{}/nodes/{}/auto_idlepc".format(self._project.id, self._node_type, self._id), timeout=240)).json - @asyncio.coroutine - def dynamips_idlepc_proposals(self): + async def dynamips_idlepc_proposals(self): """ Compute a list of potential idle PC """ - return (yield from self._compute.get("/projects/{}/{}/nodes/{}/idlepc_proposals".format(self._project.id, self._node_type, self._id), timeout=240)).json + return (await self._compute.get("/projects/{}/{}/nodes/{}/idlepc_proposals".format(self._project.id, self._node_type, self._id), timeout=240)).json def get_port(self, adapter_number, port_number): """ diff --git a/gns3server/controller/notification.py b/gns3server/controller/notification.py index e05d89db..1e0ef4a6 100644 --- a/gns3server/controller/notification.py +++ b/gns3server/controller/notification.py @@ -88,8 +88,7 @@ class Notification: """ return project.id in self._project_listeners and len(self._project_listeners[project.id]) > 0 - @asyncio.coroutine - def dispatch(self, action, event, compute_id): + async def dispatch(self, action, event, compute_id): """ Notification received from compute node. 
Send it directly to clients or process it @@ -103,7 +102,7 @@ class Notification: # Update controller node data and send the event node.updated project = self._controller.get_project(event["project_id"]) node = project.get_node(event["node_id"]) - yield from node.parse_node_response(event) + await node.parse_node_response(event) self.project_emit("node.updated", node.__json__()) except (aiohttp.web.HTTPNotFound, aiohttp.web.HTTPForbidden): # Project closing diff --git a/gns3server/controller/project.py b/gns3server/controller/project.py index 18540082..457473f4 100644 --- a/gns3server/controller/project.py +++ b/gns3server/controller/project.py @@ -38,7 +38,6 @@ from ..utils.path import check_path_allowed, get_default_project_directory from ..utils.asyncio.pool import Pool from ..utils.asyncio import locking from ..utils.asyncio import wait_run_in_executor -from ..utils.asyncio import asyncio_ensure_future from .export_project import export_project from .import_project import import_project @@ -122,8 +121,7 @@ class Project: assert self._status != "closed" self.dump() - @asyncio.coroutine - def update(self, **kwargs): + async def update(self, **kwargs): """ Update the project :param kwargs: Project properties @@ -141,7 +139,7 @@ class Project: # update on computes for compute in list(self._project_created_on_compute): - yield from compute.put( + await compute.put( "/projects/{}".format(self._id), { "variables": self.variables } @@ -462,8 +460,7 @@ class Project: return new_name @open_required - @asyncio.coroutine - def add_node_from_appliance(self, appliance_id, x=0, y=0, compute_id=None): + async def add_node_from_appliance(self, appliance_id, x=0, y=0, compute_id=None): """ Create a node from an appliance """ @@ -481,12 +478,11 @@ class Project: default_name_format = template.pop("default_name_format", "{name}-{0}") name = default_name_format.replace("{name}", name) node_id = str(uuid.uuid4()) - node = yield from self.add_node(compute, name, node_id, node_type=node_type, appliance_id=appliance_id, **template) + node = await self.add_node(compute, name, node_id, node_type=node_type, appliance_id=appliance_id, **template) return node @open_required - @asyncio.coroutine - def add_node(self, compute, name, node_id, dump=True, node_type=None, **kwargs): + async def add_node(self, compute, name, node_id, dump=True, node_type=None, **kwargs): """ Create a node or return an existing node @@ -515,10 +511,10 @@ class Project: if self._variables: data["variables"] = self._variables - yield from compute.post("/projects", data=data) + await compute.post("/projects", data=data) self._project_created_on_compute.add(compute) - yield from node.create() + await node.create() self._nodes[node.id] = node self.controller.notification.project_emit("node.created", node.__json__()) if dump: @@ -526,8 +522,7 @@ class Project: return node @locking - @asyncio.coroutine - def __delete_node_links(self, node): + async def __delete_node_links(self, node): """ Delete all link connected to this node. 
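The project.py hunk above also removes the project-local asyncio_ensure_future import; with Python 3.4 gone, the code calls the standard asyncio.ensure_future() directly, both for the deferred GNS3 VM subnet check earlier in the patch and for the background start_all() call later in this file. A small sketch of that fire-and-forget scheduling idiom, with placeholder coroutines:

import asyncio

async def start_all():
    # Placeholder for Project.start_all(); just pretends to start the nodes.
    await asyncio.sleep(0.1)
    print("all nodes started")

def schedule_background_start(loop, delay=0.5):
    # Schedule the coroutine without awaiting it: call_later() runs the lambda
    # after 'delay' seconds, and ensure_future() wraps the coroutine in a Task
    # that runs in the background while the caller keeps going.
    loop.call_later(delay, lambda: asyncio.ensure_future(start_all()))

loop = asyncio.get_event_loop()
schedule_background_start(loop)
loop.run_until_complete(asyncio.sleep(1))  # keep the loop alive long enough for the task
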
@@ -536,16 +531,15 @@ class Project: """ for link in list(self._links.values()): if node in link.nodes: - yield from self.delete_link(link.id, force_delete=True) + await self.delete_link(link.id, force_delete=True) @open_required - @asyncio.coroutine - def delete_node(self, node_id): + async def delete_node(self, node_id): node = self.get_node(node_id) - yield from self.__delete_node_links(node) + await self.__delete_node_links(node) self.remove_allocated_node_name(node.name) del self._nodes[node.id] - yield from node.destroy() + await node.destroy() self.dump() self.controller.notification.project_emit("node.deleted", node.__json__()) @@ -602,8 +596,7 @@ class Project: return self._drawings @open_required - @asyncio.coroutine - def add_drawing(self, drawing_id=None, dump=True, **kwargs): + async def add_drawing(self, drawing_id=None, dump=True, **kwargs): """ Create an drawing or return an existing drawing @@ -630,16 +623,14 @@ class Project: raise aiohttp.web.HTTPNotFound(text="Drawing ID {} doesn't exist".format(drawing_id)) @open_required - @asyncio.coroutine - def delete_drawing(self, drawing_id): + async def delete_drawing(self, drawing_id): drawing = self.get_drawing(drawing_id) del self._drawings[drawing.id] self.dump() self.controller.notification.project_emit("drawing.deleted", drawing.__json__()) @open_required - @asyncio.coroutine - def add_link(self, link_id=None, dump=True): + async def add_link(self, link_id=None, dump=True): """ Create a link. By default the link is empty @@ -654,12 +645,11 @@ class Project: return link @open_required - @asyncio.coroutine - def delete_link(self, link_id, force_delete=False): + async def delete_link(self, link_id, force_delete=False): link = self.get_link(link_id) del self._links[link.id] try: - yield from link.delete() + await link.delete() except Exception: if force_delete is False: raise @@ -703,8 +693,7 @@ class Project: raise aiohttp.web.HTTPNotFound(text="Snapshot ID {} doesn't exist".format(snapshot_id)) @open_required - @asyncio.coroutine - def snapshot(self, name): + async def snapshot(self, name): """ Snapshot the project @@ -714,25 +703,23 @@ class Project: if name in [snap.name for snap in self._snapshots.values()]: raise aiohttp.web.HTTPConflict(text="The snapshot name {} already exists".format(name)) snapshot = Snapshot(self, name=name) - yield from snapshot.create() + await snapshot.create() self._snapshots[snapshot.id] = snapshot return snapshot @open_required - @asyncio.coroutine - def delete_snapshot(self, snapshot_id): + async def delete_snapshot(self, snapshot_id): snapshot = self.get_snapshot(snapshot_id) del self._snapshots[snapshot.id] os.remove(snapshot.path) - @asyncio.coroutine - def close(self, ignore_notification=False): + async def close(self, ignore_notification=False): if self._status == "closed": return - yield from self.stop_all() + await self.stop_all() for compute in list(self._project_created_on_compute): try: - yield from compute.post("/projects/{}/close".format(self._id), dont_connect=True) + await compute.post("/projects/{}/close".format(self._id), dont_connect=True) # We don't care if a compute is down at this step except (ComputeError, aiohttp.web.HTTPError, aiohttp.ClientResponseError, TimeoutError): pass @@ -771,30 +758,28 @@ class Project: except OSError as e: log.warning(str(e)) - @asyncio.coroutine - def delete(self): + async def delete(self): if self._status != "opened": try: - yield from self.open() + await self.open() except aiohttp.web.HTTPConflict as e: # ignore missing images or other 
conflicts when deleting a project log.warning("Conflict while deleting project: {}".format(e.text)) - yield from self.delete_on_computes() - yield from self.close() + await self.delete_on_computes() + await self.close() try: shutil.rmtree(self.path) except OSError as e: raise aiohttp.web.HTTPConflict(text="Can not delete project directory {}: {}".format(self.path, str(e))) - @asyncio.coroutine - def delete_on_computes(self): + async def delete_on_computes(self): """ Delete the project on computes but not on controller """ for compute in list(self._project_created_on_compute): if compute.id != "local": - yield from compute.delete("/projects/{}".format(self._id)) + await compute.delete("/projects/{}".format(self._id)) self._project_created_on_compute.remove(compute) @classmethod @@ -817,8 +802,7 @@ class Project: return os.path.join(self.path, self._filename) @locking - @asyncio.coroutine - def open(self): + async def open(self): """ Load topology elements """ @@ -862,19 +846,19 @@ class Project: topology = project_data["topology"] for compute in topology.get("computes", []): - yield from self.controller.add_compute(**compute) + await self.controller.add_compute(**compute) for node in topology.get("nodes", []): compute = self.controller.get_compute(node.pop("compute_id")) name = node.pop("name") node_id = node.pop("node_id", str(uuid.uuid4())) - yield from self.add_node(compute, name, node_id, dump=False, **node) + await self.add_node(compute, name, node_id, dump=False, **node) for link_data in topology.get("links", []): if 'link_id' not in link_data.keys(): # skip the link continue - link = yield from self.add_link(link_id=link_data["link_id"]) + link = await self.add_link(link_id=link_data["link_id"]) if "filters" in link_data: - yield from link.update_filters(link_data["filters"]) + await link.update_filters(link_data["filters"]) for node_link in link_data.get("nodes", []): node = self.get_node(node_link["node_id"]) port = node.get_port(node_link["adapter_number"], node_link["port_number"]) @@ -884,19 +868,19 @@ class Project: if port.link is not None: log.warning("Port {}/{} is already connected to link ID {}".format(node_link["adapter_number"], node_link["port_number"], port.link.id)) continue - yield from link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False) + await link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False) if len(link.nodes) != 2: # a link should have 2 attached nodes, this can happen with corrupted projects - yield from self.delete_link(link.id, force_delete=True) + await self.delete_link(link.id, force_delete=True) for drawing_data in topology.get("drawings", []): - yield from self.add_drawing(dump=False, **drawing_data) + await self.add_drawing(dump=False, **drawing_data) self.dump() # We catch all error to be able to rollback the .gns3 to the previous state except Exception as e: for compute in list(self._project_created_on_compute): try: - yield from compute.post("/projects/{}/close".format(self._id)) + await compute.post("/projects/{}/close".format(self._id)) # We don't care if a compute is down at this step except (ComputeError, aiohttp.web.HTTPNotFound, aiohttp.web.HTTPConflict, aiohttp.ServerDisconnectedError): pass @@ -922,15 +906,14 @@ class Project: # Start all in the background without waiting for completion # we ignore errors because we want to let the user open # their project and fix it - asyncio_ensure_future(self.start_all()) + 
asyncio.ensure_future(self.start_all()) - @asyncio.coroutine - def wait_loaded(self): + async def wait_loaded(self): """ Wait until the project finish loading """ while self._loading: - yield from asyncio.sleep(0.5) + await asyncio.sleep(0.5) def _create_duplicate_project_file(self, path, zipstream): """ @@ -941,8 +924,7 @@ class Project: for data in zipstream: f.write(data) - @asyncio.coroutine - def duplicate(self, name=None, location=None): + async def duplicate(self, name=None, location=None): """ Duplicate a project @@ -956,22 +938,22 @@ class Project: # If the project was not open we open it temporary previous_status = self._status if self._status == "closed": - yield from self.open() + await self.open() self.dump() assert self._status != "closed" try: with tempfile.TemporaryDirectory() as tmpdir: - zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True) + zipstream = await export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True) project_path = os.path.join(tmpdir, "project.gns3p") - yield from wait_run_in_executor(self._create_duplicate_project_file, project_path, zipstream) + await wait_run_in_executor(self._create_duplicate_project_file, project_path, zipstream) with open(project_path, "rb") as f: - project = yield from import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True) + project = await import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True) except (ValueError, OSError, UnicodeEncodeError) as e: raise aiohttp.web.HTTPConflict(text="Cannot duplicate project: {}".format(str(e))) if previous_status == "closed": - yield from self.close() + await self.close() return project @@ -999,38 +981,34 @@ class Project: except OSError as e: raise aiohttp.web.HTTPInternalServerError(text="Could not write topology: {}".format(e)) - @asyncio.coroutine - def start_all(self): + async def start_all(self): """ Start all nodes """ pool = Pool(concurrency=3) for node in self.nodes.values(): pool.append(node.start) - yield from pool.join() + await pool.join() - @asyncio.coroutine - def stop_all(self): + async def stop_all(self): """ Stop all nodes """ pool = Pool(concurrency=3) for node in self.nodes.values(): pool.append(node.stop) - yield from pool.join() + await pool.join() - @asyncio.coroutine - def suspend_all(self): + async def suspend_all(self): """ Suspend all nodes """ pool = Pool(concurrency=3) for node in self.nodes.values(): pool.append(node.suspend) - yield from pool.join() + await pool.join() - @asyncio.coroutine - def duplicate_node(self, node, x, y, z): + async def duplicate_node(self, node, x, y, z): """ Duplicate a node @@ -1061,21 +1039,21 @@ class Project: data['y'] = y data['z'] = z new_node_uuid = str(uuid.uuid4()) - new_node = yield from self.add_node( + new_node = await self.add_node( node.compute, node.name, new_node_uuid, node_type=node_type, **data) try: - yield from node.post("/duplicate", timeout=None, data={ + await node.post("/duplicate", timeout=None, data={ "destination_node_id": new_node_uuid }) except aiohttp.web.HTTPNotFound as e: - yield from self.delete_node(new_node_uuid) + await self.delete_node(new_node_uuid) raise aiohttp.web.HTTPConflict(text="This node type cannot be duplicated") except aiohttp.web.HTTPConflict as e: - yield from self.delete_node(new_node_uuid) + await self.delete_node(new_node_uuid) raise e return new_node diff --git a/gns3server/controller/snapshot.py 
b/gns3server/controller/snapshot.py index 512e23f5..c7321047 100644 --- a/gns3server/controller/snapshot.py +++ b/gns3server/controller/snapshot.py @@ -82,8 +82,7 @@ class Snapshot: for data in zipstream: f.write(data) - @asyncio.coroutine - def create(self): + async def create(self): """ Create the snapshot """ @@ -99,31 +98,30 @@ class Snapshot: try: with tempfile.TemporaryDirectory() as tmpdir: - zipstream = yield from export_project(self._project, tmpdir, keep_compute_id=True, allow_all_nodes=True) - yield from wait_run_in_executor(self._create_snapshot_file, zipstream) + zipstream = await export_project(self._project, tmpdir, keep_compute_id=True, allow_all_nodes=True) + await wait_run_in_executor(self._create_snapshot_file, zipstream) except (ValueError, OSError, RuntimeError) as e: raise aiohttp.web.HTTPConflict(text="Could not create snapshot file '{}': {}".format(self.path, e)) - @asyncio.coroutine - def restore(self): + async def restore(self): """ Restore the snapshot """ - yield from self._project.delete_on_computes() + await self._project.delete_on_computes() # We don't send close notification to clients because the close / open dance is purely internal - yield from self._project.close(ignore_notification=True) + await self._project.close(ignore_notification=True) try: # delete the current project files project_files_path = os.path.join(self._project.path, "project-files") if os.path.exists(project_files_path): - yield from wait_run_in_executor(shutil.rmtree, project_files_path) + await wait_run_in_executor(shutil.rmtree, project_files_path) with open(self._path, "rb") as f: - project = yield from import_project(self._project.controller, self._project.id, f, location=self._project.path) + project = await import_project(self._project.controller, self._project.id, f, location=self._project.path) except (OSError, PermissionError) as e: raise aiohttp.web.HTTPConflict(text=str(e)) - yield from project.open() + await project.open() self._project.controller.notification.project_emit("snapshot.restored", self.__json__()) return self._project diff --git a/gns3server/controller/udp_link.py b/gns3server/controller/udp_link.py index 9a190286..4a4ed070 100644 --- a/gns3server/controller/udp_link.py +++ b/gns3server/controller/udp_link.py @@ -37,8 +37,7 @@ class UDPLink(Link): """ return self._link_data - @asyncio.coroutine - def create(self): + async def create(self): """ Create the link on the nodes """ @@ -52,14 +51,14 @@ class UDPLink(Link): # Get an IP allowing communication between both host try: - (node1_host, node2_host) = yield from node1.compute.get_ip_on_same_subnet(node2.compute) + (node1_host, node2_host) = await node1.compute.get_ip_on_same_subnet(node2.compute) except ValueError as e: raise aiohttp.web.HTTPConflict(text="Cannot get an IP address on same subnet: {}".format(e)) # Reserve a UDP port on both side - response = yield from node1.compute.post("/projects/{}/ports/udp".format(self._project.id)) + response = await node1.compute.post("/projects/{}/ports/udp".format(self._project.id)) self._node1_port = response.json["udp_port"] - response = yield from node2.compute.post("/projects/{}/ports/udp".format(self._project.id)) + response = await node2.compute.post("/projects/{}/ports/udp".format(self._project.id)) self._node2_port = response.json["udp_port"] node1_filters = {} @@ -79,7 +78,7 @@ class UDPLink(Link): "filters": node1_filters, "suspend": self._suspended }) - yield from 
node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120) + await node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120) self._link_data.append({ "lport": self._node2_port, @@ -90,15 +89,14 @@ class UDPLink(Link): "suspend": self._suspended }) try: - yield from node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=120) + await node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=120) except Exception as e: # We clean the first NIO - yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120) + await node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120) raise e self._created = True - @asyncio.coroutine - def update(self): + async def update(self): """ Update the link on the nodes """ @@ -120,16 +118,15 @@ class UDPLink(Link): port_number1 = self._nodes[0]["port_number"] self._link_data[0]["filters"] = node1_filters self._link_data[0]["suspend"] = self._suspended - yield from node1.put("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120) + await node1.put("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120) adapter_number2 = self._nodes[1]["adapter_number"] port_number2 = self._nodes[1]["port_number"] self._link_data[1]["filters"] = node2_filters self._link_data[1]["suspend"] = self._suspended - yield from node2.put("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=221) + await node2.put("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=221) - @asyncio.coroutine - def delete(self): + async def delete(self): """ Delete the link and free the resources """ @@ -142,7 +139,7 @@ class UDPLink(Link): except IndexError: return try: - yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120) + await node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120) # If the node is already delete (user selected multiple element and delete all in the same time) except aiohttp.web.HTTPNotFound: pass @@ -154,14 +151,13 @@ class UDPLink(Link): except IndexError: return try: - yield from node2.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), timeout=120) + await node2.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), timeout=120) # If the node is already delete (user selected multiple element and delete all in the same time) except aiohttp.web.HTTPNotFound: pass 
- yield from super().delete() + await super().delete() - @asyncio.coroutine - def start_capture(self, data_link_type="DLT_EN10MB", capture_file_name=None): + async def start_capture(self, data_link_type="DLT_EN10MB", capture_file_name=None): """ Start capture on a link """ @@ -172,18 +168,17 @@ class UDPLink(Link): "capture_file_name": capture_file_name, "data_link_type": data_link_type } - yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/start_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]), data=data) - yield from super().start_capture(data_link_type=data_link_type, capture_file_name=capture_file_name) + await self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/start_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]), data=data) + await super().start_capture(data_link_type=data_link_type, capture_file_name=capture_file_name) - @asyncio.coroutine - def stop_capture(self): + async def stop_capture(self): """ Stop capture on a link """ if self._capture_node: - yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/stop_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"])) + await self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/stop_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"])) self._capture_node = None - yield from super().stop_capture() + await super().stop_capture() def _choose_capture_side(self): """ @@ -215,8 +210,7 @@ class UDPLink(Link): raise aiohttp.web.HTTPConflict(text="Cannot capture because there is no running device on this link") - @asyncio.coroutine - def read_pcap_from_source(self): + async def read_pcap_from_source(self): """ Return a FileStream of the Pcap from the compute node """ @@ -224,10 +218,9 @@ class UDPLink(Link): compute = self._capture_node["node"].compute return compute.stream_file(self._project, "tmp/captures/" + self._capture_file_name) - @asyncio.coroutine - def node_updated(self, node): + async def node_updated(self, node): """ Called when a node member of the link is updated """ if self._capture_node and node == self._capture_node["node"] and node.status != "started": - yield from self.stop_capture() + await self.stop_capture() diff --git a/gns3server/handlers/api/compute/atm_switch_handler.py b/gns3server/handlers/api/compute/atm_switch_handler.py index b57dbf42..5a9d01a3 100644 --- a/gns3server/handlers/api/compute/atm_switch_handler.py +++ b/gns3server/handlers/api/compute/atm_switch_handler.py @@ -48,11 +48,11 @@ class ATMSwitchHandler: description="Create a new ATM switch instance", input=ATM_SWITCH_CREATE_SCHEMA, output=ATM_SWITCH_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): # Use the Dynamips ATM switch to simulate this node dynamips_manager = Dynamips.instance() - node = yield from dynamips_manager.create_node(request.json.pop("name"), + node = await dynamips_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), node_type="atm_switch", @@ -90,9 +90,9 @@ class ATMSwitchHandler: 404: "Instance doesn't exist" }, description="Duplicate an atm switch instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from 
Dynamips.instance().duplicate_node( + new_node = await Dynamips.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) @@ -114,12 +114,12 @@ class ATMSwitchHandler: description="Update an ATM switch instance", input=ATM_SWITCH_UPDATE_SCHEMA, output=ATM_SWITCH_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) if "name" in request.json and node.name != request.json["name"]: - yield from node.set_name(request.json["name"]) + await node.set_name(request.json["name"]) if "mappings" in request.json: node.mappings = request.json["mappings"] node.updated() @@ -137,10 +137,10 @@ class ATMSwitchHandler: 404: "Instance doesn't exist" }, description="Delete an ATM switch instance") - def delete(request, response): + async def delete(request, response): dynamips_manager = Dynamips.instance() - yield from dynamips_manager.delete_node(request.match_info["node_id"]) + await dynamips_manager.delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -210,13 +210,13 @@ class ATMSwitchHandler: description="Add a NIO to an ATM switch instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - nio = yield from dynamips_manager.create_nio(node, request.json) + nio = await dynamips_manager.create_nio(node, request.json) port_number = int(request.match_info["port_number"]) - yield from node.add_nio(nio, port_number) + await node.add_nio(nio, port_number) response.set_status(201) response.json(nio) @@ -234,13 +234,13 @@ class ATMSwitchHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from an ATM switch instance") - def delete_nio(request, response): + async def delete_nio(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - nio = yield from node.remove_nio(port_number) - yield from nio.delete() + nio = await node.remove_nio(port_number) + await nio.delete() response.set_status(204) @Route.post( @@ -258,13 +258,13 @@ class ATMSwitchHandler: }, description="Start a packet capture on an ATM switch instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) pcap_file_path = os.path.join(node.project.capture_working_directory(), request.json["capture_file_name"]) - yield from node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) + await node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -281,10 +281,10 @@ class ATMSwitchHandler: 404: "Instance doesn't exist" }, description="Stop a packet capture on an ATM switch instance") - def stop_capture(request, response): + async def stop_capture(request, response): dynamips_manager = Dynamips.instance() node = 
dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from node.stop_capture(port_number) + await node.stop_capture(port_number) response.set_status(204) diff --git a/gns3server/handlers/api/compute/cloud_handler.py b/gns3server/handlers/api/compute/cloud_handler.py index f4bd1db3..60fcc7d1 100644 --- a/gns3server/handlers/api/compute/cloud_handler.py +++ b/gns3server/handlers/api/compute/cloud_handler.py @@ -49,10 +49,10 @@ class CloudHandler: description="Create a new cloud instance", input=CLOUD_CREATE_SCHEMA, output=CLOUD_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): builtin_manager = Builtin.instance() - node = yield from builtin_manager.create_node(request.json.pop("name"), + node = await builtin_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), node_type="cloud", @@ -123,10 +123,10 @@ class CloudHandler: 404: "Instance doesn't exist" }, description="Delete a cloud instance") - def delete(request, response): + async def delete(request, response): builtin_manager = Builtin.instance() - yield from builtin_manager.delete_node(request.match_info["node_id"]) + await builtin_manager.delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -141,10 +141,10 @@ class CloudHandler: 404: "Instance doesn't exist" }, description="Start a cloud") - def start(request, response): + async def start(request, response): node = Builtin.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from node.start() + await node.start() response.set_status(204) @Route.post( @@ -197,13 +197,13 @@ class CloudHandler: description="Add a NIO to a cloud instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) nio = builtin_manager.create_nio(request.json) port_number = int(request.match_info["port_number"]) - yield from node.add_nio(nio, port_number) + await node.add_nio(nio, port_number) response.set_status(201) response.json(nio) @@ -223,7 +223,7 @@ class CloudHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a Cloud instance") - def update_nio(request, response): + async def update_nio(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -236,7 +236,7 @@ class CloudHandler: if "filters" in request.json and nio: nio.filters = request.json["filters"] - yield from node.update_nio(int(request.match_info["port_number"]), nio) + await node.update_nio(int(request.match_info["port_number"]), nio) response.set_status(201) response.json(request.json) @@ -254,12 +254,12 @@ class CloudHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a cloud instance") - def delete_nio(request, response): + async def delete_nio(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from node.remove_nio(port_number) + await node.remove_nio(port_number) response.set_status(204) @Route.post( @@ -277,13 +277,13 @@ class 
CloudHandler: }, description="Start a packet capture on a cloud instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) pcap_file_path = os.path.join(node.project.capture_working_directory(), request.json["capture_file_name"]) - yield from node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) + await node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -300,10 +300,10 @@ class CloudHandler: 404: "Instance doesn't exist" }, description="Stop a packet capture on a cloud instance") - def stop_capture(request, response): + async def stop_capture(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from node.stop_capture(port_number) + await node.stop_capture(port_number) response.set_status(204) diff --git a/gns3server/handlers/api/compute/docker_handler.py b/gns3server/handlers/api/compute/docker_handler.py index bacf7f89..af595acd 100644 --- a/gns3server/handlers/api/compute/docker_handler.py +++ b/gns3server/handlers/api/compute/docker_handler.py @@ -46,9 +46,9 @@ class DockerHandler: description="Create a new Docker container", input=DOCKER_CREATE_SCHEMA, output=DOCKER_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): docker_manager = Docker.instance() - container = yield from docker_manager.create_node(request.json.pop("name"), + container = await docker_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), image=request.json.pop("image"), @@ -82,10 +82,10 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Start a Docker container") - def start(request, response): + async def start(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from container.start() + await container.start() response.set_status(204) @Route.post( @@ -100,10 +100,10 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Stop a Docker container") - def stop(request, response): + async def stop(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from container.stop() + await container.stop() response.set_status(204) @Route.post( @@ -118,10 +118,10 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Suspend a Docker container") - def suspend(request, response): + async def suspend(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from container.pause() + await container.pause() response.set_status(204) @Route.post( @@ -136,10 +136,10 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Restart a Docker container") - def reload(request, response): + async def reload(request, response): docker_manager = Docker.instance() container = 
docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from container.restart() + await container.restart() response.set_status(204) @Route.delete( @@ -154,10 +154,10 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Delete a Docker container") - def delete(request, response): + async def delete(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from container.delete() + await container.delete() response.set_status(204) @Route.post( @@ -171,9 +171,9 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Duplicate a Docker instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from Docker.instance().duplicate_node( + new_node = await Docker.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) @@ -192,10 +192,10 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Pause a Docker container") - def pause(request, response): + async def pause(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from container.pause() + await container.pause() response.set_status(204) @Route.post( @@ -210,10 +210,10 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Unpause a Docker container") - def unpause(request, response): + async def unpause(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from container.unpause() + await container.unpause() response.set_status(204) @Route.post( @@ -232,14 +232,14 @@ class DockerHandler: description="Add a NIO to a Docker container", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) nio_type = request.json["type"] if nio_type != "nio_udp": raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type)) nio = docker_manager.create_nio(request.json) - yield from container.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio) + await container.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio) response.set_status(201) response.json(nio) @@ -259,14 +259,14 @@ class DockerHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a Docker instance") - def update_nio(request, response): + async def update_nio(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) nio = container.ethernet_adapters[int(request.match_info["adapter_number"])].get_nio(0) if "filters" in request.json and nio: nio.filters = request.json["filters"] - yield from container.adapter_update_nio_binding(int(request.match_info["port_number"]), nio) + await container.adapter_update_nio_binding(int(request.match_info["port_number"]), nio) response.set_status(201) response.json(request.json) @@ -284,10 +284,10 @@ class DockerHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a Docker container") - def delete_nio(request, 
response): + async def delete_nio(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from container.adapter_remove_nio_binding(int(request.match_info["adapter_number"])) + await container.adapter_remove_nio_binding(int(request.match_info["adapter_number"])) response.set_status(204) @Route.put( @@ -305,7 +305,7 @@ class DockerHandler: description="Update a Docker instance", input=DOCKER_OBJECT_SCHEMA, output=DOCKER_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -323,7 +323,7 @@ class DockerHandler: changed = True # We don't call container.update for nothing because it will restart the container if changed: - yield from container.update() + await container.update() container.updated() response.json(container) @@ -343,14 +343,14 @@ class DockerHandler: }, description="Start a packet capture on a Docker container instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = int(request.match_info["adapter_number"]) pcap_file_path = os.path.join(container.project.capture_working_directory(), request.json["capture_file_name"]) - yield from container.start_capture(adapter_number, pcap_file_path) + await container.start_capture(adapter_number, pcap_file_path) response.json({"pcap_file_path": str(pcap_file_path)}) @Route.post( @@ -368,13 +368,13 @@ class DockerHandler: 409: "Container not started" }, description="Stop a packet capture on a Docker container instance") - def stop_capture(request, response): + async def stop_capture(request, response): docker_manager = Docker.instance() container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = int(request.match_info["adapter_number"]) - yield from container.stop_capture(adapter_number) + await container.stop_capture(adapter_number) response.set_status(204) @Route.get( @@ -384,7 +384,7 @@ class DockerHandler: }, output=DOCKER_LIST_IMAGES_SCHEMA, description="Get all available Docker images") - def show(request, response): + async def show(request, response): docker_manager = Docker.instance() - images = yield from docker_manager.list_images() + images = await docker_manager.list_images() response.json(images) diff --git a/gns3server/handlers/api/compute/dynamips_vm_handler.py b/gns3server/handlers/api/compute/dynamips_vm_handler.py index c3d170df..f1a021b1 100644 --- a/gns3server/handlers/api/compute/dynamips_vm_handler.py +++ b/gns3server/handlers/api/compute/dynamips_vm_handler.py @@ -62,14 +62,14 @@ class DynamipsVMHandler: description="Create a new Dynamips VM instance", input=VM_CREATE_SCHEMA, output=VM_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): dynamips_manager = Dynamips.instance() platform = request.json.pop("platform") default_chassis = None if platform in DEFAULT_CHASSIS: default_chassis = DEFAULT_CHASSIS[platform] - vm = yield from dynamips_manager.create_node(request.json.pop("name"), + vm = await dynamips_manager.create_node(request.json.pop("name"), request.match_info["project_id"], 
request.json.get("node_id"), dynamips_id=request.json.get("dynamips_id"), @@ -79,7 +79,7 @@ class DynamipsVMHandler: aux=request.json.get("aux"), chassis=request.json.pop("chassis", default_chassis), node_type="dynamips") - yield from dynamips_manager.update_vm_settings(vm, request.json) + await dynamips_manager.update_vm_settings(vm, request.json) response.set_status(201) response.json(vm) @@ -117,11 +117,11 @@ class DynamipsVMHandler: description="Update a Dynamips VM instance", input=VM_UPDATE_SCHEMA, output=VM_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from dynamips_manager.update_vm_settings(vm, request.json) + await dynamips_manager.update_vm_settings(vm, request.json) vm.updated() response.json(vm) @@ -137,11 +137,11 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Delete a Dynamips VM instance") - def delete(request, response): + async def delete(request, response): # check the project_id exists ProjectManager.instance().get_project(request.match_info["project_id"]) - yield from Dynamips.instance().delete_node(request.match_info["node_id"]) + await Dynamips.instance().delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -156,15 +156,15 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Start a Dynamips VM instance") - def start(request, response): + async def start(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) try: - yield from dynamips_manager.ghost_ios_support(vm) + await dynamips_manager.ghost_ios_support(vm) except GeneratorExit: pass - yield from vm.start() + await vm.start() response.set_status(204) @Route.post( @@ -179,11 +179,11 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Stop a Dynamips VM instance") - def stop(request, response): + async def stop(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.stop() + await vm.stop() response.set_status(204) @Route.post( @@ -198,11 +198,11 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Suspend a Dynamips VM instance") - def suspend(request, response): + async def suspend(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.suspend() + await vm.suspend() response.set_status(204) @Route.post( @@ -217,11 +217,11 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Resume a suspended Dynamips VM instance") - def resume(request, response): + async def resume(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.resume() + await vm.resume() response.set_status(204) @Route.post( @@ -236,11 +236,11 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Reload a Dynamips VM instance") - def reload(request, response): + async def reload(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], 
project_id=request.match_info["project_id"]) - yield from vm.reload() + await vm.reload() response.set_status(204) @Route.post( @@ -259,14 +259,14 @@ class DynamipsVMHandler: description="Add a NIO to a Dynamips VM instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - nio = yield from dynamips_manager.create_nio(vm, request.json) + nio = await dynamips_manager.create_nio(vm, request.json) slot_number = int(request.match_info["adapter_number"]) port_number = int(request.match_info["port_number"]) - yield from vm.slot_add_nio_binding(slot_number, port_number, nio) + await vm.slot_add_nio_binding(slot_number, port_number, nio) response.set_status(201) response.json(nio) @@ -286,7 +286,7 @@ class DynamipsVMHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a Dynamips instance") - def update_nio(request, response): + async def update_nio(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -295,7 +295,7 @@ class DynamipsVMHandler: nio = vm.slots[slot_number].get_nio(port_number) if "filters" in request.json and nio: nio.filters = request.json["filters"] - yield from vm.slot_update_nio_binding(slot_number, port_number, nio) + await vm.slot_update_nio_binding(slot_number, port_number, nio) response.set_status(201) response.json(request.json) @@ -313,14 +313,14 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a Dynamips VM instance") - def delete_nio(request, response): + async def delete_nio(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) slot_number = int(request.match_info["adapter_number"]) port_number = int(request.match_info["port_number"]) - nio = yield from vm.slot_remove_nio_binding(slot_number, port_number) - yield from nio.delete() + nio = await vm.slot_remove_nio_binding(slot_number, port_number) + await nio.delete() response.set_status(204) @Route.post( @@ -338,7 +338,7 @@ class DynamipsVMHandler: }, description="Start a packet capture on a Dynamips VM instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -353,7 +353,7 @@ class DynamipsVMHandler: except UnicodeEncodeError: raise DynamipsError('The capture file path "{}" must only contain ASCII (English) characters'.format(pcap_file_path)) - yield from vm.start_capture(slot_number, port_number, pcap_file_path, request.json["data_link_type"]) + await vm.start_capture(slot_number, port_number, pcap_file_path, request.json["data_link_type"]) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -370,13 +370,13 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Stop a packet capture on a Dynamips VM instance") - def stop_capture(request, response): + async def stop_capture(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) slot_number = 
int(request.match_info["adapter_number"]) port_number = int(request.match_info["port_number"]) - yield from vm.stop_capture(slot_number, port_number) + await vm.stop_capture(slot_number, port_number) response.set_status(204) @Route.get( @@ -391,12 +391,12 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Retrieve the idlepc proposals") - def get_idlepcs(request, response): + async def get_idlepcs(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.set_idlepc("0x0") - idlepcs = yield from vm.get_idle_pc_prop() + await vm.set_idlepc("0x0") + idlepcs = await vm.get_idle_pc_prop() response.set_status(200) response.json(idlepcs) @@ -412,11 +412,11 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Retrieve the idlepc proposals") - def get_auto_idlepc(request, response): + async def get_auto_idlepc(request, response): dynamips_manager = Dynamips.instance() vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - idlepc = yield from dynamips_manager.auto_idlepc(vm) + idlepc = await dynamips_manager.auto_idlepc(vm) response.set_status(200) response.json({"idlepc": idlepc}) @@ -427,10 +427,10 @@ class DynamipsVMHandler: }, description="Retrieve the list of Dynamips IOS images", output=NODE_LIST_IMAGES_SCHEMA) - def list_images(request, response): + async def list_images(request, response): dynamips_manager = Dynamips.instance() - images = yield from dynamips_manager.list_images() + images = await dynamips_manager.list_images() response.set_status(200) response.json(images) @@ -444,10 +444,10 @@ class DynamipsVMHandler: }, raw=True, description="Upload a Dynamips IOS image") - def upload_image(request, response): + async def upload_image(request, response): dynamips_manager = Dynamips.instance() - yield from dynamips_manager.write_image(request.match_info["filename"], request.content) + await dynamips_manager.write_image(request.match_info["filename"], request.content) response.set_status(204) @Route.get( @@ -460,7 +460,7 @@ class DynamipsVMHandler: }, raw=True, description="Download a Dynamips IOS image") - def download_image(request, response): + async def download_image(request, response): filename = request.match_info["filename"] dynamips_manager = Dynamips.instance() @@ -470,7 +470,7 @@ class DynamipsVMHandler: if filename[0] == ".": raise aiohttp.web.HTTPForbidden() - yield from response.file(image_path) + await response.file(image_path) @Route.post( r"/projects/{project_id}/dynamips/nodes/{node_id}/duplicate", @@ -483,9 +483,9 @@ class DynamipsVMHandler: 404: "Instance doesn't exist" }, description="Duplicate a dynamips instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from Dynamips.instance().duplicate_node( + new_node = await Dynamips.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) diff --git a/gns3server/handlers/api/compute/ethernet_hub_handler.py b/gns3server/handlers/api/compute/ethernet_hub_handler.py index 77369567..54d051b4 100644 --- a/gns3server/handlers/api/compute/ethernet_hub_handler.py +++ b/gns3server/handlers/api/compute/ethernet_hub_handler.py @@ -48,11 +48,11 @@ class EthernetHubHandler: description="Create a new Ethernet hub instance", input=ETHERNET_HUB_CREATE_SCHEMA, output=ETHERNET_HUB_OBJECT_SCHEMA) - def create(request, response): + async 
def create(request, response): # Use the Dynamips Ethernet hub to simulate this node dynamips_manager = Dynamips.instance() - node = yield from dynamips_manager.create_node(request.json.pop("name"), + node = await dynamips_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), node_type="ethernet_hub", @@ -92,9 +92,9 @@ class EthernetHubHandler: 404: "Instance doesn't exist" }, description="Duplicate an ethernet hub instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from Dynamips.instance().duplicate_node( + new_node = await Dynamips.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) @@ -116,12 +116,12 @@ class EthernetHubHandler: description="Update an Ethernet hub instance", input=ETHERNET_HUB_UPDATE_SCHEMA, output=ETHERNET_HUB_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) if "name" in request.json and node.name != request.json["name"]: - yield from node.set_name(request.json["name"]) + await node.set_name(request.json["name"]) if "ports_mapping" in request.json: node.ports_mapping = request.json["ports_mapping"] @@ -140,10 +140,10 @@ class EthernetHubHandler: 404: "Instance doesn't exist" }, description="Delete an Ethernet hub instance") - def delete(request, response): + async def delete(request, response): dynamips_manager = Dynamips.instance() - yield from dynamips_manager.delete_node(request.match_info["node_id"]) + await dynamips_manager.delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -213,13 +213,13 @@ class EthernetHubHandler: description="Add a NIO to an Ethernet hub instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - nio = yield from dynamips_manager.create_nio(node, request.json) + nio = await dynamips_manager.create_nio(node, request.json) port_number = int(request.match_info["port_number"]) - yield from node.add_nio(nio, port_number) + await node.add_nio(nio, port_number) response.set_status(201) response.json(nio) @@ -238,13 +238,13 @@ class EthernetHubHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from an Ethernet hub instance") - def delete_nio(request, response): + async def delete_nio(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - nio = yield from node.remove_nio(port_number) - yield from nio.delete() + nio = await node.remove_nio(port_number) + await nio.delete() response.set_status(204) @Route.post( @@ -262,13 +262,13 @@ class EthernetHubHandler: }, description="Start a packet capture on an Ethernet hub instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) pcap_file_path = 
os.path.join(node.project.capture_working_directory(), request.json["capture_file_name"]) - yield from node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) + await node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -285,10 +285,10 @@ class EthernetHubHandler: 404: "Instance doesn't exist" }, description="Stop a packet capture on an Ethernet hub instance") - def stop_capture(request, response): + async def stop_capture(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from node.stop_capture(port_number) + await node.stop_capture(port_number) response.set_status(204) diff --git a/gns3server/handlers/api/compute/ethernet_switch_handler.py b/gns3server/handlers/api/compute/ethernet_switch_handler.py index 01aa69c0..bd7fee9a 100644 --- a/gns3server/handlers/api/compute/ethernet_switch_handler.py +++ b/gns3server/handlers/api/compute/ethernet_switch_handler.py @@ -48,11 +48,11 @@ class EthernetSwitchHandler: description="Create a new Ethernet switch instance", input=ETHERNET_SWITCH_CREATE_SCHEMA, output=ETHERNET_SWITCH_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): # Use the Dynamips Ethernet switch to simulate this node dynamips_manager = Dynamips.instance() - node = yield from dynamips_manager.create_node(request.json.pop("name"), + node = await dynamips_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), console=request.json.get("console"), @@ -62,7 +62,7 @@ class EthernetSwitchHandler: # On Linux, use the generic switch # builtin_manager = Builtin.instance() - # node = yield from builtin_manager.create_node(request.json.pop("name"), + # node = await builtin_manager.create_node(request.json.pop("name"), # request.match_info["project_id"], # request.json.get("node_id"), # node_type="ethernet_switch") @@ -103,9 +103,9 @@ class EthernetSwitchHandler: 404: "Instance doesn't exist" }, description="Duplicate an ethernet switch instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from Dynamips.instance().duplicate_node( + new_node = await Dynamips.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) @@ -127,15 +127,15 @@ class EthernetSwitchHandler: description="Update an Ethernet switch instance", input=ETHERNET_SWITCH_UPDATE_SCHEMA, output=ETHERNET_SWITCH_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) if "name" in request.json and node.name != request.json["name"]: - yield from node.set_name(request.json["name"]) + await node.set_name(request.json["name"]) if "ports_mapping" in request.json: node.ports_mapping = request.json["ports_mapping"] - yield from node.update_port_settings() + await node.update_port_settings() if "console_type" in request.json: node.console_type = request.json["console_type"] @@ -157,10 +157,10 @@ class EthernetSwitchHandler: 404: "Instance doesn't exist" }, description="Delete an Ethernet switch instance") - def delete(request, response): + async def delete(request, response): dynamips_manager = 
Dynamips.instance() - yield from dynamips_manager.delete_node(request.match_info["node_id"]) + await dynamips_manager.delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -230,17 +230,17 @@ class EthernetSwitchHandler: description="Add a NIO to an Ethernet switch instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - nio = yield from dynamips_manager.create_nio(node, request.json) + nio = await dynamips_manager.create_nio(node, request.json) port_number = int(request.match_info["port_number"]) - yield from node.add_nio(nio, port_number) + await node.add_nio(nio, port_number) #builtin_manager = Builtin.instance() #node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - #nio = yield from builtin_manager.create_nio(request.json["nio"]) + #nio = await builtin_manager.create_nio(request.json["nio"]) response.set_status(201) response.json(nio) @@ -259,15 +259,15 @@ class EthernetSwitchHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from an Ethernet switch instance") - def delete_nio(request, response): + async def delete_nio(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) #builtin_manager = Builtin.instance() #node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - nio = yield from node.remove_nio(port_number) - yield from nio.delete() + nio = await node.remove_nio(port_number) + await nio.delete() response.set_status(204) @Route.post( @@ -285,7 +285,7 @@ class EthernetSwitchHandler: }, description="Start a packet capture on an Ethernet switch instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -293,7 +293,7 @@ class EthernetSwitchHandler: #node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) pcap_file_path = os.path.join(node.project.capture_working_directory(), request.json["capture_file_name"]) - yield from node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) + await node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -310,12 +310,12 @@ class EthernetSwitchHandler: 404: "Instance doesn't exist" }, description="Stop a packet capture on an Ethernet switch instance") - def stop_capture(request, response): + async def stop_capture(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) #builtin_manager = Builtin.instance() #node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from node.stop_capture(port_number) + await node.stop_capture(port_number) response.set_status(204) diff --git 
a/gns3server/handlers/api/compute/frame_relay_switch_handler.py b/gns3server/handlers/api/compute/frame_relay_switch_handler.py index fac4ebc5..24a903ec 100644 --- a/gns3server/handlers/api/compute/frame_relay_switch_handler.py +++ b/gns3server/handlers/api/compute/frame_relay_switch_handler.py @@ -48,11 +48,11 @@ class FrameRelaySwitchHandler: description="Create a new Frame Relay switch instance", input=FRAME_RELAY_SWITCH_CREATE_SCHEMA, output=FRAME_RELAY_SWITCH_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): # Use the Dynamips Frame Relay switch to simulate this node dynamips_manager = Dynamips.instance() - node = yield from dynamips_manager.create_node(request.json.pop("name"), + node = await dynamips_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), node_type="frame_relay_switch", @@ -90,9 +90,9 @@ class FrameRelaySwitchHandler: 404: "Instance doesn't exist" }, description="Duplicate a frame relay switch instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from Dynamips.instance().duplicate_node( + new_node = await Dynamips.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) @@ -114,12 +114,12 @@ class FrameRelaySwitchHandler: description="Update a Frame Relay switch instance", input=FRAME_RELAY_SWITCH_UPDATE_SCHEMA, output=FRAME_RELAY_SWITCH_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) if "name" in request.json and node.name != request.json["name"]: - yield from node.set_name(request.json["name"]) + await node.set_name(request.json["name"]) if "mappings" in request.json: node.mappings = request.json["mappings"] node.updated() @@ -137,10 +137,10 @@ class FrameRelaySwitchHandler: 404: "Instance doesn't exist" }, description="Delete a Frame Relay switch instance") - def delete(request, response): + async def delete(request, response): dynamips_manager = Dynamips.instance() - yield from dynamips_manager.delete_node(request.match_info["node_id"]) + await dynamips_manager.delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -210,13 +210,13 @@ class FrameRelaySwitchHandler: description="Add a NIO to a Frame Relay switch instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - nio = yield from dynamips_manager.create_nio(node, request.json) + nio = await dynamips_manager.create_nio(node, request.json) port_number = int(request.match_info["port_number"]) - yield from node.add_nio(nio, port_number) + await node.add_nio(nio, port_number) response.set_status(201) response.json(nio) @@ -234,13 +234,13 @@ class FrameRelaySwitchHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a Frame Relay switch instance") - def delete_nio(request, response): + async def delete_nio(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - nio = yield from node.remove_nio(port_number) - yield from 
nio.delete() + nio = await node.remove_nio(port_number) + await nio.delete() response.set_status(204) @Route.post( @@ -258,13 +258,13 @@ class FrameRelaySwitchHandler: }, description="Start a packet capture on a Frame Relay switch instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) pcap_file_path = os.path.join(node.project.capture_working_directory(), request.json["capture_file_name"]) - yield from node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) + await node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -281,10 +281,10 @@ class FrameRelaySwitchHandler: 404: "Instance doesn't exist" }, description="Stop a packet capture on a Frame Relay switch instance") - def stop_capture(request, response): + async def stop_capture(request, response): dynamips_manager = Dynamips.instance() node = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from node.stop_capture(port_number) + await node.stop_capture(port_number) response.set_status(204) diff --git a/gns3server/handlers/api/compute/iou_handler.py b/gns3server/handlers/api/compute/iou_handler.py index 0a64ad2a..b5583033 100644 --- a/gns3server/handlers/api/compute/iou_handler.py +++ b/gns3server/handlers/api/compute/iou_handler.py @@ -54,10 +54,10 @@ class IOUHandler: description="Create a new IOU instance", input=IOU_CREATE_SCHEMA, output=IOU_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): iou = IOU.instance() - vm = yield from iou.create_node(request.json.pop("name"), + vm = await iou.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), path=request.json.get("path"), @@ -112,7 +112,7 @@ class IOUHandler: description="Update an IOU instance", input=IOU_OBJECT_SCHEMA, output=IOU_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -126,7 +126,7 @@ class IOUHandler: if vm.use_default_iou_values: # update the default IOU values in case the image or use_default_iou_values have changed # this is important to have the correct NVRAM amount in order to correctly push the configs to the NVRAM - yield from vm.update_default_iou_values() + await vm.update_default_iou_values() vm.updated() response.json(vm) @@ -142,9 +142,9 @@ class IOUHandler: 404: "Instance doesn't exist" }, description="Delete an IOU instance") - def delete(request, response): + async def delete(request, response): - yield from IOU.instance().delete_node(request.match_info["node_id"]) + await IOU.instance().delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -158,9 +158,9 @@ class IOUHandler: 404: "Instance doesn't exist" }, description="Duplicate a IOU instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from IOU.instance().duplicate_node( + new_node = await IOU.instance().duplicate_node( request.match_info["node_id"], 
request.json["destination_node_id"] ) @@ -181,7 +181,7 @@ class IOUHandler: input=IOU_START_SCHEMA, output=IOU_OBJECT_SCHEMA, description="Start an IOU instance") - def start(request, response): + async def start(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -190,7 +190,7 @@ class IOUHandler: if hasattr(vm, name) and getattr(vm, name) != value: setattr(vm, name, value) - yield from vm.start() + await vm.start() response.json(vm) @Route.post( @@ -205,11 +205,11 @@ class IOUHandler: 404: "Instance doesn't exist" }, description="Stop an IOU instance") - def stop(request, response): + async def stop(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.stop() + await vm.stop() response.set_status(204) @Route.post( @@ -242,11 +242,11 @@ class IOUHandler: 404: "Instance doesn't exist" }, description="Reload an IOU instance") - def reload(request, response): + async def reload(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.reload() + await vm.reload() response.set_status(204) @Route.post( @@ -265,7 +265,7 @@ class IOUHandler: description="Add a NIO to a IOU instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -273,7 +273,7 @@ class IOUHandler: if nio_type not in ("nio_udp", "nio_tap", "nio_ethernet", "nio_generic_ethernet"): raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_type)) nio = iou_manager.create_nio(request.json) - yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio) + await vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio) response.set_status(201) response.json(nio) @@ -293,7 +293,7 @@ class IOUHandler: description="Update a NIO from a IOU instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def update_nio(request, response): + async def update_nio(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -302,7 +302,7 @@ class IOUHandler: nio = vm.adapters[adapter_number].get_nio(port_number) if "filters" in request.json and nio: nio.filters = request.json["filters"] - yield from vm.adapter_update_nio_binding( + await vm.adapter_update_nio_binding( adapter_number, port_number, nio) @@ -323,11 +323,11 @@ class IOUHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a IOU instance") - def delete_nio(request, response): + async def delete_nio(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"])) + await vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"])) response.set_status(204) @Route.post( @@ -346,14 +346,14 @@ class IOUHandler: }, description="Start a packet capture on an IOU VM instance", 
input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = int(request.match_info["adapter_number"]) port_number = int(request.match_info["port_number"]) pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"]) - yield from vm.start_capture(adapter_number, port_number, pcap_file_path, request.json["data_link_type"]) + await vm.start_capture(adapter_number, port_number, pcap_file_path, request.json["data_link_type"]) response.json({"pcap_file_path": str(pcap_file_path)}) @Route.post( @@ -371,14 +371,14 @@ class IOUHandler: 409: "VM not started" }, description="Stop a packet capture on an IOU VM instance") - def stop_capture(request, response): + async def stop_capture(request, response): iou_manager = IOU.instance() vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = int(request.match_info["adapter_number"]) port_number = int(request.match_info["port_number"]) - yield from vm.stop_capture(adapter_number, port_number) + await vm.stop_capture(adapter_number, port_number) response.set_status(204) @Route.get( @@ -388,10 +388,10 @@ class IOUHandler: }, description="Retrieve the list of IOU images", output=NODE_LIST_IMAGES_SCHEMA) - def list_iou_images(request, response): + async def list_iou_images(request, response): iou_manager = IOU.instance() - images = yield from iou_manager.list_images() + images = await iou_manager.list_images() response.set_status(200) response.json(images) @@ -405,10 +405,10 @@ class IOUHandler: }, raw=True, description="Upload an IOU image") - def upload_image(request, response): + async def upload_image(request, response): iou_manager = IOU.instance() - yield from iou_manager.write_image(request.match_info["filename"], request.content) + await iou_manager.write_image(request.match_info["filename"], request.content) response.set_status(204) @@ -422,7 +422,7 @@ class IOUHandler: }, raw=True, description="Download an IOU image") - def download_image(request, response): + async def download_image(request, response): filename = request.match_info["filename"] iou_manager = IOU.instance() @@ -432,4 +432,4 @@ class IOUHandler: if filename[0] == ".": raise aiohttp.web.HTTPForbidden() - yield from response.file(image_path) + await response.file(image_path) diff --git a/gns3server/handlers/api/compute/nat_handler.py b/gns3server/handlers/api/compute/nat_handler.py index 21487c1d..45e6f21f 100644 --- a/gns3server/handlers/api/compute/nat_handler.py +++ b/gns3server/handlers/api/compute/nat_handler.py @@ -48,10 +48,10 @@ class NatHandler: description="Create a new nat instance", input=NAT_CREATE_SCHEMA, output=NAT_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): builtin_manager = Builtin.instance() - node = yield from builtin_manager.create_node(request.json.pop("name"), + node = await builtin_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), node_type="nat", @@ -115,10 +115,10 @@ class NatHandler: 404: "Instance doesn't exist" }, description="Delete a nat instance") - def delete(request, response): + async def delete(request, response): builtin_manager = Builtin.instance() - yield from builtin_manager.delete_node(request.match_info["node_id"]) + await 
builtin_manager.delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -188,13 +188,13 @@ class NatHandler: description="Add a NIO to a nat instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) nio = builtin_manager.create_nio(request.json) port_number = int(request.match_info["port_number"]) - yield from node.add_nio(nio, port_number) + await node.add_nio(nio, port_number) response.set_status(201) response.json(nio) @@ -214,14 +214,14 @@ class NatHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a NAT instance") - def update_nio(request, response): + async def update_nio(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) nio = node.nios[int(request.match_info["adapter_number"])] if "filters" in request.json and nio: nio.filters = request.json["filters"] - yield from node.update_nio(int(request.match_info["port_number"]), nio) + await node.update_nio(int(request.match_info["port_number"]), nio) response.set_status(201) response.json(request.json) @@ -239,12 +239,12 @@ class NatHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a nat instance") - def delete_nio(request, response): + async def delete_nio(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from node.remove_nio(port_number) + await node.remove_nio(port_number) response.set_status(204) @Route.post( @@ -262,13 +262,13 @@ class NatHandler: }, description="Start a packet capture on a nat instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) pcap_file_path = os.path.join(node.project.capture_working_directory(), request.json["capture_file_name"]) - yield from node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) + await node.start_capture(port_number, pcap_file_path, request.json["data_link_type"]) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -285,10 +285,10 @@ class NatHandler: 404: "Instance doesn't exist" }, description="Stop a packet capture on a nat instance") - def stop_capture(request, response): + async def stop_capture(request, response): builtin_manager = Builtin.instance() node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from node.stop_capture(port_number) + await node.stop_capture(port_number) response.set_status(204) diff --git a/gns3server/handlers/api/compute/notification_handler.py b/gns3server/handlers/api/compute/notification_handler.py index d41bcaa9..b8157921 100644 --- a/gns3server/handlers/api/compute/notification_handler.py +++ b/gns3server/handlers/api/compute/notification_handler.py @@ -20,16 +20,14 @@ import aiohttp from aiohttp.web import WebSocketResponse from gns3server.web.route import Route from 
gns3server.compute.notification_manager import NotificationManager -from gns3server.utils.asyncio import asyncio_ensure_future -@asyncio.coroutine -def process_websocket(ws): +async def process_websocket(ws): """ Process ping / pong and close message """ try: - yield from ws.receive() + await ws.receive() except aiohttp.WSServerHandshakeError: pass @@ -39,17 +37,17 @@ class NotificationHandler: @Route.get( r"/notifications/ws", description="Send notifications using Websockets") - def notifications(request, response): + async def notifications(request, response): notifications = NotificationManager.instance() ws = WebSocketResponse() - yield from ws.prepare(request) + await ws.prepare(request) - asyncio_ensure_future(process_websocket(ws)) + asyncio.ensure_future(process_websocket(ws)) with notifications.queue() as queue: while True: try: - notification = yield from queue.get_json(1) + notification = await queue.get_json(1) except asyncio.futures.CancelledError: break if ws.closed: diff --git a/gns3server/handlers/api/compute/project_handler.py b/gns3server/handlers/api/compute/project_handler.py index 18042f29..d18eeb6d 100644 --- a/gns3server/handlers/api/compute/project_handler.py +++ b/gns3server/handlers/api/compute/project_handler.py @@ -88,11 +88,11 @@ class ProjectHandler: }, output=PROJECT_OBJECT_SCHEMA, input=PROJECT_UPDATE_SCHEMA) - def update_project(request, response): + async def update_project(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) - yield from project.update( + await project.update( variables=request.json.get("variables", None) ) response.set_status(200) @@ -125,12 +125,12 @@ class ProjectHandler: 204: "Project closed", 404: "The project doesn't exist" }) - def close(request, response): + async def close(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) if ProjectHandler._notifications_listening.setdefault(project.id, 0) <= 1: - yield from project.close() + await project.close() pm.remove_project(project.id) try: del ProjectHandler._notifications_listening[project.id] @@ -150,11 +150,11 @@ class ProjectHandler: 204: "Changes have been written on disk", 404: "The project doesn't exist" }) - def delete(request, response): + async def delete(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) - yield from project.delete() + await project.delete() pm.remove_project(project.id) response.set_status(204) @@ -168,7 +168,7 @@ class ProjectHandler: 200: "End of stream", 404: "The project doesn't exist" }) - def notification(request, response): + async def notification(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) @@ -184,7 +184,7 @@ class ProjectHandler: response.write("{}\n".format(json.dumps(ProjectHandler._getPingMessage())).encode("utf-8")) while True: try: - (action, msg) = yield from asyncio.wait_for(queue.get(), 5) + (action, msg) = await asyncio.wait_for(queue.get(), 5) if hasattr(msg, "__json__"): msg = json.dumps({"action": action, "event": msg.__json__()}, sort_keys=True) else: @@ -219,11 +219,11 @@ class ProjectHandler: 404: "The project doesn't exist" }, output=PROJECT_FILE_LIST_SCHEMA) - def list_files(request, response): + async def list_files(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) - files = yield from project.list_files() + files = await 
project.list_files() response.json(files) response.set_status(200) @@ -238,7 +238,7 @@ class ProjectHandler: 403: "Permission denied", 404: "The file doesn't exist" }) - def get_file(request, response): + async def get_file(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) @@ -256,12 +256,12 @@ class ProjectHandler: try: with open(path, "rb") as f: - yield from response.prepare(request) + await response.prepare(request) while True: data = f.read(4096) if not data: break - yield from response.write(data) + await response.write(data) except FileNotFoundError: raise aiohttp.web.HTTPNotFound() @@ -279,7 +279,7 @@ class ProjectHandler: 403: "Permission denied", 404: "The file doesn't exist" }) - def stream_file(request, response): + async def stream_file(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) @@ -297,12 +297,12 @@ class ProjectHandler: try: with open(path, "rb") as f: - yield from response.prepare(request) + await response.prepare(request) while True: data = f.read(4096) if not data: - yield from asyncio.sleep(0.1) - yield from response.write(data) + await asyncio.sleep(0.1) + await response.write(data) except FileNotFoundError: raise aiohttp.web.HTTPNotFound() @@ -321,7 +321,7 @@ class ProjectHandler: 403: "Permission denied", 404: "The path doesn't exist" }) - def write_file(request, response): + async def write_file(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) @@ -340,7 +340,7 @@ class ProjectHandler: with open(path, 'wb+') as f: while True: try: - chunk = yield from request.content.read(1024) + chunk = await request.content.read(1024) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path)) if not chunk: @@ -363,21 +363,21 @@ class ProjectHandler: 200: "File returned", 404: "The project doesn't exist" }) - def export_project(request, response): + async def export_project(request, response): pm = ProjectManager.instance() project = pm.get_project(request.match_info["project_id"]) response.content_type = 'application/gns3project' response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name) response.enable_chunked_encoding() - yield from response.prepare(request) + await response.prepare(request) include_images = bool(int(request.json.get("include_images", "0"))) for data in project.export(include_images=include_images): response.write(data) - yield from response.drain() + await response.drain() - yield from response.write_eof() + await response.write_eof() @Route.post( r"/projects/{project_id}/import", @@ -391,7 +391,7 @@ class ProjectHandler: 200: "Project imported", 403: "Forbidden to import project" }) - def import_project(request, response): + async def import_project(request, response): pm = ProjectManager.instance() project_id = request.match_info["project_id"] @@ -403,7 +403,7 @@ class ProjectHandler: try: with tempfile.SpooledTemporaryFile(max_size=10000) as temp: while True: - chunk = yield from request.content.read(1024) + chunk = await request.content.read(1024) if not chunk: break temp.write(chunk) diff --git a/gns3server/handlers/api/compute/qemu_handler.py b/gns3server/handlers/api/compute/qemu_handler.py index 9d406bf5..41aba44d 100644 --- a/gns3server/handlers/api/compute/qemu_handler.py +++ b/gns3server/handlers/api/compute/qemu_handler.py @@ -63,10 +63,10 @@ class QEMUHandler: 
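The get_file() and stream_file() handlers above keep their blocking chunked-read loops and only swap yield from for await on the response calls. A rough standalone sketch of that loop, assuming a hypothetical write() coroutine in place of the real aiohttp response object:

import asyncio
import os
import tempfile


async def stream_chunks(path, write, chunk_size=4096):
    # same shape as get_file() above: read fixed-size chunks in a plain
    # blocking loop and await the coroutine that ships each chunk out
    with open(path, "rb") as f:
        while True:
            data = f.read(chunk_size)
            if not data:
                break
            await write(data)


async def demo():
    sent = []

    async def write(data):            # hypothetical stand-in for response.write()
        sent.append(data)

    with tempfile.NamedTemporaryFile("wb", delete=False) as f:
        f.write(b"x" * 10000)
    await stream_chunks(f.name, write)
    os.remove(f.name)
    print(len(sent), sum(len(c) for c in sent))   # 3 chunks, 10000 bytes


if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(demo())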
description="Create a new Qemu VM instance", input=QEMU_CREATE_SCHEMA, output=QEMU_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): qemu = Qemu.instance() - vm = yield from qemu.create_node(request.json.pop("name"), + vm = await qemu.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.pop("node_id", None), linked_clone=request.json.get("linked_clone", True), @@ -141,9 +141,9 @@ class QEMUHandler: 404: "Instance doesn't exist" }, description="Delete a Qemu VM instance") - def delete(request, response): + async def delete(request, response): - yield from Qemu.instance().delete_node(request.match_info["node_id"]) + await Qemu.instance().delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -157,9 +157,9 @@ class QEMUHandler: 404: "Instance doesn't exist" }, description="Duplicate a Qemu instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from Qemu.instance().duplicate_node( + new_node = await Qemu.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) @@ -178,11 +178,11 @@ class QEMUHandler: }, description="Resize a Qemu VM disk image", input=QEMU_RESIZE_SCHEMA) - def resize_disk(request, response): + async def resize_disk(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.resize_disk(request.json["drive_name"], request.json["extend"]) + await vm.resize_disk(request.json["drive_name"], request.json["extend"]) response.set_status(201) @Route.post( @@ -198,7 +198,7 @@ class QEMUHandler: }, description="Start a Qemu VM instance", output=QEMU_OBJECT_SCHEMA) - def start(request, response): + async def start(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -212,7 +212,7 @@ class QEMUHandler: pm = ProjectManager.instance() if pm.check_hardware_virtualization(vm) is False: raise aiohttp.web.HTTPConflict(text="Cannot start VM with hardware acceleration (KVM/HAX) enabled because hardware virtualization (VT-x/AMD-V) is already used by another software like VMware or VirtualBox") - yield from vm.start() + await vm.start() response.json(vm) @Route.post( @@ -227,11 +227,11 @@ class QEMUHandler: 404: "Instance doesn't exist" }, description="Stop a Qemu VM instance") - def stop(request, response): + async def stop(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.stop() + await vm.stop() response.set_status(204) @Route.post( @@ -246,11 +246,11 @@ class QEMUHandler: 404: "Instance doesn't exist" }, description="Reload a Qemu VM instance") - def reload(request, response): + async def reload(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.reload() + await vm.reload() response.set_status(204) @Route.post( @@ -265,11 +265,11 @@ class QEMUHandler: 404: "Instance doesn't exist" }, description="Suspend a Qemu VM instance") - def suspend(request, response): + async def suspend(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.suspend() + await 
vm.suspend() response.set_status(204) @Route.post( @@ -284,11 +284,11 @@ class QEMUHandler: 404: "Instance doesn't exist" }, description="Resume a Qemu VM instance") - def resume(request, response): + async def resume(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.resume() + await vm.resume() response.set_status(204) @Route.post( @@ -307,7 +307,7 @@ class QEMUHandler: description="Add a NIO to a Qemu VM instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -315,7 +315,7 @@ class QEMUHandler: if nio_type not in ("nio_udp"): raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_type)) nio = qemu_manager.create_nio(request.json) - yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio) + await vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio) response.set_status(201) response.json(nio) @@ -335,7 +335,7 @@ class QEMUHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a Qemu instance") - def update_nio(request, response): + async def update_nio(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -344,7 +344,7 @@ class QEMUHandler: nio.filters = request.json["filters"] if "suspend" in request.json and nio: nio.suspend = request.json["suspend"] - yield from vm.adapter_update_nio_binding(int(request.match_info["adapter_number"]), nio) + await vm.adapter_update_nio_binding(int(request.match_info["adapter_number"]), nio) response.set_status(201) response.json(request.json) @@ -362,11 +362,11 @@ class QEMUHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a Qemu VM instance") - def delete_nio(request, response): + async def delete_nio(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"])) + await vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"])) response.set_status(204) @Route.post( @@ -384,13 +384,13 @@ class QEMUHandler: }, description="Start a packet capture on a Qemu VM instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = int(request.match_info["adapter_number"]) pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"]) - yield from vm.start_capture(adapter_number, pcap_file_path) + await vm.start_capture(adapter_number, pcap_file_path) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -407,12 +407,12 @@ class QEMUHandler: 404: "Instance doesn't exist", }, description="Stop a packet capture on a Qemu VM instance") - def stop_capture(request, response): + async def stop_capture(request, response): qemu_manager = Qemu.instance() vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = 
int(request.match_info["adapter_number"]) - yield from vm.stop_capture(adapter_number) + await vm.stop_capture(adapter_number) response.set_status(204) @Route.get( @@ -425,9 +425,9 @@ class QEMUHandler: description="Get a list of available Qemu binaries", input=QEMU_BINARY_FILTER_SCHEMA, output=QEMU_BINARY_LIST_SCHEMA) - def list_binaries(request, response): + async def list_binaries(request, response): - binaries = yield from Qemu.binary_list(request.json.get("archs", None)) + binaries = await Qemu.binary_list(request.json.get("archs", None)) response.json(binaries) @Route.get( @@ -439,9 +439,9 @@ class QEMUHandler: }, description="Get a list of available Qemu-img binaries", output=QEMU_BINARY_LIST_SCHEMA) - def list_img_binaries(request, response): + async def list_img_binaries(request, response): - binaries = yield from Qemu.img_binary_list() + binaries = await Qemu.img_binary_list() response.json(binaries) @Route.get( @@ -452,9 +452,9 @@ class QEMUHandler: description="Get a list of Qemu capabilities on this server", output=QEMU_CAPABILITY_LIST_SCHEMA ) - def get_capabilities(request, response): + async def get_capabilities(request, response): capabilities = {"kvm": []} - kvms = yield from Qemu.get_kvm_archs() + kvms = await Qemu.get_kvm_archs() if kvms: capabilities["kvm"] = kvms response.json(capabilities) @@ -467,7 +467,7 @@ class QEMUHandler: description="Create a Qemu image", input=QEMU_IMAGE_CREATE_SCHEMA ) - def create_img(request, response): + async def create_img(request, response): qemu_img = request.json.pop("qemu_img") path = request.json.pop("path") @@ -477,7 +477,7 @@ class QEMUHandler: response.set_status(403) return - yield from Qemu.instance().create_disk(qemu_img, path, request.json) + await Qemu.instance().create_disk(qemu_img, path, request.json) response.set_status(201) @Route.put( @@ -488,7 +488,7 @@ class QEMUHandler: description="Update a Qemu image", input=QEMU_IMAGE_UPDATE_SCHEMA ) - def update_img(request, response): + async def update_img(request, response): qemu_img = request.json.pop("qemu_img") path = request.json.pop("path") @@ -499,7 +499,7 @@ class QEMUHandler: return if "extend" in request.json: - yield from Qemu.instance().resize_disk(qemu_img, path, request.json.pop("extend")) + await Qemu.instance().resize_disk(qemu_img, path, request.json.pop("extend")) response.set_status(201) @Route.get( @@ -509,10 +509,10 @@ class QEMUHandler: }, description="Retrieve the list of Qemu images", output=NODE_LIST_IMAGES_SCHEMA) - def list_qemu_images(request, response): + async def list_qemu_images(request, response): qemu_manager = Qemu.instance() - images = yield from qemu_manager.list_images() + images = await qemu_manager.list_images() response.set_status(200) response.json(images) @@ -526,10 +526,10 @@ class QEMUHandler: }, raw=True, description="Upload Qemu image") - def upload_image(request, response): + async def upload_image(request, response): qemu_manager = Qemu.instance() - yield from qemu_manager.write_image(request.match_info["filename"], request.content) + await qemu_manager.write_image(request.match_info["filename"], request.content) response.set_status(204) @Route.get( @@ -542,7 +542,7 @@ class QEMUHandler: }, raw=True, description="Download Qemu image") - def download_image(request, response): + async def download_image(request, response): filename = request.match_info["filename"] qemu_manager = Qemu.instance() @@ -552,4 +552,4 @@ class QEMUHandler: if filename[0] == ".": raise aiohttp.web.HTTPForbidden() - yield from response.file(image_path) + 
await response.file(image_path) diff --git a/gns3server/handlers/api/compute/traceng_handler.py b/gns3server/handlers/api/compute/traceng_handler.py index fc5d21c8..360c4d87 100644 --- a/gns3server/handlers/api/compute/traceng_handler.py +++ b/gns3server/handlers/api/compute/traceng_handler.py @@ -48,10 +48,10 @@ class TraceNGHandler: description="Create a new TraceNG instance", input=TRACENG_CREATE_SCHEMA, output=TRACENG_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): traceng = TraceNG.instance() - vm = yield from traceng.create_node(request.json["name"], + vm = await traceng.create_node(request.json["name"], request.match_info["project_id"], request.json.get("node_id"), console=request.json.get("console")) @@ -116,9 +116,9 @@ class TraceNGHandler: 404: "Instance doesn't exist" }, description="Delete a TraceNG instance") - def delete(request, response): + async def delete(request, response): - yield from TraceNG.instance().delete_node(request.match_info["node_id"]) + await TraceNG.instance().delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -132,9 +132,9 @@ class TraceNGHandler: 404: "Instance doesn't exist" }, description="Duplicate a TraceNG instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from TraceNG.instance().duplicate_node( + new_node = await TraceNG.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) @@ -155,11 +155,11 @@ class TraceNGHandler: description="Start a TraceNG instance", input=TRACENG_START_SCHEMA, output=TRACENG_OBJECT_SCHEMA) - def start(request, response): + async def start(request, response): traceng_manager = TraceNG.instance() vm = traceng_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.start(request.get("destination")) + await vm.start(request.get("destination")) response.json(vm) @Route.post( @@ -174,11 +174,11 @@ class TraceNGHandler: 404: "Instance doesn't exist" }, description="Stop a TraceNG instance") - def stop(request, response): + async def stop(request, response): traceng_manager = TraceNG.instance() vm = traceng_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.stop() + await vm.stop() response.set_status(204) @Route.post( @@ -211,11 +211,11 @@ class TraceNGHandler: 404: "Instance doesn't exist" }, description="Reload a TraceNG instance") - def reload(request, response): + async def reload(request, response): traceng_manager = TraceNG.instance() vm = traceng_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.reload() + await vm.reload() response.set_status(204) @Route.post( @@ -234,7 +234,7 @@ class TraceNGHandler: description="Add a NIO to a TraceNG instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): traceng_manager = TraceNG.instance() vm = traceng_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -242,7 +242,7 @@ class TraceNGHandler: if nio_type not in ("nio_udp"): raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type)) nio = traceng_manager.create_nio(request.json) - yield from vm.port_add_nio_binding(int(request.match_info["port_number"]), nio) + await vm.port_add_nio_binding(int(request.match_info["port_number"]), nio) response.set_status(201) 
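Each of these handler hunks applies the same mechanical rewrite: drop @asyncio.coroutine, declare the handler async def, and replace every yield from with await. A minimal before/after sketch of that shape, using a hypothetical FakeNode rather than any real GNS3 node class:

import asyncio


class FakeNode:
    # hypothetical stand-in for a node; its methods are coroutines,
    # like vm.start(), vm.stop() and node.add_nio() in the hunks above
    async def start(self):
        await asyncio.sleep(0)
        return "started"


# old style (Python 3.4 compatible), as removed by this patch:
#
#     @asyncio.coroutine
#     def start_handler(node):
#         result = yield from node.start()
#         return result

async def start_handler(node):
    # new style: a native coroutine awaiting the node coroutine directly
    return await node.start()


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(start_handler(FakeNode())))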
response.json(nio) @@ -262,14 +262,14 @@ class TraceNGHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a TraceNG instance") - def update_nio(request, response): + async def update_nio(request, response): traceng_manager = TraceNG.instance() vm = traceng_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) nio = vm.ethernet_adapter.get_nio(int(request.match_info["port_number"])) if "filters" in request.json and nio: nio.filters = request.json["filters"] - yield from vm.port_update_nio_binding(int(request.match_info["port_number"]), nio) + await vm.port_update_nio_binding(int(request.match_info["port_number"]), nio) response.set_status(201) response.json(request.json) @@ -287,11 +287,11 @@ class TraceNGHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a TraceNG instance") - def delete_nio(request, response): + async def delete_nio(request, response): traceng_manager = TraceNG.instance() vm = traceng_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.port_remove_nio_binding(int(request.match_info["port_number"])) + await vm.port_remove_nio_binding(int(request.match_info["port_number"])) response.set_status(204) @Route.post( @@ -309,13 +309,13 @@ class TraceNGHandler: }, description="Start a packet capture on a TraceNG instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): traceng_manager = TraceNG.instance() vm = traceng_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"]) - yield from vm.start_capture(port_number, pcap_file_path) + await vm.start_capture(port_number, pcap_file_path) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -332,10 +332,10 @@ class TraceNGHandler: 404: "Instance doesn't exist", }, description="Stop a packet capture on a TraceNG instance") - def stop_capture(request, response): + async def stop_capture(request, response): traceng_manager = TraceNG.instance() vm = traceng_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from vm.stop_capture(port_number) + await vm.stop_capture(port_number) response.set_status(204) diff --git a/gns3server/handlers/api/compute/virtualbox_handler.py b/gns3server/handlers/api/compute/virtualbox_handler.py index 42a1f127..a89efe96 100644 --- a/gns3server/handlers/api/compute/virtualbox_handler.py +++ b/gns3server/handlers/api/compute/virtualbox_handler.py @@ -50,10 +50,10 @@ class VirtualBoxHandler: description="Create a new VirtualBox VM instance", input=VBOX_CREATE_SCHEMA, output=VBOX_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): vbox_manager = VirtualBox.instance() - vm = yield from vbox_manager.create_node(request.json.pop("name"), + vm = await vbox_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), request.json.pop("vmname"), @@ -65,7 +65,7 @@ class VirtualBoxHandler: if "ram" in request.json: ram = request.json.pop("ram") if ram != vm.ram: - yield from vm.set_ram(ram) + await vm.set_ram(ram) for name, value in request.json.items(): if name != "node_id": @@ -109,7 +109,7 @@ class VirtualBoxHandler: 
description="Update a VirtualBox VM instance", input=VBOX_OBJECT_SCHEMA, output=VBOX_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -122,7 +122,7 @@ class VirtualBoxHandler: vm.name = name if vm.linked_clone: try: - yield from vm.set_vmname(vm.name) + await vm.set_vmname(vm.name) except VirtualBoxError as e: # In case of error we rollback (we can't change the name when running) vm.name = oldname vm.updated() @@ -131,12 +131,12 @@ class VirtualBoxHandler: if "adapters" in request.json: adapters = int(request.json.pop("adapters")) if adapters != vm.adapters: - yield from vm.set_adapters(adapters) + await vm.set_adapters(adapters) if "ram" in request.json: ram = request.json.pop("ram") if ram != vm.ram: - yield from vm.set_ram(ram) + await vm.set_ram(ram) # update the console first to avoid issue if updating console type vm.console = request.json.pop("console", vm.console) @@ -160,11 +160,11 @@ class VirtualBoxHandler: 404: "Instance doesn't exist" }, description="Delete a VirtualBox VM instance") - def delete(request, response): + async def delete(request, response): # check the project_id exists ProjectManager.instance().get_project(request.match_info["project_id"]) - yield from VirtualBox.instance().delete_node(request.match_info["node_id"]) + await VirtualBox.instance().delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -179,15 +179,15 @@ class VirtualBoxHandler: 404: "Instance doesn't exist" }, description="Start a VirtualBox VM instance") - def start(request, response): + async def start(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - if (yield from vm.check_hw_virtualization()): + if (await vm.check_hw_virtualization()): pm = ProjectManager.instance() if pm.check_hardware_virtualization(vm) is False: raise HTTPConflict(text="Cannot start VM because hardware virtualization (VT-x/AMD-V) is already used by another software like VMware or KVM (on Linux)") - yield from vm.start() + await vm.start() response.set_status(204) @Route.post( @@ -202,11 +202,11 @@ class VirtualBoxHandler: 404: "Instance doesn't exist" }, description="Stop a VirtualBox VM instance") - def stop(request, response): + async def stop(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.stop() + await vm.stop() response.set_status(204) @Route.post( @@ -221,11 +221,11 @@ class VirtualBoxHandler: 404: "Instance doesn't exist" }, description="Suspend a VirtualBox VM instance") - def suspend(request, response): + async def suspend(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.suspend() + await vm.suspend() response.set_status(204) @Route.post( @@ -240,11 +240,11 @@ class VirtualBoxHandler: 404: "Instance doesn't exist" }, description="Resume a suspended VirtualBox VM instance") - def resume(request, response): + async def resume(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.resume() + await vm.resume() 
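The VirtualBox start() hunk above also shows that await works as an expression, so the parenthesised (yield from vm.check_hw_virtualization()) test converts directly; the parentheses are kept but no longer required. A small sketch of that conditional-await shape, with a hypothetical FakeVM class:

import asyncio


class FakeVM:
    # hypothetical VM exposing a coroutine predicate,
    # mirroring check_hw_virtualization() above
    async def check_hw_virtualization(self):
        await asyncio.sleep(0)
        return True


async def start(vm):
    # awaiting inside the condition, as in the converted start() handler
    if await vm.check_hw_virtualization():
        return "hardware acceleration available"
    return "software emulation only"


if __name__ == "__main__":
    print(asyncio.get_event_loop().run_until_complete(start(FakeVM())))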
response.set_status(204) @Route.post( @@ -259,11 +259,11 @@ class VirtualBoxHandler: 404: "Instance doesn't exist" }, description="Reload a VirtualBox VM instance") - def reload(request, response): + async def reload(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.reload() + await vm.reload() response.set_status(204) @Route.post( @@ -282,7 +282,7 @@ class VirtualBoxHandler: description="Add a NIO to a VirtualBox VM instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -290,7 +290,7 @@ class VirtualBoxHandler: if nio_type not in ("nio_udp", "nio_nat"): raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type)) nio = vbox_manager.create_nio(request.json) - yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio) + await vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio) response.set_status(201) response.json(nio) @@ -310,7 +310,7 @@ class VirtualBoxHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a Virtualbox instance") - def update_nio(request, response): + async def update_nio(request, response): virtualbox_manager = VirtualBox.instance() vm = virtualbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -319,7 +319,7 @@ class VirtualBoxHandler: nio.filters = request.json["filters"] if "suspend" in request.json and nio: nio.suspend = request.json["suspend"] - yield from vm.adapter_update_nio_binding(int(request.match_info["adapter_number"]), nio) + await vm.adapter_update_nio_binding(int(request.match_info["adapter_number"]), nio) response.set_status(201) response.json(request.json) @@ -337,11 +337,11 @@ class VirtualBoxHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a VirtualBox VM instance") - def delete_nio(request, response): + async def delete_nio(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"])) + await vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"])) response.set_status(204) @Route.post( @@ -359,13 +359,13 @@ class VirtualBoxHandler: }, description="Start a packet capture on a VirtualBox VM instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): vbox_manager = VirtualBox.instance() vm = vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = int(request.match_info["adapter_number"]) pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"]) - yield from vm.start_capture(adapter_number, pcap_file_path) + await vm.start_capture(adapter_number, pcap_file_path) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -382,11 +382,11 @@ class VirtualBoxHandler: 404: "Instance doesn't exist" }, description="Stop a packet capture on a VirtualBox VM instance") - def stop_capture(request, response): + async def stop_capture(request, response): vbox_manager = VirtualBox.instance() vm = 
vbox_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - vm.stop_capture(int(request.match_info["adapter_number"])) + await vm.stop_capture(int(request.match_info["adapter_number"])) response.set_status(204) @Route.get( @@ -395,7 +395,7 @@ class VirtualBoxHandler: 200: "Success", }, description="Get all available VirtualBox VMs") - def get_vms(request, response): + async def get_vms(request, response): vbox_manager = VirtualBox.instance() - vms = yield from vbox_manager.list_vms() + vms = await vbox_manager.list_vms() response.json(vms) diff --git a/gns3server/handlers/api/compute/vmware_handler.py b/gns3server/handlers/api/compute/vmware_handler.py index 45475fe5..fa5f0b90 100644 --- a/gns3server/handlers/api/compute/vmware_handler.py +++ b/gns3server/handlers/api/compute/vmware_handler.py @@ -49,10 +49,10 @@ class VMwareHandler: description="Create a new VMware VM instance", input=VMWARE_CREATE_SCHEMA, output=VMWARE_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): vmware_manager = VMware.instance() - vm = yield from vmware_manager.create_node(request.json.pop("name"), + vm = await vmware_manager.create_node(request.json.pop("name"), request.match_info["project_id"], request.json.get("node_id"), request.json.pop("vmx_path"), @@ -127,11 +127,11 @@ class VMwareHandler: 404: "Instance doesn't exist" }, description="Delete a VMware VM instance") - def delete(request, response): + async def delete(request, response): # check the project_id exists ProjectManager.instance().get_project(request.match_info["project_id"]) - yield from VMware.instance().delete_node(request.match_info["node_id"]) + await VMware.instance().delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -146,7 +146,7 @@ class VMwareHandler: 404: "Instance doesn't exist" }, description="Start a VMware VM instance") - def start(request, response): + async def start(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -154,7 +154,7 @@ class VMwareHandler: pm = ProjectManager.instance() if pm.check_hardware_virtualization(vm) is False: raise HTTPConflict(text="Cannot start VM because hardware virtualization (VT-x/AMD-V) is already used by another software like VirtualBox or KVM (on Linux)") - yield from vm.start() + await vm.start() response.set_status(204) @Route.post( @@ -169,11 +169,11 @@ class VMwareHandler: 404: "Instance doesn't exist" }, description="Stop a VMware VM instance") - def stop(request, response): + async def stop(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.stop() + await vm.stop() response.set_status(204) @Route.post( @@ -188,11 +188,11 @@ class VMwareHandler: 404: "Instance doesn't exist" }, description="Suspend a VMware VM instance") - def suspend(request, response): + async def suspend(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.suspend() + await vm.suspend() response.set_status(204) @Route.post( @@ -207,11 +207,11 @@ class VMwareHandler: 404: "Instance doesn't exist" }, description="Resume a suspended VMware VM instance") - def resume(request, response): + async def resume(request, response): vmware_manager = VMware.instance() vm = 
vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.resume() + await vm.resume() response.set_status(204) @Route.post( @@ -226,11 +226,11 @@ class VMwareHandler: 404: "Instance doesn't exist" }, description="Reload a VMware VM instance") - def reload(request, response): + async def reload(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.reload() + await vm.reload() response.set_status(204) @Route.post( @@ -249,7 +249,7 @@ class VMwareHandler: description="Add a NIO to a VMware VM instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -257,7 +257,7 @@ class VMwareHandler: if nio_type not in ("nio_udp", "nio_vmnet", "nio_nat", "nio_tap"): raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type)) nio = vmware_manager.create_nio(request.json) - yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio) + await vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio) response.set_status(201) response.json(nio) @@ -277,14 +277,14 @@ class VMwareHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a Virtualbox instance") - def update_nio(request, response): + async def update_nio(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) nio = vm.ethernet_adapters[int(request.match_info["adapter_number"])] if "filters" in request.json and nio: nio.filters = request.json["filters"] - yield from vm.adapter_update_nio_binding(int(request.match_info["adapter_number"]), nio) + await vm.adapter_update_nio_binding(int(request.match_info["adapter_number"]), nio) response.set_status(201) response.json(request.json) @@ -302,11 +302,11 @@ class VMwareHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a VMware VM instance") - def delete_nio(request, response): + async def delete_nio(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"])) + await vm.adapter_remove_nio_binding(int(request.match_info["adapter_number"])) response.set_status(204) @Route.post( @@ -324,13 +324,13 @@ class VMwareHandler: }, description="Start a packet capture on a VMware VM instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = int(request.match_info["adapter_number"]) pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"]) - yield from vm.start_capture(adapter_number, pcap_file_path) + await vm.start_capture(adapter_number, pcap_file_path) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -347,12 +347,12 @@ class VMwareHandler: 404: "Instance doesn't exist", }, description="Stop a packet capture on a VMware VM instance") - def stop_capture(request, 
response): + async def stop_capture(request, response): vmware_manager = VMware.instance() vm = vmware_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) adapter_number = int(request.match_info["adapter_number"]) - yield from vm.stop_capture(adapter_number) + await vm.stop_capture(adapter_number) response.set_status(204) @Route.post( @@ -381,7 +381,7 @@ class VMwareHandler: 200: "Success", }, description="Get all VMware VMs available") - def get_vms(request, response): + async def get_vms(request, response): vmware_manager = VMware.instance() - vms = yield from vmware_manager.list_vms() + vms = await vmware_manager.list_vms() response.json(vms) diff --git a/gns3server/handlers/api/compute/vpcs_handler.py b/gns3server/handlers/api/compute/vpcs_handler.py index ed08de11..0d3ba5f9 100644 --- a/gns3server/handlers/api/compute/vpcs_handler.py +++ b/gns3server/handlers/api/compute/vpcs_handler.py @@ -47,10 +47,10 @@ class VPCSHandler: description="Create a new VPCS instance", input=VPCS_CREATE_SCHEMA, output=VPCS_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): vpcs = VPCS.instance() - vm = yield from vpcs.create_node(request.json["name"], + vm = await vpcs.create_node(request.json["name"], request.match_info["project_id"], request.json.get("node_id"), console=request.json.get("console"), @@ -115,9 +115,9 @@ class VPCSHandler: 404: "Instance doesn't exist" }, description="Delete a VPCS instance") - def delete(request, response): + async def delete(request, response): - yield from VPCS.instance().delete_node(request.match_info["node_id"]) + await VPCS.instance().delete_node(request.match_info["node_id"]) response.set_status(204) @Route.post( @@ -131,9 +131,9 @@ class VPCSHandler: 404: "Instance doesn't exist" }, description="Duplicate a VPCS instance") - def duplicate(request, response): + async def duplicate(request, response): - new_node = yield from VPCS.instance().duplicate_node( + new_node = await VPCS.instance().duplicate_node( request.match_info["node_id"], request.json["destination_node_id"] ) @@ -153,11 +153,11 @@ class VPCSHandler: }, description="Start a VPCS instance", output=VPCS_OBJECT_SCHEMA) - def start(request, response): + async def start(request, response): vpcs_manager = VPCS.instance() vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.start() + await vm.start() response.json(vm) @Route.post( @@ -172,11 +172,11 @@ class VPCSHandler: 404: "Instance doesn't exist" }, description="Stop a VPCS instance") - def stop(request, response): + async def stop(request, response): vpcs_manager = VPCS.instance() vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.stop() + await vm.stop() response.set_status(204) @Route.post( @@ -209,11 +209,11 @@ class VPCSHandler: 404: "Instance doesn't exist" }, description="Reload a VPCS instance") - def reload(request, response): + async def reload(request, response): vpcs_manager = VPCS.instance() vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.reload() + await vm.reload() response.set_status(204) @Route.post( @@ -232,7 +232,7 @@ class VPCSHandler: description="Add a NIO to a VPCS instance", input=NIO_SCHEMA, output=NIO_SCHEMA) - def create_nio(request, response): + async def create_nio(request, response): vpcs_manager = VPCS.instance() vm = 
vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) @@ -240,7 +240,7 @@ class VPCSHandler: if nio_type not in ("nio_udp", "nio_tap"): raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type)) nio = vpcs_manager.create_nio(request.json) - yield from vm.port_add_nio_binding(int(request.match_info["port_number"]), nio) + await vm.port_add_nio_binding(int(request.match_info["port_number"]), nio) response.set_status(201) response.json(nio) @@ -260,14 +260,14 @@ class VPCSHandler: input=NIO_SCHEMA, output=NIO_SCHEMA, description="Update a NIO from a VPCS instance") - def update_nio(request, response): + async def update_nio(request, response): vpcs_manager = VPCS.instance() vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) nio = vm.ethernet_adapter.get_nio(int(request.match_info["port_number"])) if "filters" in request.json and nio: nio.filters = request.json["filters"] - yield from vm.port_update_nio_binding(int(request.match_info["port_number"]), nio) + await vm.port_update_nio_binding(int(request.match_info["port_number"]), nio) response.set_status(201) response.json(request.json) @@ -285,11 +285,11 @@ class VPCSHandler: 404: "Instance doesn't exist" }, description="Remove a NIO from a VPCS instance") - def delete_nio(request, response): + async def delete_nio(request, response): vpcs_manager = VPCS.instance() vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) - yield from vm.port_remove_nio_binding(int(request.match_info["port_number"])) + await vm.port_remove_nio_binding(int(request.match_info["port_number"])) response.set_status(204) @Route.post( @@ -307,13 +307,13 @@ class VPCSHandler: }, description="Start a packet capture on a VPCS instance", input=NODE_CAPTURE_SCHEMA) - def start_capture(request, response): + async def start_capture(request, response): vpcs_manager = VPCS.instance() vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) pcap_file_path = os.path.join(vm.project.capture_working_directory(), request.json["capture_file_name"]) - yield from vm.start_capture(port_number, pcap_file_path) + await vm.start_capture(port_number, pcap_file_path) response.json({"pcap_file_path": pcap_file_path}) @Route.post( @@ -330,10 +330,10 @@ class VPCSHandler: 404: "Instance doesn't exist", }, description="Stop a packet capture on a VPCS instance") - def stop_capture(request, response): + async def stop_capture(request, response): vpcs_manager = VPCS.instance() vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"]) port_number = int(request.match_info["port_number"]) - yield from vm.stop_capture(port_number) + await vm.stop_capture(port_number) response.set_status(204) diff --git a/gns3server/handlers/api/controller/appliance_handler.py b/gns3server/handlers/api/controller/appliance_handler.py index 7cadd710..c78f6cb1 100644 --- a/gns3server/handlers/api/controller/appliance_handler.py +++ b/gns3server/handlers/api/controller/appliance_handler.py @@ -34,11 +34,11 @@ class ApplianceHandler: status_codes={ 200: "Appliance template list returned" }) - def list_templates(request, response): + async def list_templates(request, response): controller = Controller.instance() if request.query.get("update", "no") == "yes": - yield from controller.download_appliance_templates() + await 
controller.download_appliance_templates() controller.load_appliance_templates() response.json([c for c in controller.appliance_templates.values()]) @@ -66,11 +66,11 @@ class ApplianceHandler: }, input=APPLIANCE_USAGE_SCHEMA, output=NODE_OBJECT_SCHEMA) - def create_node_from_appliance(request, response): + async def create_node_from_appliance(request, response): controller = Controller.instance() project = controller.get_project(request.match_info["project_id"]) - yield from project.add_node_from_appliance(request.match_info["appliance_id"], + await project.add_node_from_appliance(request.match_info["appliance_id"], x=request.json["x"], y=request.json["y"], compute_id=request.json.get("compute_id")) diff --git a/gns3server/handlers/api/controller/compute_handler.py b/gns3server/handlers/api/controller/compute_handler.py index dbc013f2..ec801050 100644 --- a/gns3server/handlers/api/controller/compute_handler.py +++ b/gns3server/handlers/api/controller/compute_handler.py @@ -41,9 +41,9 @@ class ComputeHandler: }, input=COMPUTE_CREATE_SCHEMA, output=COMPUTE_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): - compute = yield from Controller.instance().add_compute(**request.json) + compute = await Controller.instance().add_compute(**request.json) response.set_status(201) response.json(compute) @@ -68,14 +68,14 @@ class ComputeHandler: }, input=COMPUTE_UPDATE_SCHEMA, output=COMPUTE_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): controller = Controller.instance() compute = controller.get_compute(request.match_info["compute_id"]) # Ignore these because we only use them when creating a node request.json.pop("compute_id", None) - yield from compute.update(**request.json) + await compute.update(**request.json) response.set_status(200) response.json(compute) @@ -89,10 +89,10 @@ class ComputeHandler: 404: "Instance doesn't exist" }, description="Return the list of images available on compute and controller for this emulator type") - def images(request, response): + async def images(request, response): controller = Controller.instance() compute = controller.get_compute(request.match_info["compute_id"]) - res = yield from compute.images(request.match_info["emulator"]) + res = await compute.images(request.match_info["emulator"]) response.json(res) @Route.get( @@ -133,10 +133,10 @@ class ComputeHandler: 404: "Instance doesn't exist" }, description="Forward call specific to compute node. Read the full compute API for available actions") - def get_forward(request, response): + async def get_forward(request, response): controller = Controller.instance() compute = controller.get_compute(request.match_info["compute_id"]) - res = yield from compute.forward("GET", request.match_info["emulator"], request.match_info["action"]) + res = await compute.forward("GET", request.match_info["emulator"], request.match_info["action"]) response.json(res) @Route.post( @@ -150,10 +150,10 @@ class ComputeHandler: }, raw=True, description="Forward call specific to compute node. 
Read the full compute API for available actions") - def post_forward(request, response): + async def post_forward(request, response): controller = Controller.instance() compute = controller.get_compute(request.match_info["compute_id"]) - res = yield from compute.forward("POST", request.match_info["emulator"], request.match_info["action"], data=request.content) + res = await compute.forward("POST", request.match_info["emulator"], request.match_info["action"], data=request.content) response.json(res) @Route.put( @@ -167,10 +167,10 @@ class ComputeHandler: }, raw=True, description="Forward call specific to compute node. Read the full compute API for available actions") - def put_forward(request, response): + async def put_forward(request, response): controller = Controller.instance() compute = controller.get_compute(request.match_info["compute_id"]) - res = yield from compute.forward("PUT", request.match_info["emulator"], request.match_info["action"], data=request.content) + res = await compute.forward("PUT", request.match_info["emulator"], request.match_info["action"], data=request.content) response.json(res) @Route.get( @@ -197,9 +197,9 @@ class ComputeHandler: 404: "Instance doesn't exist" }, description="Delete a compute instance") - def delete(request, response): + async def delete(request, response): controller = Controller.instance() - yield from controller.delete_compute(request.match_info["compute_id"]) + await controller.delete_compute(request.match_info["compute_id"]) response.set_status(204) @Route.post( @@ -211,9 +211,9 @@ class ComputeHandler: 200: "Idle PC computed", }, description="Compute IDLE PC value") - def autoidlepc(request, response): + async def autoidlepc(request, response): controller = Controller.instance() - res = yield from controller.autoidlepc(request.match_info["compute_id"], request.json["platform"], request.json["image"], request.json["ram"]) + res = await controller.autoidlepc(request.match_info["compute_id"], request.json["platform"], request.json["image"], request.json["ram"]) response.json(res) @Route.get( @@ -226,8 +226,8 @@ class ComputeHandler: }, description="Get ports used by a compute", output=COMPUTE_PORTS_OBJECT_SCHEMA) - def ports(request, response): + async def ports(request, response): controller = Controller.instance() - res = yield from controller.compute_ports(request.match_info["compute_id"]) + res = await controller.compute_ports(request.match_info["compute_id"]) response.json(res) diff --git a/gns3server/handlers/api/controller/drawing_handler.py b/gns3server/handlers/api/controller/drawing_handler.py index 719b2694..2ff19b41 100644 --- a/gns3server/handlers/api/controller/drawing_handler.py +++ b/gns3server/handlers/api/controller/drawing_handler.py @@ -37,9 +37,9 @@ class DrawingHandler: 200: "List of drawings returned", }, description="List drawings of a project") - def list_drawings(request, response): + async def list_drawings(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) response.json([v for v in project.drawings.values()]) @Route.post( @@ -54,10 +54,10 @@ class DrawingHandler: description="Create a new drawing instance", input=DRAWING_OBJECT_SCHEMA, output=DRAWING_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - drawing = yield from 
project.add_drawing(**request.json) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + drawing = await project.add_drawing(**request.json) response.set_status(201) response.json(drawing) @@ -74,9 +74,9 @@ class DrawingHandler: }, description="Get a drawing instance", output=DRAWING_OBJECT_SCHEMA) - def get_drawing(request, response): + async def get_drawing(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) drawing = project.get_drawing(request.match_info["drawing_id"]) response.set_status(200) response.json(drawing) @@ -94,11 +94,11 @@ class DrawingHandler: description="Update a drawing instance", input=DRAWING_OBJECT_SCHEMA, output=DRAWING_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) drawing = project.get_drawing(request.match_info["drawing_id"]) - yield from drawing.update(**request.json) + await drawing.update(**request.json) response.set_status(201) response.json(drawing) @@ -113,8 +113,8 @@ class DrawingHandler: 400: "Invalid request" }, description="Delete a drawing instance") - def delete(request, response): + async def delete(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - yield from project.delete_drawing(request.match_info["drawing_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + await project.delete_drawing(request.match_info["drawing_id"]) response.set_status(204) diff --git a/gns3server/handlers/api/controller/gns3_vm_handler.py b/gns3server/handlers/api/controller/gns3_vm_handler.py index 87366f71..5371e3d0 100644 --- a/gns3server/handlers/api/controller/gns3_vm_handler.py +++ b/gns3server/handlers/api/controller/gns3_vm_handler.py @@ -49,9 +49,9 @@ class GNS3VMHandler: 400: "Invalid request", }, description="Get all the available VMs for a specific virtualization engine") - def get_vms(request, response): + async def get_vms(request, response): - vms = yield from Controller.instance().gns3vm.list(request.match_info["engine"]) + vms = await Controller.instance().gns3vm.list(request.match_info["engine"]) response.json(vms) @Route.get( @@ -72,9 +72,9 @@ class GNS3VMHandler: status_codes={ 201: "GNS3 VM updated" }) - def update(request, response): + async def update(request, response): gns3_vm = Controller().instance().gns3vm - yield from gns3_vm.update_settings(request.json) + await gns3_vm.update_settings(request.json) response.json(gns3_vm) response.set_status(201) diff --git a/gns3server/handlers/api/controller/link_handler.py b/gns3server/handlers/api/controller/link_handler.py index 5f15577d..b4176fe7 100644 --- a/gns3server/handlers/api/controller/link_handler.py +++ b/gns3server/handlers/api/controller/link_handler.py @@ -42,9 +42,9 @@ class LinkHandler: 200: "List of links returned", }, description="List links of a project") - def list_links(request, response): + async def list_links(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) response.json([v for v in 
project.links.values()]) @Route.post( @@ -59,22 +59,22 @@ class LinkHandler: description="Create a new link instance", input=LINK_OBJECT_SCHEMA, output=LINK_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - link = yield from project.add_link() + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + link = await project.add_link() if "filters" in request.json: - yield from link.update_filters(request.json["filters"]) + await link.update_filters(request.json["filters"]) if "suspend" in request.json: - yield from link.update_suspend(request.json["suspend"]) + await link.update_suspend(request.json["suspend"]) try: for node in request.json["nodes"]: - yield from link.add_node(project.get_node(node["node_id"]), + await link.add_node(project.get_node(node["node_id"]), node.get("adapter_number", 0), node.get("port_number", 0), label=node.get("label")) except aiohttp.web.HTTPException as e: - yield from project.delete_link(link.id) + await project.delete_link(link.id) raise e response.set_status(201) response.json(link) @@ -90,9 +90,9 @@ class LinkHandler: 400: "Invalid request" }, description="Return the list of filters available for this link") - def list_filters(request, response): + async def list_filters(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) link = project.get_link(request.match_info["link_id"]) response.set_status(200) response.json(link.available_filters()) @@ -110,9 +110,9 @@ class LinkHandler: }, description="Get a link instance", output=LINK_OBJECT_SCHEMA) - def get_link(request, response): + async def get_link(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) link = project.get_link(request.match_info["link_id"]) response.set_status(200) response.json(link) @@ -130,16 +130,16 @@ class LinkHandler: description="Update a link instance", input=LINK_OBJECT_SCHEMA, output=LINK_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) link = project.get_link(request.match_info["link_id"]) if "filters" in request.json: - yield from link.update_filters(request.json["filters"]) + await link.update_filters(request.json["filters"]) if "suspend" in request.json: - yield from link.update_suspend(request.json["suspend"]) + await link.update_suspend(request.json["suspend"]) if "nodes" in request.json: - yield from link.update_nodes(request.json["nodes"]) + await link.update_nodes(request.json["nodes"]) response.set_status(201) response.json(link) @@ -156,11 +156,11 @@ class LinkHandler: input=LINK_CAPTURE_SCHEMA, output=LINK_OBJECT_SCHEMA, description="Start capture on a link instance. 
By default we consider it as an Ethernet link") - def start_capture(request, response): + async def start_capture(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) link = project.get_link(request.match_info["link_id"]) - yield from link.start_capture(data_link_type=request.json.get("data_link_type", "DLT_EN10MB"), capture_file_name=request.json.get("capture_file_name")) + await link.start_capture(data_link_type=request.json.get("data_link_type", "DLT_EN10MB"), capture_file_name=request.json.get("capture_file_name")) response.set_status(201) response.json(link) @@ -175,11 +175,11 @@ class LinkHandler: 400: "Invalid request" }, description="Stop capture on a link instance") - def stop_capture(request, response): + async def stop_capture(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) link = project.get_link(request.match_info["link_id"]) - yield from link.stop_capture() + await link.stop_capture() response.set_status(201) response.json(link) @@ -194,10 +194,10 @@ class LinkHandler: 400: "Invalid request" }, description="Delete a link instance") - def delete(request, response): + async def delete(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - yield from project.delete_link(request.match_info["link_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + await project.delete_link(request.match_info["link_id"]) response.set_status(204) @Route.get( @@ -212,16 +212,16 @@ class LinkHandler: 403: "Permission denied", 404: "The file doesn't exist" }) - def pcap(request, response): + async def pcap(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) link = project.get_link(request.match_info["link_id"]) while link.capture_file_path is None: raise aiohttp.web.HTTPNotFound(text="pcap file not found") while not os.path.isfile(link.capture_file_path): - yield from asyncio.sleep(0.5) + await asyncio.sleep(0.5) try: with open(link.capture_file_path, "rb") as f: @@ -229,12 +229,12 @@ class LinkHandler: response.content_type = "application/vnd.tcpdump.pcap" response.set_status(200) response.enable_chunked_encoding() - yield from response.prepare(request) + await response.prepare(request) while True: chunk = f.read(4096) if not chunk: - yield from asyncio.sleep(0.1) - yield from response.write(chunk) + await asyncio.sleep(0.1) + await response.write(chunk) except OSError: raise aiohttp.web.HTTPNotFound(text="pcap file {} not found or not accessible".format(link.capture_file_path)) diff --git a/gns3server/handlers/api/controller/node_handler.py b/gns3server/handlers/api/controller/node_handler.py index c56d953c..0337cc4e 100644 --- a/gns3server/handlers/api/controller/node_handler.py +++ b/gns3server/handlers/api/controller/node_handler.py @@ -46,12 +46,12 @@ class NodeHandler: description="Create a new node instance", input=NODE_CREATE_SCHEMA, output=NODE_OBJECT_SCHEMA) - def create(request, response): + async def create(request, response): controller = Controller.instance() compute = 
controller.get_compute(request.json.pop("compute_id")) - project = yield from controller.get_loaded_project(request.match_info["project_id"]) - node = yield from project.add_node(compute, request.json.pop("name"), request.json.pop("node_id", None), **request.json) + project = await controller.get_loaded_project(request.match_info["project_id"]) + node = await project.add_node(compute, request.json.pop("name"), request.json.pop("node_id", None), **request.json) response.set_status(201) response.json(node) @@ -79,9 +79,9 @@ class NodeHandler: 200: "List of nodes returned", }, description="List nodes of a project") - def list_nodes(request, response): + async def list_nodes(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) response.json([v for v in project.nodes.values()]) @Route.put( @@ -94,8 +94,8 @@ class NodeHandler: description="Update a node instance", input=NODE_UPDATE_SCHEMA, output=NODE_OBJECT_SCHEMA) - def update(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + async def update(request, response): + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) # Ignore these because we only use them when creating a node @@ -103,7 +103,7 @@ class NodeHandler: request.json.pop("node_type", None) request.json.pop("compute_id", None) - yield from node.update(**request.json) + await node.update(**request.json) response.set_status(200) response.json(node) @@ -119,10 +119,10 @@ class NodeHandler: }, description="Start all nodes belonging to the project", output=NODE_OBJECT_SCHEMA) - def start_all(request, response): + async def start_all(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - yield from project.start_all() + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + await project.start_all() response.set_status(204) @Route.post( @@ -137,10 +137,10 @@ class NodeHandler: }, description="Stop all nodes belonging to the project", output=NODE_OBJECT_SCHEMA) - def stop_all(request, response): + async def stop_all(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - yield from project.stop_all() + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + await project.stop_all() response.set_status(204) @Route.post( @@ -155,10 +155,10 @@ class NodeHandler: }, description="Suspend all nodes belonging to the project", output=NODE_OBJECT_SCHEMA) - def suspend_all(request, response): + async def suspend_all(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - yield from project.suspend_all() + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + await project.suspend_all() response.set_status(204) @Route.post( @@ -173,11 +173,11 @@ class NodeHandler: }, description="Reload all nodes belonging to the project", output=NODE_OBJECT_SCHEMA) - def reload_all(request, response): + async def reload_all(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - yield from project.stop_all() - yield from 
project.start_all() + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + await project.stop_all() + await project.start_all() response.set_status(204) @Route.post( @@ -194,11 +194,11 @@ class NodeHandler: description="Duplicate a node instance", input=NODE_DUPLICATE_SCHEMA, output=NODE_OBJECT_SCHEMA) - def duplicate(request, response): + async def duplicate(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) - new_node = yield from project.duplicate_node( + new_node = await project.duplicate_node( node, request.json["x"], request.json["y"], @@ -219,11 +219,11 @@ class NodeHandler: }, description="Start a node instance", output=NODE_OBJECT_SCHEMA) - def start(request, response): + async def start(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) - yield from node.start(data=request.json) + await node.start(data=request.json) response.json(node) response.set_status(200) @@ -240,11 +240,11 @@ class NodeHandler: }, description="Stop a node instance", output=NODE_OBJECT_SCHEMA) - def stop(request, response): + async def stop(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) - yield from node.stop() + await node.stop() response.json(node) response.set_status(200) @@ -261,11 +261,11 @@ class NodeHandler: }, description="Suspend a node instance", output=NODE_OBJECT_SCHEMA) - def suspend(request, response): + async def suspend(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) - yield from node.suspend() + await node.suspend() response.json(node) response.set_status(200) @@ -282,11 +282,11 @@ class NodeHandler: }, description="Reload a node instance", output=NODE_OBJECT_SCHEMA) - def reload(request, response): + async def reload(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) - #yield from node.reload() + #await node.reload() response.json(node) response.set_status(200) @@ -302,9 +302,9 @@ class NodeHandler: 404: "Instance doesn't exist" }, description="Delete a node instance") - def delete(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) - yield from project.delete_node(request.match_info["node_id"]) + async def delete(request, response): + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) + await project.delete_node(request.match_info["node_id"]) response.set_status(204) @Route.get( @@ -319,9 +319,9 @@ class NodeHandler: 404: "Instance doesn't exist" }, description="Return all the links 
connected to this node") - def links(request, response): + async def links(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) links = [] for link in node.links: @@ -341,11 +341,11 @@ class NodeHandler: 404: "Instance doesn't exist" }, description="Compute the IDLE PC for a Dynamips node") - def auto_idlepc(request, response): + async def auto_idlepc(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) - idle = yield from node.dynamips_auto_idlepc() + idle = await node.dynamips_auto_idlepc() response.json(idle) response.set_status(200) @@ -361,11 +361,11 @@ class NodeHandler: 404: "Instance doesn't exist" }, description="Compute a list of potential idle PC for a node") - def idlepc_proposals(request, response): + async def idlepc_proposals(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) - idle = yield from node.dynamips_idlepc_proposals() + idle = await node.dynamips_idlepc_proposals() response.json(idle) response.set_status(200) @@ -381,11 +381,11 @@ class NodeHandler: 404: "Instance doesn't exist" }, description="Reload a node instance") - def resize_disk(request, response): + async def resize_disk(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) - yield from node.post("/resize_disk", request.json) + await node.post("/resize_disk", request.json) response.set_status(201) @Route.get( @@ -400,9 +400,9 @@ class NodeHandler: 404: "Instance doesn't exist" }, description="Get a file in the node directory") - def get_file(request, response): + async def get_file(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) path = request.match_info["path"] path = force_unix_path(path) @@ -415,14 +415,14 @@ class NodeHandler: node_type = node.node_type path = "/project-files/{}/{}/{}".format(node_type, node.id, path) - res = yield from node.compute.http_query("GET", "/projects/{project_id}/files{path}".format(project_id=project.id, path=path), timeout=None, raw=True) + res = await node.compute.http_query("GET", "/projects/{project_id}/files{path}".format(project_id=project.id, path=path), timeout=None, raw=True) response.set_status(200) response.content_type = "application/octet-stream" response.enable_chunked_encoding() - yield from response.prepare(request) + await response.prepare(request) response.write(res.body) - yield from response.write_eof() + await response.write_eof() @Route.post( r"/projects/{project_id}/nodes/{node_id}/files/{path:.+}", @@ -437,9 +437,9 @@ class NodeHandler: }, raw=True, description="Write a file in the node directory") - 
def post_file(request, response): + async def post_file(request, response): - project = yield from Controller.instance().get_loaded_project(request.match_info["project_id"]) + project = await Controller.instance().get_loaded_project(request.match_info["project_id"]) node = project.get_node(request.match_info["node_id"]) path = request.match_info["path"] path = force_unix_path(path) @@ -450,6 +450,6 @@ class NodeHandler: node_type = node.node_type path = "/project-files/{}/{}/{}".format(node_type, node.id, path) - data = yield from request.content.read() #FIXME: are we handling timeout or large files correctly? - yield from node.compute.http_query("POST", "/projects/{project_id}/files{path}".format(project_id=project.id, path=path), data=data, timeout=None, raw=True) + data = await request.content.read() #FIXME: are we handling timeout or large files correctly? + await node.compute.http_query("POST", "/projects/{project_id}/files{path}".format(project_id=project.id, path=path), data=data, timeout=None, raw=True) response.set_status(201) diff --git a/gns3server/handlers/api/controller/notification_handler.py b/gns3server/handlers/api/controller/notification_handler.py index be64a9f3..7e7fd320 100644 --- a/gns3server/handlers/api/controller/notification_handler.py +++ b/gns3server/handlers/api/controller/notification_handler.py @@ -20,16 +20,14 @@ import aiohttp from aiohttp.web import WebSocketResponse from gns3server.web.route import Route from gns3server.controller import Controller -from gns3server.utils.asyncio import asyncio_ensure_future -@asyncio.coroutine -def process_websocket(ws): +async def process_websocket(ws): """ Process ping / pong and close message """ try: - yield from ws.receive() + await ws.receive() except aiohttp.WSServerHandshakeError: pass @@ -42,22 +40,22 @@ class NotificationHandler: status_codes={ 200: "End of stream" }) - def notification(request, response): + async def notification(request, response): controller = Controller.instance() response.content_type = "application/json" response.set_status(200) response.enable_chunked_encoding() - yield from response.prepare(request) + await response.prepare(request) with controller.notification.controller_queue() as queue: while True: try: - msg = yield from queue.get_json(5) + msg = await queue.get_json(5) response.write(("{}\n".format(msg)).encode("utf-8")) except asyncio.futures.CancelledError: break - yield from response.drain() + await response.drain() @Route.get( r"/notifications/ws", @@ -65,17 +63,17 @@ class NotificationHandler: status_codes={ 200: "End of stream" }) - def notification_ws(request, response): + async def notification_ws(request, response): controller = Controller.instance() ws = aiohttp.web.WebSocketResponse() - yield from ws.prepare(request) + await ws.prepare(request) - asyncio_ensure_future(process_websocket(ws)) + asyncio.ensure_future(process_websocket(ws)) with controller.notification.controller_queue() as queue: while True: try: - notification = yield from queue.get_json(5) + notification = await queue.get_json(5) except asyncio.futures.CancelledError: break if ws.closed: diff --git a/gns3server/handlers/api/controller/project_handler.py b/gns3server/handlers/api/controller/project_handler.py index 45ee9c3d..2e68f288 100644 --- a/gns3server/handlers/api/controller/project_handler.py +++ b/gns3server/handlers/api/controller/project_handler.py @@ -26,7 +26,6 @@ from gns3server.controller import Controller from gns3server.controller.import_project import import_project from 
gns3server.controller.export_project import export_project from gns3server.config import Config -from gns3server.utils.asyncio import asyncio_ensure_future from gns3server.schemas.project import ( @@ -40,13 +39,12 @@ import logging log = logging.getLogger() -@asyncio.coroutine -def process_websocket(ws): +async def process_websocket(ws): """ Process ping / pong and close message """ try: - yield from ws.receive() + await ws.receive() except aiohttp.WSServerHandshakeError: pass @@ -62,10 +60,10 @@ class ProjectHandler: }, output=PROJECT_OBJECT_SCHEMA, input=PROJECT_CREATE_SCHEMA) - def create_project(request, response): + async def create_project(request, response): controller = Controller.instance() - project = yield from controller.add_project(**request.json) + project = await controller.add_project(**request.json) response.set_status(201) response.json(project) @@ -119,13 +117,13 @@ class ProjectHandler: description="Update a project instance", input=PROJECT_UPDATE_SCHEMA, output=PROJECT_OBJECT_SCHEMA) - def update(request, response): + async def update(request, response): project = Controller.instance().get_project(request.match_info["project_id"]) # Ignore these because we only use them when creating a project request.json.pop("project_id", None) - yield from project.update(**request.json) + await project.update(**request.json) response.set_status(200) response.json(project) @@ -140,11 +138,11 @@ class ProjectHandler: 404: "The project doesn't exist" }, output=PROJECT_OBJECT_SCHEMA) - def close(request, response): + async def close(request, response): controller = Controller.instance() project = controller.get_project(request.match_info["project_id"]) - yield from project.close() + await project.close() response.set_status(201) response.json(project) @@ -159,11 +157,11 @@ class ProjectHandler: 404: "The project doesn't exist" }, output=PROJECT_OBJECT_SCHEMA) - def open(request, response): + async def open(request, response): controller = Controller.instance() project = controller.get_project(request.match_info["project_id"]) - yield from project.open() + await project.open() response.set_status(201) response.json(project) @@ -179,7 +177,7 @@ class ProjectHandler: }, input=PROJECT_LOAD_SCHEMA, output=PROJECT_OBJECT_SCHEMA) - def load(request, response): + async def load(request, response): controller = Controller.instance() config = Config.instance() @@ -187,7 +185,7 @@ class ProjectHandler: log.error("Can't load the project the server is not started with --local") response.set_status(403) return - project = yield from controller.load_project(request.json.get("path"),) + project = await controller.load_project(request.json.get("path"),) response.set_status(201) response.json(project) @@ -201,11 +199,11 @@ class ProjectHandler: 204: "Changes have been written on disk", 404: "The project doesn't exist" }) - def delete(request, response): + async def delete(request, response): controller = Controller.instance() project = controller.get_project(request.match_info["project_id"]) - yield from project.delete() + await project.delete() controller.remove_project(project) response.set_status(204) @@ -219,7 +217,7 @@ class ProjectHandler: 200: "End of stream", 404: "The project doesn't exist" }) - def notification(request, response): + async def notification(request, response): controller = Controller.instance() project = controller.get_project(request.match_info["project_id"]) @@ -228,22 +226,22 @@ class ProjectHandler: response.set_status(200) response.enable_chunked_encoding() - yield from 
response.prepare(request) + await response.prepare(request) with controller.notification.project_queue(project) as queue: while True: try: - msg = yield from queue.get_json(5) + msg = await queue.get_json(5) response.write(("{}\n".format(msg)).encode("utf-8")) except asyncio.futures.CancelledError as e: break - yield from response.drain() + await response.drain() if project.auto_close: # To avoid trouble with client connecting disconnecting we sleep few seconds before checking # if someone else is not connected - yield from asyncio.sleep(5) + await asyncio.sleep(5) if not controller.notification.project_has_listeners(project): - yield from project.close() + await project.close() @Route.get( r"/projects/{project_id}/notifications/ws", @@ -255,20 +253,20 @@ class ProjectHandler: 200: "End of stream", 404: "The project doesn't exist" }) - def notification_ws(request, response): + async def notification_ws(request, response): controller = Controller.instance() project = controller.get_project(request.match_info["project_id"]) ws = aiohttp.web.WebSocketResponse() - yield from ws.prepare(request) + await ws.prepare(request) - asyncio_ensure_future(process_websocket(ws)) + asyncio.ensure_future(process_websocket(ws)) with controller.notification.project_queue(project) as queue: while True: try: - notification = yield from queue.get_json(5) + notification = await queue.get_json(5) except asyncio.futures.CancelledError as e: break if ws.closed: @@ -278,9 +276,9 @@ class ProjectHandler: if project.auto_close: # To avoid trouble with client connecting disconnecting we sleep few seconds before checking # if someone else is not connected - yield from asyncio.sleep(5) + await asyncio.sleep(5) if not controller.notification.project_has_listeners(project): - yield from project.close() + await project.close() return ws @@ -295,14 +293,14 @@ class ProjectHandler: 200: "File returned", 404: "The project doesn't exist" }) - def export_project(request, response): + async def export_project(request, response): controller = Controller.instance() - project = yield from controller.get_loaded_project(request.match_info["project_id"]) + project = await controller.get_loaded_project(request.match_info["project_id"]) try: with tempfile.TemporaryDirectory() as tmp_dir: - stream = yield from export_project(project, + stream = await export_project(project, tmp_dir, include_images=bool(int(request.query.get("include_images", "0")))) # We need to do that now because export could failed and raise an HTTP error @@ -310,13 +308,13 @@ class ProjectHandler: response.content_type = 'application/gns3project' response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name) response.enable_chunked_encoding() - yield from response.prepare(request) + await response.prepare(request) for data in stream: response.write(data) - yield from response.drain() + await response.drain() - yield from response.write_eof() + await response.write_eof() # Will be raise if you have no space left or permission issue on your temporary directory # RuntimeError: something was wrong during the zip process except (ValueError, OSError, RuntimeError) as e: @@ -334,7 +332,7 @@ class ProjectHandler: 200: "Project imported", 403: "Forbidden to import project" }) - def import_project(request, response): + async def import_project(request, response): controller = Controller.instance() @@ -355,19 +353,19 @@ class ProjectHandler: if sys.version_info >= (3, 7) and sys.version_info < (3, 8): with tempfile.TemporaryFile() as temp: 
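The surrounding import handler reads the uploaded archive in 1024-byte chunks into a temporary file before handing it to import_project(). As a stand-alone sketch of that spooling step (the helper name is hypothetical and the caller is responsible for closing the returned file):

    import tempfile


    async def save_upload(request, max_in_memory=10000, chunk_size=1024):
        # request.content is aiohttp's StreamReader; read() is awaited chunk by chunk.
        temp = tempfile.SpooledTemporaryFile(max_size=max_in_memory)
        while True:
            chunk = await request.content.read(chunk_size)
            if not chunk:
                break
            temp.write(chunk)
        temp.seek(0)   # rewind so the caller can pass the file object on
        return temp
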
while True: - chunk = yield from request.content.read(1024) + chunk = await request.content.read(1024) if not chunk: break temp.write(chunk) - project = yield from import_project(controller, request.match_info["project_id"], temp, location=path, name=name) + project = await import_project(controller, request.match_info["project_id"], temp, location=path, name=name) else: with tempfile.SpooledTemporaryFile(max_size=10000) as temp: while True: - chunk = yield from request.content.read(1024) + chunk = await request.content.read(1024) if not chunk: break temp.write(chunk) - project = yield from import_project(controller, request.match_info["project_id"], temp, location=path, name=name) + project = await import_project(controller, request.match_info["project_id"], temp, location=path, name=name) except OSError as e: raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e)) @@ -387,10 +385,10 @@ class ProjectHandler: 403: "The server is not the local server", 404: "The project doesn't exist" }) - def duplicate(request, response): + async def duplicate(request, response): controller = Controller.instance() - project = yield from controller.get_loaded_project(request.match_info["project_id"]) + project = await controller.get_loaded_project(request.match_info["project_id"]) if request.json.get("path"): config = Config.instance() @@ -401,7 +399,7 @@ class ProjectHandler: else: location = None - new_project = yield from project.duplicate(name=request.json.get("name"), location=location) + new_project = await project.duplicate(name=request.json.get("name"), location=location) response.json(new_project) response.set_status(201) @@ -417,10 +415,10 @@ class ProjectHandler: 403: "Permission denied", 404: "The file doesn't exist" }) - def get_file(request, response): + async def get_file(request, response): controller = Controller.instance() - project = yield from controller.get_loaded_project(request.match_info["project_id"]) + project = await controller.get_loaded_project(request.match_info["project_id"]) path = request.match_info["path"] path = os.path.normpath(path).strip('/') @@ -435,12 +433,12 @@ class ProjectHandler: try: with open(path, "rb") as f: - yield from response.prepare(request) + await response.prepare(request) while True: data = f.read(4096) if not data: break - yield from response.write(data) + await response.write(data) except FileNotFoundError: raise aiohttp.web.HTTPNotFound() @@ -459,10 +457,10 @@ class ProjectHandler: 403: "Permission denied", 404: "The path doesn't exist" }) - def write_file(request, response): + async def write_file(request, response): controller = Controller.instance() - project = yield from controller.get_loaded_project(request.match_info["project_id"]) + project = await controller.get_loaded_project(request.match_info["project_id"]) path = request.match_info["path"] path = os.path.normpath(path).strip("/") @@ -477,7 +475,7 @@ class ProjectHandler: with open(path, 'wb+') as f: while True: try: - chunk = yield from request.content.read(1024) + chunk = await request.content.read(1024) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path)) if not chunk: diff --git a/gns3server/handlers/api/controller/server_handler.py b/gns3server/handlers/api/controller/server_handler.py index 3a3d7006..a7422816 100644 --- a/gns3server/handlers/api/controller/server_handler.py +++ b/gns3server/handlers/api/controller/server_handler.py @@ -20,7 +20,6 @@ from gns3server.config import 
Config from gns3server.controller import Controller from gns3server.schemas.version import VERSION_SCHEMA from gns3server.version import __version__ -from gns3server.utils.asyncio import asyncio_ensure_future from aiohttp.web import HTTPConflict, HTTPForbidden @@ -44,7 +43,7 @@ class ServerHandler: 201: "Server is shutting down", 403: "Server shutdown refused" }) - def shutdown(request, response): + async def shutdown(request, response): config = Config.instance() if config.get_section_config("Server").getboolean("local", False) is False: @@ -58,10 +57,10 @@ class ServerHandler: tasks = [] for project in projects: - tasks.append(asyncio_ensure_future(project.close())) + tasks.append(asyncio.ensure_future(project.close())) if tasks: - done, _ = yield from asyncio.wait(tasks) + done, _ = await asyncio.wait(tasks) for future in done: try: future.result() @@ -73,7 +72,7 @@ class ServerHandler: from gns3server.web.web_server import WebServer server = WebServer.instance() try: - asyncio_ensure_future(server.shutdown_server()) + asyncio.ensure_future(server.shutdown_server()) except asyncio.CancelledError: pass response.set_status(201) @@ -105,7 +104,7 @@ class ServerHandler: @Route.get( r"/settings", description="Retrieve gui settings from the server. Temporary will we removed in later release") - def read_settings(request, response): + async def read_settings(request, response): settings = None while True: @@ -115,7 +114,7 @@ class ServerHandler: if settings is not None: break - yield from asyncio.sleep(0.5) + await asyncio.sleep(0.5) response.json(settings) @Route.post( @@ -142,7 +141,7 @@ class ServerHandler: status_codes={ 201: "Written" }) - def debug(request, response): + async def debug(request, response): config = Config.instance() if config.get_section_config("Server").getboolean("local", False) is False: @@ -170,7 +169,7 @@ class ServerHandler: for compute in list(Controller.instance().computes.values()): try: - r = yield from compute.get("/debug", raw=True) + r = await compute.get("/debug", raw=True) data = r.body.decode("utf-8") except Exception as e: data = str(e) diff --git a/gns3server/handlers/api/controller/snapshot_handler.py b/gns3server/handlers/api/controller/snapshot_handler.py index 81ae8113..513bbe3e 100644 --- a/gns3server/handlers/api/controller/snapshot_handler.py +++ b/gns3server/handlers/api/controller/snapshot_handler.py @@ -43,10 +43,10 @@ class SnapshotHandler: 201: "Snasphot created", 404: "The project doesn't exist" }) - def create(request, response): + async def create(request, response): controller = Controller.instance() project = controller.get_project(request.match_info["project_id"]) - snapshot = yield from project.snapshot(request.json["name"]) + snapshot = await project.snapshot(request.json["name"]) response.json(snapshot) response.set_status(201) @@ -77,11 +77,11 @@ class SnapshotHandler: 204: "Changes have been written on disk", 404: "The project or snapshot doesn't exist" }) - def delete(request, response): + async def delete(request, response): controller = Controller.instance() project = controller.get_project(request.match_info["project_id"]) - yield from project.delete_snapshot(request.match_info["snapshot_id"]) + await project.delete_snapshot(request.match_info["snapshot_id"]) response.set_status(204) @Route.post( @@ -96,11 +96,11 @@ class SnapshotHandler: 201: "The snapshot has been restored", 404: "The project or snapshot doesn't exist" }) - def restore(request, response): + async def restore(request, response): controller = 
Controller.instance() project = controller.get_project(request.match_info["project_id"]) snapshot = project.get_snapshot(request.match_info["snapshot_id"]) - project = yield from snapshot.restore() + project = await snapshot.restore() response.set_status(201) response.json(project) diff --git a/gns3server/handlers/api/controller/symbol_handler.py b/gns3server/handlers/api/controller/symbol_handler.py index 3d378766..8910e068 100644 --- a/gns3server/handlers/api/controller/symbol_handler.py +++ b/gns3server/handlers/api/controller/symbol_handler.py @@ -47,11 +47,11 @@ class SymbolHandler: status_codes={ 200: "Symbol returned" }) - def raw(request, response): + async def raw(request, response): controller = Controller.instance() try: - yield from response.file(controller.symbols.get_path(request.match_info["symbol_id"])) + await response.file(controller.symbols.get_path(request.match_info["symbol_id"])) except (KeyError, OSError) as e: log.warning("Could not get symbol file: {}".format(e)) response.set_status(404) @@ -63,14 +63,14 @@ class SymbolHandler: 200: "Symbol returned" }, raw=True) - def upload(request, response): + async def upload(request, response): controller = Controller.instance() path = os.path.join(controller.symbols.symbols_path(), os.path.basename(request.match_info["symbol_id"])) try: with open(path, "wb") as f: while True: try: - chunk = yield from request.content.read(1024) + chunk = await request.content.read(1024) except asyncio.TimeoutError: raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to symbol '{}'".format(path)) if not chunk: diff --git a/gns3server/handlers/index_handler.py b/gns3server/handlers/index_handler.py index 8c6a486f..e35c6056 100644 --- a/gns3server/handlers/index_handler.py +++ b/gns3server/handlers/index_handler.py @@ -78,7 +78,7 @@ class IndexHandler: }, raw=True, description="Get static resource") - def webui(request, response): + async def webui(request, response): filename = request.match_info["filename"] filename = os.path.normpath(filename).strip("/") filename = os.path.join('web-ui', filename) @@ -92,7 +92,7 @@ class IndexHandler: if not os.path.exists(static): static = get_static_path(os.path.join('web-ui', 'index.html')) - yield from response.file(static) + await response.file(static) @Route.get( r"/v1/version", diff --git a/gns3server/notification_queue.py b/gns3server/notification_queue.py index 2412d5cb..889e49b9 100644 --- a/gns3server/notification_queue.py +++ b/gns3server/notification_queue.py @@ -30,8 +30,7 @@ class NotificationQueue(asyncio.Queue): super().__init__() self._first = True - @asyncio.coroutine - def get(self, timeout): + async def get(self, timeout): """ When timeout is expire we send a ping notification with server information """ @@ -42,17 +41,16 @@ class NotificationQueue(asyncio.Queue): return ("ping", PingStats.get(), {}) try: - (action, msg, kwargs) = yield from asyncio.wait_for(super().get(), timeout) + (action, msg, kwargs) = await asyncio.wait_for(super().get(), timeout) except asyncio.futures.TimeoutError: return ("ping", PingStats.get(), {}) return (action, msg, kwargs) - @asyncio.coroutine - def get_json(self, timeout): + async def get_json(self, timeout): """ Get a message as a JSON """ - (action, msg, kwargs) = yield from self.get(timeout) + (action, msg, kwargs) = await self.get(timeout) if hasattr(msg, "__json__"): msg = {"action": action, "event": msg.__json__()} else: diff --git a/gns3server/run.py b/gns3server/run.py index c3b4bc27..5caeb4ad 100644 --- a/gns3server/run.py +++ 
b/gns3server/run.py @@ -223,9 +223,9 @@ def run(): if server_config.getboolean("local"): log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem") - # we only support Python 3 version >= 3.4 - if sys.version_info < (3, 4): - raise SystemExit("Python 3.4 or higher is required") + # we only support Python 3 version >= 3.5 + if sys.version_info < (3, 5, 3): + raise SystemExit("Python 3.5.3 or higher is required") user_log.info("Running with Python {major}.{minor}.{micro} and has PID {pid}".format( major=sys.version_info[0], minor=sys.version_info[1], diff --git a/gns3server/ubridge/hypervisor.py b/gns3server/ubridge/hypervisor.py index 643af129..24e94064 100644 --- a/gns3server/ubridge/hypervisor.py +++ b/gns3server/ubridge/hypervisor.py @@ -129,13 +129,12 @@ class Hypervisor(UBridgeHypervisor): return self._version - @asyncio.coroutine - def _check_ubridge_version(self, env=None): + async def _check_ubridge_version(self, env=None): """ Checks if the ubridge executable version """ try: - output = yield from subprocess_check_output(self._path, "-v", cwd=self._working_dir, env=env) + output = await subprocess_check_output(self._path, "-v", cwd=self._working_dir, env=env) match = re.search("ubridge version ([0-9a-z\.]+)", output) if match: self._version = match.group(1) @@ -152,8 +151,7 @@ class Hypervisor(UBridgeHypervisor): except (OSError, subprocess.SubprocessError) as e: raise UbridgeError("Error while looking for uBridge version: {}".format(e)) - @asyncio.coroutine - def start(self): + async def start(self): """ Starts the uBridge hypervisor process. """ @@ -164,14 +162,14 @@ class Hypervisor(UBridgeHypervisor): system_root = os.path.join(os.path.expandvars("%SystemRoot%"), "System32", "Npcap") if os.path.isdir(system_root): env["PATH"] = system_root + ';' + env["PATH"] - yield from self._check_ubridge_version(env) + await self._check_ubridge_version(env) try: command = self._build_command() log.info("starting ubridge: {}".format(command)) self._stdout_file = os.path.join(self._working_dir, "ubridge.log") log.info("logging to {}".format(self._stdout_file)) with open(self._stdout_file, "w", encoding="utf-8") as fd: - self._process = yield from asyncio.create_subprocess_exec(*command, + self._process = await asyncio.create_subprocess_exec(*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self._working_dir, @@ -194,17 +192,16 @@ class Hypervisor(UBridgeHypervisor): if returncode != 0: self._project.emit("log.error", {"message": "uBridge process has stopped, return code: {}\n{}".format(returncode, self.read_stdout())}) - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops the uBridge hypervisor process. """ if self.is_running(): log.info("Stopping uBridge process PID={}".format(self._process.pid)) - yield from UBridgeHypervisor.stop(self) + await UBridgeHypervisor.stop(self) try: - yield from wait_for_process_termination(self._process, timeout=3) + await wait_for_process_termination(self._process, timeout=3) except asyncio.TimeoutError: if self._process and self._process.returncode is None: log.warning("uBridge process {} is still running... 
killing it".format(self._process.pid)) diff --git a/gns3server/ubridge/ubridge_hypervisor.py b/gns3server/ubridge/ubridge_hypervisor.py index 026045c4..0bc8884e 100644 --- a/gns3server/ubridge/ubridge_hypervisor.py +++ b/gns3server/ubridge/ubridge_hypervisor.py @@ -50,8 +50,7 @@ class UBridgeHypervisor: self._reader = None self._writer = None - @asyncio.coroutine - def connect(self, timeout=10): + async def connect(self, timeout=10): """ Connects to the hypervisor. """ @@ -69,9 +68,9 @@ class UBridgeHypervisor: connection_success = False last_exception = None while time.time() - begin < timeout: - yield from asyncio.sleep(0.01) + await asyncio.sleep(0.01) try: - self._reader, self._writer = yield from asyncio.open_connection(host, self._port) + self._reader, self._writer = await asyncio.open_connection(host, self._port) except OSError as e: last_exception = e continue @@ -84,7 +83,7 @@ class UBridgeHypervisor: log.info("Connected to uBridge hypervisor on {}:{} after {:.4f} seconds".format(host, self._port, time.time() - begin)) try: - version = yield from self.send("hypervisor version") + version = await self.send("hypervisor version") self._version = version[0].split("-", 1)[0] except IndexError: self._version = "Unknown" @@ -99,42 +98,39 @@ class UBridgeHypervisor: return self._version - @asyncio.coroutine - def close(self): + async def close(self): """ Closes the connection to this hypervisor (but leave it running). """ - yield from self.send("hypervisor close") + await self.send("hypervisor close") self._writer.close() self._reader, self._writer = None - @asyncio.coroutine - def stop(self): + async def stop(self): """ Stops this hypervisor (will no longer run). """ try: # try to properly stop the hypervisor - yield from self.send("hypervisor stop") + await self.send("hypervisor stop") except UbridgeError: pass try: if self._writer is not None: - yield from self._writer.drain() + await self._writer.drain() self._writer.close() except OSError as e: log.debug("Stopping hypervisor {}:{} {}".format(self._host, self._port, e)) self._reader = self._writer = None - @asyncio.coroutine - def reset(self): + async def reset(self): """ Resets this hypervisor (used to get an empty configuration). """ - yield from self.send("hypervisor reset") + await self.send("hypervisor reset") @property def port(self): @@ -177,8 +173,7 @@ class UBridgeHypervisor: self._host = host @locking - @asyncio.coroutine - def send(self, command): + async def send(self, command): """ Sends commands to this hypervisor. 
@@ -207,7 +202,7 @@ class UBridgeHypervisor: command = command.strip() + '\n' log.debug("sending {}".format(command)) self._writer.write(command.encode()) - yield from self._writer.drain() + await self._writer.drain() except OSError as e: raise UbridgeError("Lost communication with {host}:{port} when sending command '{command}': {error}, uBridge process running: {run}" .format(host=self._host, port=self._port, command=command, error=e, run=self.is_running())) @@ -220,7 +215,7 @@ class UBridgeHypervisor: while True: try: try: - chunk = yield from self._reader.read(1024) + chunk = await self._reader.read(1024) except asyncio.CancelledError: # task has been canceled but continue to read # any remaining data sent by the hypervisor @@ -237,7 +232,7 @@ class UBridgeHypervisor: .format(host=self._host, port=self._port, command=command, run=self.is_running())) else: retries += 1 - yield from asyncio.sleep(0.1) + await asyncio.sleep(0.1) continue retries = 0 buf += chunk.decode("utf-8") diff --git a/gns3server/utils/asyncio/__init__.py b/gns3server/utils/asyncio/__init__.py index 7ed4528d..ea0dc363 100644 --- a/gns3server/utils/asyncio/__init__.py +++ b/gns3server/utils/asyncio/__init__.py @@ -25,8 +25,7 @@ import threading from asyncio.futures import CancelledError -@asyncio.coroutine -def wait_run_in_executor(func, *args, **kwargs): +async def wait_run_in_executor(func, *args, **kwargs): """ Run blocking code in a different thread and wait for the result. @@ -39,12 +38,11 @@ def wait_run_in_executor(func, *args, **kwargs): loop = asyncio.get_event_loop() future = loop.run_in_executor(None, functools.partial(func, *args, **kwargs)) - yield from asyncio.wait([future]) + await asyncio.wait([future]) return future.result() -@asyncio.coroutine -def cancellable_wait_run_in_executor(func, *args, **kwargs): +async def cancellable_wait_run_in_executor(func, *args, **kwargs): """ Run blocking code in a different thread and wait for the result. Support cancellation. @@ -57,13 +55,12 @@ def cancellable_wait_run_in_executor(func, *args, **kwargs): stopped_event = threading.Event() kwargs['stopped_event'] = stopped_event try: - yield from wait_run_in_executor(func, *args, **kwargs) + await wait_run_in_executor(func, *args, **kwargs) except CancelledError: stopped_event.set() -@asyncio.coroutine -def subprocess_check_output(*args, cwd=None, env=None, stderr=False): +async def subprocess_check_output(*args, cwd=None, env=None, stderr=False): """ Run a command and capture output @@ -75,11 +72,11 @@ def subprocess_check_output(*args, cwd=None, env=None, stderr=False): """ if stderr: - proc = yield from asyncio.create_subprocess_exec(*args, stderr=asyncio.subprocess.PIPE, cwd=cwd, env=env) - output = yield from proc.stderr.read() + proc = await asyncio.create_subprocess_exec(*args, stderr=asyncio.subprocess.PIPE, cwd=cwd, env=env) + output = await proc.stderr.read() else: - proc = yield from asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE, cwd=cwd, env=env) - output = yield from proc.stdout.read() + proc = await asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE, cwd=cwd, env=env) + output = await proc.stdout.read() if output is None: return "" # If we received garbage we ignore invalid characters @@ -87,14 +84,13 @@ def subprocess_check_output(*args, cwd=None, env=None, stderr=False): # and the code of VPCS, dynamips... 
Will detect it's not the correct binary return output.decode("utf-8", errors="ignore") -@asyncio.coroutine -def wait_for_process_termination(process, timeout=10): +async def wait_for_process_termination(process, timeout=10): """ Wait for a process terminate, and raise asyncio.TimeoutError in case of timeout. In theory this can be implemented by just: - yield from asyncio.wait_for(self._iou_process.wait(), timeout=100) + await asyncio.wait_for(self._iou_process.wait(), timeout=100) But it's broken before Python 3.4: http://bugs.python.org/issue23140 @@ -105,24 +101,23 @@ def wait_for_process_termination(process, timeout=10): if sys.version_info >= (3, 5): try: - yield from asyncio.wait_for(process.wait(), timeout=timeout) + await asyncio.wait_for(process.wait(), timeout=timeout) except ProcessLookupError: return else: while timeout > 0: if process.returncode is not None: return - yield from asyncio.sleep(0.1) + await asyncio.sleep(0.1) timeout -= 0.1 raise asyncio.TimeoutError() -@asyncio.coroutine -def _check_process(process, termination_callback): +async def _check_process(process, termination_callback): if not hasattr(sys, "_called_from_test") or not sys._called_from_test: - returncode = yield from process.wait() + returncode = await process.wait() if asyncio.iscoroutinefunction(termination_callback): - yield from termination_callback(returncode) + await termination_callback(returncode) else: termination_callback(returncode) @@ -130,22 +125,20 @@ def _check_process(process, termination_callback): def monitor_process(process, termination_callback): """Call termination_callback when a process dies""" - asyncio_ensure_future(_check_process(process, termination_callback)) + asyncio.ensure_future(_check_process(process, termination_callback)) -@asyncio.coroutine -def wait_for_file_creation(path, timeout=10): +async def wait_for_file_creation(path, timeout=10): while timeout > 0: if os.path.exists(path): return - yield from asyncio.sleep(0.5) + await asyncio.sleep(0.5) timeout -= 0.5 raise asyncio.TimeoutError() -@asyncio.coroutine -def wait_for_named_pipe_creation(pipe_path, timeout=60): +async def wait_for_named_pipe_creation(pipe_path, timeout=60): import win32pipe import pywintypes @@ -154,38 +147,20 @@ def wait_for_named_pipe_creation(pipe_path, timeout=60): try: win32pipe.WaitNamedPipe(pipe_path, 1) except pywintypes.error: - yield from asyncio.sleep(0.5) + await asyncio.sleep(0.5) timeout -= 0.5 else: return raise asyncio.TimeoutError() -#FIXME: Use the following wrapper when we drop Python 3.4 and use the async def syntax -# def locking(f): -# -# @wraps(f) -# async def wrapper(oself, *args, **kwargs): -# lock_name = "__" + f.__name__ + "_lock" -# if not hasattr(oself, lock_name): -# setattr(oself, lock_name, asyncio.Lock()) -# async with getattr(oself, lock_name): -# return await f(oself, *args, **kwargs) -# return wrapper def locking(f): @functools.wraps(f) - def wrapper(oself, *args, **kwargs): + async def wrapper(oself, *args, **kwargs): lock_name = "__" + f.__name__ + "_lock" if not hasattr(oself, lock_name): setattr(oself, lock_name, asyncio.Lock()) - with (yield from getattr(oself, lock_name)): - return (yield from f(oself, *args, **kwargs)) + async with getattr(oself, lock_name): + return await f(oself, *args, **kwargs) return wrapper - -#FIXME: conservative approach to supported versions, please remove it when we drop the support to Python < 3.4.4 -try: - from asyncio import ensure_future - asyncio_ensure_future = asyncio.ensure_future -except ImportError: - 
asyncio_ensure_future = getattr(asyncio, 'async') diff --git a/gns3server/utils/asyncio/embed_shell.py b/gns3server/utils/asyncio/embed_shell.py index 4797c6f2..156e6979 100644 --- a/gns3server/utils/asyncio/embed_shell.py +++ b/gns3server/utils/asyncio/embed_shell.py @@ -81,8 +81,7 @@ class EmbedShell: def welcome_message(self, welcome_message): self._welcome_message = welcome_message - @asyncio.coroutine - def help(self, *args): + async def help(self, *args): """ Show help """ @@ -105,8 +104,7 @@ class EmbedShell: res += '\nhelp command for details about a command\n' return res - @asyncio.coroutine - def _parse_command(self, text): + async def _parse_command(self, text): cmd = text.split(' ') found = False if cmd[0] == '?': @@ -119,22 +117,21 @@ class EmbedShell: for (name, meth) in inspect.getmembers(self): if name == cmd[0]: cmd.pop(0) - res = yield from meth(*cmd) + res = await meth(*cmd) found = True break if not found: - res = ('Command not found {}\n'.format(cmd[0]) + (yield from self.help())) + res = ('Command not found {}\n'.format(cmd[0]) + (await self.help())) return res - @asyncio.coroutine - def run(self): + async def run(self): if self._welcome_message: self._writer.feed_data(self._welcome_message.encode()) while True: self._writer.feed_data(self._prompt.encode()) - result = yield from self._reader.readline() + result = await self._reader.readline() result = result.decode().strip('\n') - res = yield from self._parse_command(result) + res = await self._parse_command(result) self._writer.feed_data(res.encode()) def get_commands(self): @@ -208,8 +205,7 @@ class ShellConnection(TelnetConnection): self.encoding = 'utf-8' - @asyncio.coroutine - def connected(self): + async def connected(self): # prompt_toolkit internally checks if it's on windows during output rendering but # we need to force that we use Vt100_Output not Win32_Output from prompt_toolkit import renderer @@ -235,16 +231,14 @@ class ShellConnection(TelnetConnection): self._cli._redraw() - @asyncio.coroutine - def disconnected(self): + async def disconnected(self): pass def window_size_changed(self, columns, rows): self._size = Size(rows=rows, columns=columns) self._cb.terminal_size_changed() - @asyncio.coroutine - def feed(self, data): + async def feed(self, data): data = data.decode() self._inputstream.feed(data) self._cli._redraw() @@ -260,7 +254,7 @@ class ShellConnection(TelnetConnection): command = returned_value.text - res = yield from self._shell._parse_command(command) + res = await self._shell._parse_command(command) self.send(res.encode()) self.reset() @@ -305,20 +299,18 @@ def create_stdin_shell(shell, loop=None): :param loop: The event loop :returns: Telnet server """ - @asyncio.coroutine - def feed_stdin(loop, reader, shell): + async def feed_stdin(loop, reader, shell): history = InMemoryHistory() completer = WordCompleter([name for name, _ in shell.get_commands()], ignore_case=True) while True: - line = yield from prompt( + line = await prompt( ">", patch_stdout=True, return_asyncio_coroutine=True, history=history, completer=completer) line += '\n' reader.feed_data(line.encode()) - @asyncio.coroutine - def read_stdout(writer): + async def read_stdout(writer): while True: - c = yield from writer.read(1) + c = await writer.read(1) print(c.decode(), end='') sys.stdout.flush() @@ -339,22 +331,20 @@ if __name__ == '__main__': class Demo(EmbedShell): - @asyncio.coroutine - def hello(self, *args): + async def hello(self, *args): """ Hello world This command accept arguments: hello tutu will display tutu """ - 
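After the conversion, EmbedShell commands are ordinary async def methods that _parse_command() looks up by name and awaits, as in the Demo class here. A minimal stand-alone version of that dispatch (class and command names are made up):

    import asyncio
    import inspect


    class TinyShell:
        async def hello(self, *args):
            """Say hello, optionally echoing the arguments"""
            return "hello {}\n".format(" ".join(args) or "world")

        async def run_command(self, line):
            name, *args = line.split()
            for attr, method in inspect.getmembers(self):
                if attr == name and asyncio.iscoroutinefunction(method):
                    return await method(*args)
            return "Command not found: {}\n".format(name)


    print(asyncio.get_event_loop().run_until_complete(TinyShell().run_command("hello tutu")))
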
@asyncio.coroutine - def world(): - yield from asyncio.sleep(2) + async def world(): + await asyncio.sleep(2) if len(args): return ' '.join(args) else: return 'world\n' - return (yield from world()) + return (await world()) # Demo using telnet shell = Demo(welcome_message="Welcome!\n") diff --git a/gns3server/utils/asyncio/pool.py b/gns3server/utils/asyncio/pool.py index eb03365f..0015e29c 100644 --- a/gns3server/utils/asyncio/pool.py +++ b/gns3server/utils/asyncio/pool.py @@ -30,8 +30,7 @@ class Pool(): def append(self, task, *args, **kwargs): self._tasks.append((task, args, kwargs)) - @asyncio.coroutine - def join(self): + async def join(self): """ Wait for all task to finish """ @@ -41,7 +40,7 @@ class Pool(): while len(self._tasks) > 0 and len(pending) < self._concurrency: task, args, kwargs = self._tasks.pop(0) pending.add(task(*args, **kwargs)) - (done, pending) = yield from asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) + (done, pending) = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) for task in done: if task.exception(): exceptions.add(task.exception()) @@ -50,10 +49,9 @@ class Pool(): def main(): - @asyncio.coroutine - def task(id): + async def task(id): print("Run", id) - yield from asyncio.sleep(0.5) + await asyncio.sleep(0.5) pool = Pool(concurrency=5) for i in range(1, 20): diff --git a/gns3server/utils/asyncio/raw_command_server.py b/gns3server/utils/asyncio/raw_command_server.py index 1a50ed0b..a0533dd9 100644 --- a/gns3server/utils/asyncio/raw_command_server.py +++ b/gns3server/utils/asyncio/raw_command_server.py @@ -20,8 +20,6 @@ import copy import asyncio import asyncio.subprocess -from gns3server.utils.asyncio import asyncio_ensure_future - import logging log = logging.getLogger(__name__) @@ -44,24 +42,22 @@ class AsyncioRawCommandServer: # We limit number of process self._lock = asyncio.Semaphore(value=4) - @asyncio.coroutine - def run(self, network_reader, network_writer): - yield from self._lock.acquire() - process = yield from asyncio.subprocess.create_subprocess_exec(*self._command, + async def run(self, network_reader, network_writer): + await self._lock.acquire() + process = await asyncio.subprocess.create_subprocess_exec(*self._command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE) try: - yield from self._process(network_reader, network_writer, process.stdout, process.stdin) + await self._process(network_reader, network_writer, process.stdout, process.stdin) except ConnectionResetError: network_writer.close() if process.returncode is None: process.kill() - yield from process.wait() + await process.wait() self._lock.release() - @asyncio.coroutine - def _process(self, network_reader, network_writer, process_reader, process_writer): + async def _process(self, network_reader, network_writer, process_reader, process_writer): replaces = [] # Server host from the client point of view host = network_writer.transport.get_extra_info("sockname")[0] @@ -71,12 +67,12 @@ class AsyncioRawCommandServer: else: replaces.append((replace[0], replace[1], )) - network_read = asyncio_ensure_future(network_reader.read(READ_SIZE)) - reader_read = asyncio_ensure_future(process_reader.read(READ_SIZE)) + network_read = asyncio.ensure_future(network_reader.read(READ_SIZE)) + reader_read = asyncio.ensure_future(process_reader.read(READ_SIZE)) timeout = 30 while True: - done, pending = yield from asyncio.wait( + done, pending = await asyncio.wait( [ network_read, reader_read @@ -91,22 +87,22 @@ class 
AsyncioRawCommandServer: if network_reader.at_eof(): raise ConnectionResetError() - network_read = asyncio_ensure_future(network_reader.read(READ_SIZE)) + network_read = asyncio.ensure_future(network_reader.read(READ_SIZE)) process_writer.write(data) - yield from process_writer.drain() + await process_writer.drain() elif coro == reader_read: if process_reader.at_eof(): raise ConnectionResetError() - reader_read = asyncio_ensure_future(process_reader.read(READ_SIZE)) + reader_read = asyncio.ensure_future(process_reader.read(READ_SIZE)) for replace in replaces: data = data.replace(replace[0], replace[1]) timeout = 2 # We reduce the timeout when the process start to return stuff to avoid problem with server not closing the connection network_writer.write(data) - yield from network_writer.drain() + await network_writer.drain() if __name__ == '__main__': diff --git a/gns3server/utils/asyncio/serial.py b/gns3server/utils/asyncio/serial.py index c118a87e..765fda29 100644 --- a/gns3server/utils/asyncio/serial.py +++ b/gns3server/utils/asyncio/serial.py @@ -47,8 +47,7 @@ class SerialReaderWriterProtocol(asyncio.Protocol): if self.transport: self.transport.write(data) - @asyncio.coroutine - def drain(self): + async def drain(self): pass def connection_made(self, transport): @@ -72,13 +71,12 @@ class WindowsPipe: self._handle = open(path, "a+b") self._pipe = msvcrt.get_osfhandle(self._handle.fileno()) - @asyncio.coroutine - def read(self, n=-1): + async def read(self, n=-1): (read, num_avail, num_message) = win32pipe.PeekNamedPipe(self._pipe, 0) if num_avail > 0: (error_code, output) = win32file.ReadFile(self._pipe, num_avail, None) return output - yield from asyncio.sleep(0.01) + await asyncio.sleep(0.01) return b"" def at_eof(self): @@ -87,16 +85,14 @@ class WindowsPipe: def write(self, data): win32file.WriteFile(self._pipe, data) - @asyncio.coroutine - def drain(self): + async def drain(self): return def close(self): pass -@asyncio.coroutine -def _asyncio_open_serial_windows(path): +async def _asyncio_open_serial_windows(path): """ Open a windows named pipe @@ -104,14 +100,13 @@ def _asyncio_open_serial_windows(path): """ try: - yield from wait_for_named_pipe_creation(path) + await wait_for_named_pipe_creation(path) except asyncio.TimeoutError: raise NodeError('Pipe file "{}" is missing'.format(path)) return WindowsPipe(path) -@asyncio.coroutine -def _asyncio_open_serial_unix(path): +async def _asyncio_open_serial_unix(path): """ Open a unix socket or a windows named pipe @@ -120,20 +115,19 @@ def _asyncio_open_serial_unix(path): try: # wait for VM to create the pipe file. 
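The raw command server above and the telnet server further down multiplex two streams the same way: keep one pending read per direction, asyncio.wait() on them with FIRST_COMPLETED, and re-arm only the side that fired (asyncio.ensure_future replacing the old asyncio_ensure_future shim). A generic sketch of that pump, with placeholder stream names:

    import asyncio


    async def pump(reader_a, writer_a, reader_b, writer_b, read_size=4096):
        # One pending read per side; whichever finishes first is forwarded and re-armed.
        read_a = asyncio.ensure_future(reader_a.read(read_size))
        read_b = asyncio.ensure_future(reader_b.read(read_size))
        while True:
            done, _ = await asyncio.wait([read_a, read_b], return_when=asyncio.FIRST_COMPLETED)
            for task in done:
                data = task.result()
                if not data:                 # EOF on either side ends the session
                    read_a.cancel()
                    read_b.cancel()
                    return
                if task is read_a:
                    writer_b.write(data)
                    await writer_b.drain()
                    read_a = asyncio.ensure_future(reader_a.read(read_size))
                else:
                    writer_a.write(data)
                    await writer_a.drain()
                    read_b = asyncio.ensure_future(reader_b.read(read_size))
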
- yield from wait_for_file_creation(path) + await wait_for_file_creation(path) except asyncio.TimeoutError: raise NodeError('Pipe file "{}" is missing'.format(path)) output = SerialReaderWriterProtocol() try: - yield from asyncio.get_event_loop().create_unix_connection(lambda: output, path) + await asyncio.get_event_loop().create_unix_connection(lambda: output, path) except ConnectionRefusedError: raise NodeError('Can\'t open pipe file "{}"'.format(path)) return output -@asyncio.coroutine -def asyncio_open_serial(path): +async def asyncio_open_serial(path): """ Open a unix socket or a windows named pipe @@ -141,6 +135,6 @@ def asyncio_open_serial(path): """ if sys.platform.startswith("win"): - return (yield from _asyncio_open_serial_windows(path)) + return (await _asyncio_open_serial_windows(path)) else: - return (yield from _asyncio_open_serial_unix(path)) + return (await _asyncio_open_serial_unix(path)) diff --git a/gns3server/utils/asyncio/telnet_server.py b/gns3server/utils/asyncio/telnet_server.py index 9223fc16..a2f5e436 100644 --- a/gns3server/utils/asyncio/telnet_server.py +++ b/gns3server/utils/asyncio/telnet_server.py @@ -15,13 +15,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import re import asyncio import asyncio.subprocess import struct -from gns3server.utils.asyncio import asyncio_ensure_future - import logging log = logging.getLogger(__name__) @@ -75,13 +72,11 @@ class TelnetConnection(object): def writer(self): return self._writer - @asyncio.coroutine - def connected(self): + async def connected(self): """Method called when client is connected""" pass - @asyncio.coroutine - def disconnected(self): + async def disconnected(self): """Method called when client is disconnecting""" pass @@ -90,8 +85,7 @@ class TelnetConnection(object): `naws` flag is enable in server configuration.""" pass - @asyncio.coroutine - def feed(self, data): + async def feed(self, data): """ Handles incoming data :return: @@ -148,8 +142,7 @@ class AsyncioTelnetServer: self._connection_factory = connection_factory @staticmethod - @asyncio.coroutine - def write_client_intro(writer, echo=False): + async def write_client_intro(writer, echo=False): # Send initial telnet session opening if echo: writer.write(bytes([IAC, WILL, ECHO])) @@ -157,10 +150,9 @@ class AsyncioTelnetServer: writer.write(bytes([ IAC, WONT, ECHO, IAC, DONT, ECHO])) - yield from writer.drain() + await writer.drain() - @asyncio.coroutine - def _write_intro(self, writer, binary=False, echo=False, naws=False): + async def _write_intro(self, writer, binary=False, echo=False, naws=False): # Send initial telnet session opening if echo: writer.write(bytes([IAC, WILL, ECHO])) @@ -185,20 +177,19 @@ class AsyncioTelnetServer: writer.write(bytes([ IAC, DO, NAWS ])) - yield from writer.drain() + await writer.drain() - @asyncio.coroutine - def run(self, network_reader, network_writer): + async def run(self, network_reader, network_writer): # Keep track of connected clients connection = self._connection_factory(network_reader, network_writer) self._connections[network_writer] = connection try: - yield from self._write_intro(network_writer, echo=self._echo, binary=self._binary, naws=self._naws) - yield from connection.connected() - yield from self._process(network_reader, network_writer, connection) + await self._write_intro(network_writer, echo=self._echo, binary=self._binary, naws=self._naws) + await connection.connected() + await self._process(network_reader, network_writer, 
connection) except ConnectionError: - with (yield from self._lock): + async with self._lock: network_writer.close() if self._reader_process == network_reader: self._reader_process = None @@ -206,53 +197,49 @@ class AsyncioTelnetServer: if self._current_read is not None: self._current_read.cancel() - yield from connection.disconnected() + await connection.disconnected() del self._connections[network_writer] - @asyncio.coroutine - def close(self): + async def close(self): for writer, connection in self._connections.items(): try: writer.write_eof() - yield from writer.drain() + await writer.drain() except (AttributeError, ConnectionError): continue - @asyncio.coroutine - def client_connected_hook(self): + async def client_connected_hook(self): pass - @asyncio.coroutine - def _get_reader(self, network_reader): + async def _get_reader(self, network_reader): """ Get a reader or None if another reader is already reading. """ - with (yield from self._lock): + async with self._lock: if self._reader_process is None: self._reader_process = network_reader if self._reader: if self._reader_process == network_reader: - self._current_read = asyncio_ensure_future(self._reader.read(READ_SIZE)) + self._current_read = asyncio.ensure_future(self._reader.read(READ_SIZE)) return self._current_read return None - @asyncio.coroutine - def _process(self, network_reader, network_writer, connection): - network_read = asyncio_ensure_future(network_reader.read(READ_SIZE)) - reader_read = yield from self._get_reader(network_reader) + async def _process(self, network_reader, network_writer, connection): + network_read = asyncio.ensure_future(network_reader.read(READ_SIZE)) + reader_read = await self._get_reader(network_reader) while True: if reader_read is None: - reader_read = yield from self._get_reader(network_reader) + reader_read = await self._get_reader(network_reader) if reader_read is None: - done, pending = yield from asyncio.wait( + done, pending = await asyncio.wait( [ network_read, ], timeout=1, return_when=asyncio.FIRST_COMPLETED) else: - done, pending = yield from asyncio.wait( + done, pending = await asyncio.wait( [ network_read, reader_read @@ -264,10 +251,10 @@ class AsyncioTelnetServer: if network_reader.at_eof(): raise ConnectionResetError() - network_read = asyncio_ensure_future(network_reader.read(READ_SIZE)) + network_read = asyncio.ensure_future(network_reader.read(READ_SIZE)) if IAC in data: - data = yield from self._IAC_parser(data, network_reader, network_writer, connection) + data = await self._IAC_parser(data, network_reader, network_writer, connection) if len(data) == 0: continue @@ -277,9 +264,9 @@ class AsyncioTelnetServer: if self._writer: self._writer.write(data) - yield from self._writer.drain() + await self._writer.drain() - yield from connection.feed(data) + await connection.feed(data) if connection.is_closing: raise ConnectionResetError() @@ -287,22 +274,21 @@ class AsyncioTelnetServer: if self._reader and self._reader.at_eof(): raise ConnectionResetError() - reader_read = yield from self._get_reader(network_reader) + reader_read = await self._get_reader(network_reader) # Replicate the output on all clients for connection in self._connections.values(): connection.writer.write(data) - yield from connection.writer.drain() + await connection.writer.drain() - @asyncio.coroutine - def _read(self, cmd, buffer, location, reader): + async def _read(self, cmd, buffer, location, reader): """ Reads next op from the buffer or reader""" try: op = buffer[location] cmd.append(op) return op except 
IndexError: - op = yield from reader.read(1) + op = await reader.read(1) buffer.extend(op) cmd.append(buffer[location]) return op @@ -320,8 +306,7 @@ class AsyncioTelnetServer: else: log.debug("Not supported negotiation sequence, received {} bytes", len(data)) - @asyncio.coroutine - def _IAC_parser(self, buf, network_reader, network_writer, connection): + async def _IAC_parser(self, buf, network_reader, network_writer, connection): """ Processes and removes any Telnet commands from the buffer. @@ -342,7 +327,7 @@ class AsyncioTelnetServer: try: iac_cmd.append(buf[iac_loc + 1]) except IndexError: - d = yield from network_reader.read(1) + d = await network_reader.read(1) buf.extend(d) iac_cmd.append(buf[iac_loc + 1]) @@ -366,7 +351,7 @@ class AsyncioTelnetServer: elif iac_cmd[1] == SB: # starts negotiation commands negotiation = [] for pos in range(2, self.MAX_NEGOTIATION_READ): - op = yield from self._read(iac_cmd, buf, iac_loc + pos, network_reader) + op = await self._read(iac_cmd, buf, iac_loc + pos, network_reader) negotiation.append(op) if op == SE: # ends negotiation commands @@ -380,7 +365,7 @@ class AsyncioTelnetServer: try: iac_cmd.append(buf[iac_loc + 2]) except IndexError: - d = yield from network_reader.read(1) + d = await network_reader.read(1) buf.extend(d) iac_cmd.append(buf[iac_loc + 2]) # We do ECHO, SGA, and BINARY. Period. @@ -413,7 +398,7 @@ class AsyncioTelnetServer: # Remove the entire TELNET command from the buffer buf = buf.replace(iac_cmd, b'', 1) - yield from network_writer.drain() + await network_writer.drain() # Return the new copy of the buffer, minus telnet commands return buf @@ -422,7 +407,7 @@ if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) loop = asyncio.get_event_loop() - process = loop.run_until_complete(asyncio_ensure_future(asyncio.subprocess.create_subprocess_exec("/bin/sh", "-i", + process = loop.run_until_complete(asyncio.ensure_future(asyncio.subprocess.create_subprocess_exec("/bin/sh", "-i", stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE))) diff --git a/gns3server/version.py b/gns3server/version.py index 6112a684..60184f10 100644 --- a/gns3server/version.py +++ b/gns3server/version.py @@ -23,7 +23,7 @@ # or negative for a release candidate or beta (after the base version # number has been incremented) -__version__ = "2.2.0dev3" +__version__ = "2.2.0dev4" __version_info__ = (2, 2, 0, 99) # If it's a git checkout try to add the commit diff --git a/gns3server/web/response.py b/gns3server/web/response.py index 0367c60b..9a13ab87 100644 --- a/gns3server/web/response.py +++ b/gns3server/web/response.py @@ -50,8 +50,7 @@ class Response(aiohttp.web.Response): self.content_length = None super().enable_chunked_encoding() - @asyncio.coroutine - def prepare(self, request): + async def prepare(self, request): if log.getEffectiveLevel() == logging.DEBUG: log.info("%s %s", request.method, request.path_qs) log.debug("%s", dict(request.headers)) @@ -61,7 +60,7 @@ class Response(aiohttp.web.Response): log.debug(dict(self.headers)) if hasattr(self, 'body') and self.body is not None and self.headers["CONTENT-TYPE"] == "application/json": log.debug(json.loads(self.body.decode('utf-8'))) - return (yield from super().prepare(request)) + return (await super().prepare(request)) def html(self, answer): """ @@ -112,8 +111,7 @@ class Response(aiohttp.web.Response): raise aiohttp.web.HTTPBadRequest(text="{}".format(e)) self.body = json.dumps(answer, indent=4, sort_keys=True).encode('utf-8') - 
@asyncio.coroutine - def file(self, path, status=200, set_content_length=True): + async def file(self, path, status=200, set_content_length=True): """ Return a file as a response """ @@ -138,14 +136,14 @@ class Response(aiohttp.web.Response): try: with open(path, 'rb') as fobj: - yield from self.prepare(self._request) + await self.prepare(self._request) while True: data = fobj.read(4096) if not data: break - yield from self.write(data) - yield from self.drain() + await self.write(data) + await self.drain() except FileNotFoundError: raise aiohttp.web.HTTPNotFound() diff --git a/gns3server/web/route.py b/gns3server/web/route.py index 2169c71e..4f36932a 100644 --- a/gns3server/web/route.py +++ b/gns3server/web/route.py @@ -36,13 +36,12 @@ from ..crash_report import CrashReport from ..config import Config -@asyncio.coroutine -def parse_request(request, input_schema, raw): +async def parse_request(request, input_schema, raw): """Parse body of request and raise HTTP errors in case of problems""" request.json = {} if not raw: - body = yield from request.read() + body = await request.read() if body: try: request.json = json.loads(body.decode('utf-8')) @@ -162,8 +161,7 @@ class Route(object): func = asyncio.coroutine(func) - @asyncio.coroutine - def control_schema(request): + async def control_schema(request): # This block is executed at each method call server_config = Config.instance().get_section_config("Server") @@ -177,12 +175,12 @@ class Route(object): if api_version is None or raw is True: response = Response(request=request, route=route, output_schema=output_schema) - request = yield from parse_request(request, None, raw) - yield from func(request, response) + request = await parse_request(request, None, raw) + await func(request, response) return response # API call - request = yield from parse_request(request, input_schema, raw) + request = await parse_request(request, input_schema, raw) record_file = server_config.get("record") if record_file: try: @@ -192,7 +190,7 @@ class Route(object): except OSError as e: log.warning("Could not write to the record file {}: {}".format(record_file, e)) response = Response(request=request, route=route, output_schema=output_schema) - yield from func(request, response) + await func(request, response) except aiohttp.web.HTTPBadRequest as e: response = Response(request=request, route=route) response.set_status(e.status) @@ -246,8 +244,7 @@ class Route(object): return response - @asyncio.coroutine - def node_concurrency(request): + async def node_concurrency(request): """ To avoid strange effect we prevent concurrency between the same instance of the node @@ -264,15 +261,15 @@ class Route(object): cls._node_locks.setdefault(lock_key, {"lock": asyncio.Lock(), "concurrency": 0}) cls._node_locks[lock_key]["concurrency"] += 1 - with (yield from cls._node_locks[lock_key]["lock"]): - response = yield from control_schema(request) + async with cls._node_locks[lock_key]["lock"]: + response = await control_schema(request) cls._node_locks[lock_key]["concurrency"] -= 1 # No more waiting requests, garbage collect the lock if cls._node_locks[lock_key]["concurrency"] <= 0: del cls._node_locks[lock_key] else: - response = yield from control_schema(request) + response = await control_schema(request) return response cls._routes.append((method, route, node_concurrency)) diff --git a/gns3server/web/web_server.py b/gns3server/web/web_server.py index 40aadd25..76f39660 100644 --- a/gns3server/web/web_server.py +++ b/gns3server/web/web_server.py @@ -39,7 +39,6 @@ from ..compute 
import MODULES from ..compute.port_manager import PortManager from ..compute.qemu import Qemu from ..controller import Controller -from ..utils.asyncio import asyncio_ensure_future from gns3server.utils.static import get_static_dir @@ -90,8 +89,7 @@ class WebServer: return False return True - @asyncio.coroutine - def shutdown_server(self): + async def shutdown_server(self): """ Cleanly shutdown the server. """ @@ -104,25 +102,25 @@ if self._server: self._server.close() - yield from self._server.wait_closed() + await self._server.wait_closed() if self._app: - yield from self._app.shutdown() + await self._app.shutdown() if self._handler: try: # aiohttp < 2.3 - yield from self._handler.finish_connections(2) # Parameter is timeout + await self._handler.finish_connections(2) # Parameter is timeout except AttributeError: # aiohttp >= 2.3 - yield from self._handler.shutdown(2) # Parameter is timeout + await self._handler.shutdown(2) # Parameter is timeout if self._app: - yield from self._app.cleanup() + await self._app.cleanup() - yield from Controller.instance().stop() + await Controller.instance().stop() for module in MODULES: log.debug("Unloading module {}".format(module.__name__)) m = module.instance() - yield from m.unload() + await m.unload() if PortManager.instance().tcp_ports: log.warning("TCP ports are still used {}".format(PortManager.instance().tcp_ports)) @@ -133,7 +131,7 @@ for task in asyncio.Task.all_tasks(): task.cancel() try: - yield from asyncio.wait_for(task, 1) + await asyncio.wait_for(task, 1) except BaseException: pass @@ -144,7 +142,7 @@ def signal_handler(signame, *args): log.warning("Server has got signal {}, exiting...".format(signame)) try: - asyncio_ensure_future(self.shutdown_server()) + asyncio.ensure_future(self.shutdown_server()) except asyncio.CancelledError: pass @@ -179,14 +177,13 @@ log.info("SSL is enabled") return ssl_context - @asyncio.coroutine - def start_shell(self): + async def start_shell(self): try: from ptpython.repl import embed except ImportError: log.error("Unable to start a shell: the ptpython module must be installed!") return - yield from embed(globals(), locals(), return_asyncio_coroutine=True, patch_stdout=True, history_filename=".gns3_shell_history") + await embed(globals(), locals(), return_asyncio_coroutine=True, patch_stdout=True, history_filename=".gns3_shell_history") def _exit_handling(self): """ @@ -204,16 +201,15 @@ atexit.register(close_asyncio_loop) - @asyncio.coroutine - def _on_startup(self, *args): + async def _on_startup(self, *args): """ Called when the HTTP server start """ - yield from Controller.instance().start() + await Controller.instance().start() # Because with a large image collection # without md5sum already computed we start the # computing with server start - asyncio_ensure_future(Qemu.instance().list_images()) + asyncio.ensure_future(Qemu.instance().list_images()) def run(self): """ @@ -251,7 +247,7 @@ if log.getEffectiveLevel() == logging.DEBUG: # On debug version we enable info that - # coroutine is not called in a way await/yield from + # a coroutine is not awaited self._loop.set_debug(True) for key, val in os.environ.items(): @@ -300,7 +296,7 @@ self._exit_handling() if server_config.getboolean("shell"): - asyncio_ensure_future(self.start_shell()) + asyncio.ensure_future(self.start_shell()) try: self._loop.run_forever() diff --git a/setup.py b/setup.py index 46f9b2cb..bdda2ff6
100644 --- a/setup.py +++ b/setup.py @@ -61,6 +61,7 @@ setup( include_package_data=True, zip_safe=False, platforms="any", + python_requires='>=3.5.3', classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", @@ -73,7 +74,6 @@ setup( "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", diff --git a/tests/compute/docker/test_docker.py b/tests/compute/docker/test_docker.py index b840fa83..73817a21 100644 --- a/tests/compute/docker/test_docker.py +++ b/tests/compute/docker/test_docker.py @@ -39,8 +39,7 @@ def test_query_success(loop, vm): response.status = 200 response.headers = {'CONTENT-TYPE': 'application/json'} - @asyncio.coroutine - def read(): + async def read(): return b'{"c": false}' response.read.side_effect = read @@ -61,8 +60,7 @@ def test_query_error(loop, vm): response = MagicMock() response.status = 404 - @asyncio.coroutine - def read(): + async def read(): return b"NOT FOUND" response.read.side_effect = read @@ -82,8 +80,7 @@ def test_query_error_json(loop, vm): response = MagicMock() response.status = 404 - @asyncio.coroutine - def read(): + async def read(): return b'{"message": "Error"}' response.read.side_effect = read @@ -145,8 +142,7 @@ def test_pull_image(loop): def __init__(self): self._read = -1 - @asyncio.coroutine - def read(self, size): + async def read(self, size): self._read += 1 if self._read == 0: return b'{"progress": "0/100",' diff --git a/tests/compute/docker/test_docker_vm.py b/tests/compute/docker/test_docker_vm.py index bb0e9779..d1a5d8c7 100644 --- a/tests/compute/docker/test_docker_vm.py +++ b/tests/compute/docker/test_docker_vm.py @@ -366,8 +366,7 @@ def test_create_image_not_available(loop, project, manager): call = 0 - @asyncio.coroutine - def information(): + async def information(): nonlocal call if call == 0: call += 1 @@ -927,8 +926,8 @@ def test_stop_capture(vm, tmpdir, manager, free_console_port, loop): def test_get_log(loop, vm): - @asyncio.coroutine - def read(): + + async def read(): return b'Hello\nWorld' mock_query = MagicMock() @@ -1036,8 +1035,7 @@ def test_read_console_output_with_binary_mode(vm, loop): def __init__(self): self.sent = False - @asyncio.coroutine - def receive(self): + async def receive(self): if not self.sent: self.sent = True return MagicMock(tp=aiohttp.WSMsgType.BINARY, data=b"test") diff --git a/tests/compute/virtualbox/test_virtualbox_manager.py b/tests/compute/virtualbox/test_virtualbox_manager.py index 15d0ce4e..f37f2d9e 100644 --- a/tests/compute/virtualbox/test_virtualbox_manager.py +++ b/tests/compute/virtualbox/test_virtualbox_manager.py @@ -79,8 +79,7 @@ def test_list_vms(manager, loop): '"" {42b4d095-ff5f-4ac4-bb9d-5f2c7861c1f1}', '"Linux Microcore 4.7.1" {ccd8c50b-c172-457d-99fa-dd69371ede0e}'] - @asyncio.coroutine - def execute_mock(cmd, args): + async def execute_mock(cmd, args): if cmd == "list": return vm_list else: diff --git a/tests/controller/test_compute.py b/tests/controller/test_compute.py index 2a929872..72ff51d8 100644 --- a/tests/controller/test_compute.py +++ b/tests/controller/test_compute.py @@ -205,8 +205,7 @@ def test_connectNotification(compute, async_run): call = 0 - @asyncio.coroutine - def receive(): + async def receive(): nonlocal call call += 1 if call == 1: @@ -238,8 +237,7 @@ def test_connectNotificationPing(compute, async_run): 
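# Illustrative sketch, not part of the patch: the converted tests above keep working because
# a MagicMock whose side_effect is an "async def" function returns a coroutine when it is
# called, so the code under test can still await the mocked method. Minimal standalone
# example (fake_response, fake_read and code_under_test are made-up names):
#
#     import asyncio
#     from unittest.mock import MagicMock
#
#     async def fake_read():
#         return b'{"c": false}'
#
#     fake_response = MagicMock()
#     fake_response.read.side_effect = fake_read   # fake_response.read() now returns a coroutine
#
#     async def code_under_test():
#         return await fake_response.read()         # resolves to b'{"c": false}'
#
#     assert asyncio.get_event_loop().run_until_complete(code_under_test()) == b'{"c": false}'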
call = 0 - @asyncio.coroutine - def receive(): + async def receive(): nonlocal call call += 1 if call == 1: diff --git a/tests/controller/test_link.py b/tests/controller/test_link.py index 5e287429..1ea05666 100644 --- a/tests/controller/test_link.py +++ b/tests/controller/test_link.py @@ -284,10 +284,10 @@ def test_json_serial_link(async_run, project, compute, link): def test_start_streaming_pcap(link, async_run, tmpdir, project): - @asyncio.coroutine - def fake_reader(): + + async def fake_reader(): output = AsyncioBytesIO() - yield from output.write(b"hello") + await output.write(b"hello") output.seek(0) return output @@ -314,8 +314,8 @@ def test_default_capture_file_name(project, compute, async_run): def test_start_capture(link, async_run, tmpdir, project, controller): - @asyncio.coroutine - def fake_reader(): + + async def fake_reader(): return AsyncioBytesIO() link.read_pcap_from_source = fake_reader diff --git a/tests/controller/test_node.py b/tests/controller/test_node.py index f97ad19f..a80bc9a1 100644 --- a/tests/controller/test_node.py +++ b/tests/controller/test_node.py @@ -211,8 +211,7 @@ def test_create_image_missing(node, compute, project, async_run): node.__calls = 0 - @asyncio.coroutine - def resp(*args, **kwargs): + async def resp(*args, **kwargs): node.__calls += 1 response = MagicMock() if node.__calls == 1: diff --git a/tests/controller/test_udp_link.py b/tests/controller/test_udp_link.py index 4f87bfac..0d1d3713 100644 --- a/tests/controller/test_udp_link.py +++ b/tests/controller/test_udp_link.py @@ -41,8 +41,7 @@ def test_create(async_run, project): node2 = Node(project, compute2, "node2", node_type="vpcs") node2._ports = [EthernetPort("E0", 0, 3, 1)] - @asyncio.coroutine - def subnet_callback(compute2): + async def subnet_callback(compute2): """ Fake subnet callback """ @@ -54,8 +53,7 @@ def test_create(async_run, project): async_run(link.add_node(node1, 0, 4)) async_run(link.update_filters({"latency": [10]})) - @asyncio.coroutine - def compute1_callback(path, data={}, **kwargs): + async def compute1_callback(path, data={}, **kwargs): """ Fake server """ @@ -64,8 +62,7 @@ def test_create(async_run, project): response.json = {"udp_port": 1024} return response - @asyncio.coroutine - def compute2_callback(path, data={}, **kwargs): + async def compute2_callback(path, data={}, **kwargs): """ Fake server """ @@ -107,8 +104,7 @@ def test_create_one_side_failure(async_run, project): node2 = Node(project, compute2, "node2", node_type="vpcs") node2._ports = [EthernetPort("E0", 0, 3, 1)] - @asyncio.coroutine - def subnet_callback(compute2): + async def subnet_callback(compute2): """ Fake subnet callback """ @@ -119,8 +115,7 @@ def test_create_one_side_failure(async_run, project): link = UDPLink(project) async_run(link.add_node(node1, 0, 4)) - @asyncio.coroutine - def compute1_callback(path, data={}, **kwargs): + async def compute1_callback(path, data={}, **kwargs): """ Fake server """ @@ -129,8 +124,7 @@ def test_create_one_side_failure(async_run, project): response.json = {"udp_port": 1024} return response - @asyncio.coroutine - def compute2_callback(path, data={}, **kwargs): + async def compute2_callback(path, data={}, **kwargs): """ Fake server """ @@ -322,8 +316,7 @@ def test_update(async_run, project): node2 = Node(project, compute2, "node2", node_type="vpcs") node2._ports = [EthernetPort("E0", 0, 3, 1)] - @asyncio.coroutine - def subnet_callback(compute2): + async def subnet_callback(compute2): """ Fake subnet callback """ @@ -335,8 +328,7 @@ def test_update(async_run, 
project): async_run(link.add_node(node1, 0, 4)) async_run(link.update_filters({"latency": [10]})) - @asyncio.coroutine - def compute1_callback(path, data={}, **kwargs): + async def compute1_callback(path, data={}, **kwargs): """ Fake server """ @@ -345,8 +337,7 @@ def test_update(async_run, project): response.json = {"udp_port": 1024} return response - @asyncio.coroutine - def compute2_callback(path, data={}, **kwargs): + async def compute2_callback(path, data={}, **kwargs): """ Fake server """ @@ -402,8 +393,7 @@ def test_update_suspend(async_run, project): node2 = Node(project, compute2, "node2", node_type="vpcs") node2._ports = [EthernetPort("E0", 0, 3, 1)] - @asyncio.coroutine - def subnet_callback(compute2): + async def subnet_callback(compute2): """ Fake subnet callback """ @@ -416,8 +406,7 @@ def test_update_suspend(async_run, project): async_run(link.update_filters({"latency": [10]})) async_run(link.update_suspend(True)) - @asyncio.coroutine - def compute1_callback(path, data={}, **kwargs): + async def compute1_callback(path, data={}, **kwargs): """ Fake server """ @@ -426,8 +415,7 @@ def test_update_suspend(async_run, project): response.json = {"udp_port": 1024} return response - @asyncio.coroutine - def compute2_callback(path, data={}, **kwargs): + async def compute2_callback(path, data={}, **kwargs): """ Fake server """ diff --git a/tests/handlers/api/base.py b/tests/handlers/api/base.py index e71505ae..7ff2d1ab 100644 --- a/tests/handlers/api/base.py +++ b/tests/handlers/api/base.py @@ -42,9 +42,8 @@ class Query: self._api_version = api_version self._session = None - @asyncio.coroutine - def close(self): - yield from self._session.close() + async def close(self): + await self._session.close() def post(self, path, body={}, **kwargs): return self._fetch("POST", path, body, **kwargs) @@ -69,9 +68,8 @@ class Query: """ self._session = aiohttp.ClientSession() - @asyncio.coroutine - def go_request(future): - response = yield from self._session.ws_connect(self.get_url(path)) + async def go_request(future): + response = await self._session.ws_connect(self.get_url(path)) future.set_result(response) future = asyncio.Future() asyncio.ensure_future(go_request(future)) @@ -87,14 +85,13 @@ class Query: """ return self._loop.run_until_complete(asyncio.ensure_future(self._async_fetch(method, path, body=body, **kwargs))) - @asyncio.coroutine - def _async_fetch(self, method, path, body=None, **kwargs): + async def _async_fetch(self, method, path, body=None, **kwargs): if body is not None and not kwargs.get("raw", False): body = json.dumps(body) connector = aiohttp.TCPConnector() - response = yield from aiohttp.request(method, self.get_url(path), data=body, loop=self._loop, connector=connector) - response.body = yield from response.read() + response = await aiohttp.request(method, self.get_url(path), data=body, loop=self._loop, connector=connector) + response.body = await response.read() x_route = response.headers.get('X-Route', None) if x_route is not None: response.route = x_route.replace("/v{}".format(self._api_version), "") diff --git a/tests/handlers/api/controller/test_link.py b/tests/handlers/api/controller/test_link.py index e615281f..b7dcafc7 100644 --- a/tests/handlers/api/controller/test_link.py +++ b/tests/handlers/api/controller/test_link.py @@ -341,10 +341,10 @@ def test_stop_capture(http_controller, tmpdir, project, compute, async_run): def test_pcap(http_controller, tmpdir, project, compute, loop): - @asyncio.coroutine - def go(future): - response = yield from 
aiohttp.request("GET", http_controller.get_url("/projects/{}/links/{}/pcap".format(project.id, link.id))) - response.body = yield from response.content.read(5) + + async def go(future): + response = await aiohttp.request("GET", http_controller.get_url("/projects/{}/links/{}/pcap".format(project.id, link.id))) + response.body = await response.content.read(5) response.close() future.set_result(response) diff --git a/tests/handlers/api/controller/test_project.py b/tests/handlers/api/controller/test_project.py index 7cd97f45..f4e00c6a 100644 --- a/tests/handlers/api/controller/test_project.py +++ b/tests/handlers/api/controller/test_project.py @@ -169,13 +169,13 @@ def test_load_project(http_controller, project, config): def test_notification(http_controller, project, controller, loop, async_run): - @asyncio.coroutine - def go(): + + async def go(): connector = aiohttp.TCPConnector() - response = yield from aiohttp.request("GET", http_controller.get_url("/projects/{project_id}/notifications".format(project_id=project.id)), connector=connector) - response.body = yield from response.content.read(200) + response = await aiohttp.request("GET", http_controller.get_url("/projects/{project_id}/notifications".format(project_id=project.id)), connector=connector) + response.body = await response.content.read(200) controller.notification.project_emit("node.created", {"a": "b"}) - response.body += yield from response.content.readany() + response.body += await response.content.readany() response.close() return response diff --git a/tests/handlers/api/controller/test_server.py b/tests/handlers/api/controller/test_server.py index b4f10bc4..32c56725 100644 --- a/tests/handlers/api/controller/test_server.py +++ b/tests/handlers/api/controller/test_server.py @@ -31,8 +31,8 @@ def web_server(): def test_shutdown_local(http_controller, web_server, config): - @asyncio.coroutine - def hello(): + + async def hello(): return 0 web_server.shutdown_server.return_value = hello() diff --git a/tests/utils.py b/tests/utils.py index f1cfd191..c4664916 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -65,6 +65,13 @@ class _asyncio_patch: def asyncio_patch(function, *args, **kwargs): return _asyncio_patch(function, *args, **kwargs) +# monkey patch MagicMock, +# FIXME: would probably be better to use asynctest or pytest.asyncio to test our asyncio code +async def async_magic(): + pass + +unittest.mock.MagicMock.__await__ = lambda x: async_magic().__await__() + class AsyncioMagicMock(unittest.mock.MagicMock): """ @@ -100,18 +107,15 @@ class AsyncioMagicMock(unittest.mock.MagicMock): class AsyncioBytesIO(io.BytesIO): """ - An async wrapper arround io.BytesIO to fake an + An async wrapper around io.BytesIO to fake an async network connection """ - @asyncio.coroutine - def read(self, length=-1): + async def read(self, length=-1): return super().read(length) - @asyncio.coroutine - def write(self, data): + async def write(self, data): return super().write(data) - @asyncio.coroutine - def close(self): + async def close(self): return super().close() diff --git a/tests/utils/asyncio/test_embed_shell.py b/tests/utils/asyncio/test_embed_shell.py index 29169ca7..3dc71996 100644 --- a/tests/utils/asyncio/test_embed_shell.py +++ b/tests/utils/asyncio/test_embed_shell.py @@ -23,27 +23,26 @@ from gns3server.utils.asyncio.embed_shell import EmbedShell def test_embed_shell_help(async_run): class Application(EmbedShell): - @asyncio.coroutine - def hello(self): + async def hello(self): """ The hello world function The hello usage """ - pass + 
await asyncio.sleep(1) + reader = asyncio.StreamReader() writer = asyncio.StreamReader() app = Application(reader, writer) - assert async_run(app._parse_command('help')) == 'Help:\nhello: The hello world function\nhelp: Show help\n\nhelp command for details about a command\n' - assert async_run(app._parse_command('?')) == 'Help:\nhello: The hello world function\nhelp: Show help\n\nhelp command for details about a command\n' + assert async_run(app._parse_command('help')) == 'Help:\nhello: The hello world function\n\nhelp command for details about a command\n' + assert async_run(app._parse_command('?')) == 'Help:\nhello: The hello world function\n\nhelp command for details about a command\n' assert async_run(app._parse_command('? hello')) == 'hello: The hello world function\n\nThe hello usage\n' def test_embed_shell_execute(async_run): class Application(EmbedShell): - @asyncio.coroutine - def hello(self): + async def hello(self): """ The hello world function diff --git a/tests/utils/test_asyncio.py b/tests/utils/test_asyncio.py index 225cb9d6..36bfe548 100644 --- a/tests/utils/test_asyncio.py +++ b/tests/utils/test_asyncio.py @@ -85,10 +85,9 @@ def test_lock_decorator(loop): self._test_val = 0 @locking - @asyncio.coroutine - def method_to_lock(self): + async def method_to_lock(self): res = self._test_val - yield from asyncio.sleep(0.1) + await asyncio.sleep(0.1) self._test_val += 1 return res
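# Illustrative sketch, not part of the patch: besides async def / await, the other recurring
# rewrite in this change-set is lock acquisition. The Python 3.4 idiom
#
#     with (yield from lock):
#         ...
#
# awaited lock.acquire() and then entered the returned context manager; with native coroutines
# the equivalent form, used above in AsyncioTelnetServer._get_reader() and Route.node_concurrency(),
# is
#
#     async with lock:
#         ...
#
# A minimal standalone version of the pattern (critical_section is a made-up name):
#
#     import asyncio
#
#     lock = asyncio.Lock()
#
#     async def critical_section():
#         async with lock:              # suspends until the lock can be acquired
#             await asyncio.sleep(0.1)  # protected work goes here
#
#     asyncio.get_event_loop().run_until_complete(critical_section())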