Use pyupgrade with the --py36-plus flag.

This commit is contained in:
grossmj
2021-04-13 18:37:58 +09:30
parent a9eb035b7d
commit f928738bd5
235 changed files with 1501 additions and 1638 deletions
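For context, pyupgrade's --py36-plus mode rewrites str.format() calls into equivalent f-strings wherever the result is provably identical, which accounts for the bulk of this diff. Below is a minimal sketch of the invocation and of one such rewrite (mirroring the image tag change in this file); the git ls-files file selection is an assumption about how the tool might be run, not something recorded in the commit.

# Hypothetical invocation: run pyupgrade over every tracked Python file,
# allowing syntax that requires Python 3.6+ (notably f-strings):
#   pyupgrade --py36-plus $(git ls-files '*.py')

image = "ubuntu"
# Before: str.format() call as found in the old code
old_style = "{}:latest".format(image)
# After: the equivalent f-string that pyupgrade --py36-plus emits
new_style = f"{image}:latest"
assert old_style == new_style == "ubuntu:latest"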


@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@ -78,7 +77,7 @@ class DockerVM(BaseNode):
# force the latest image if no version is specified
if ":" not in image:
image = "{}:latest".format(image)
image = f"{image}:latest"
self._image = image
self._start_command = start_command
self._environment = environment
@ -110,9 +109,11 @@ class DockerVM(BaseNode):
else:
self.adapters = adapters
log.debug("{module}: {name} [{image}] initialized.".format(module=self.manager.module_name,
name=self.name,
image=self._image))
log.debug("{module}: {name} [{image}] initialized.".format(
module=self.manager.module_name,
name=self.name,
image=self._image)
)
def __json__(self):
return {
@ -148,7 +149,7 @@ class DockerVM(BaseNode):
if not os.path.exists("/tmp/.X11-unix/"):
return display
while True:
if not os.path.exists("/tmp/.X11-unix/X{}".format(display)):
if not os.path.exists(f"/tmp/.X11-unix/X{display}"):
return display
display += 1
@ -242,7 +243,7 @@ class DockerVM(BaseNode):
"""
try:
result = await self.manager.query("GET", "containers/{}/json".format(self._cid))
result = await self.manager.query("GET", f"containers/{self._cid}/json")
except DockerError:
return "exited"
@ -257,7 +258,7 @@ class DockerVM(BaseNode):
:returns: Dictionary information about the container image
"""
result = await self.manager.query("GET", "images/{}/json".format(self._image))
result = await self.manager.query("GET", f"images/{self._image}/json")
return result
def _mount_binds(self, image_info):
@ -267,20 +268,20 @@ class DockerVM(BaseNode):
resources = get_resource("compute/docker/resources")
if not os.path.exists(resources):
raise DockerError("{} is missing can't start Docker containers".format(resources))
binds = ["{}:/gns3:ro".format(resources)]
raise DockerError(f"{resources} is missing, can't start Docker container")
binds = [f"{resources}:/gns3:ro"]
# We mount our own etc/network
try:
self._create_network_config()
except OSError as e:
raise DockerError("Could not create network config in the container: {}".format(e))
raise DockerError(f"Could not create network config in the container: {e}")
volumes = ["/etc/network"]
volumes.extend((image_info.get("Config", {}).get("Volumes") or {}).keys())
for volume in self._extra_volumes:
if not volume.strip() or volume[0] != "/" or volume.find("..") >= 0:
raise DockerError("Persistent volume '{}' has invalid format. It must start with a '/' and not contain '..'.".format(volume))
raise DockerError(f"Persistent volume '{volume}' has invalid format. It must start with a '/' and not contain '..'.")
volumes.extend(self._extra_volumes)
self._volumes = []
@ -297,7 +298,7 @@ class DockerVM(BaseNode):
for volume in self._volumes:
source = os.path.join(self.working_dir, os.path.relpath(volume, "/"))
os.makedirs(source, exist_ok=True)
binds.append("{}:/gns3volumes{}".format(source, volume))
binds.append(f"{source}:/gns3volumes{volume}")
return binds
@ -343,16 +344,17 @@ class DockerVM(BaseNode):
try:
image_infos = await self._get_image_information()
except DockerHttp404Error:
log.info("Image '{}' is missing, pulling it from Docker hub...".format(self._image))
log.info(f"Image '{self._image}' is missing, pulling it from Docker hub...")
await self.pull_image(self._image)
image_infos = await self._get_image_information()
if image_infos is None:
raise DockerError("Cannot get information for image '{}', please try again.".format(self._image))
raise DockerError(f"Cannot get information for image '{self._image}', please try again.")
available_cpus = psutil.cpu_count(logical=True)
if self._cpus > available_cpus:
raise DockerError("You have allocated too many CPUs for the Docker container (max available is {} CPUs)".format(available_cpus))
raise DockerError(f"You have allocated too many CPUs for the Docker container "
f"(max available is {available_cpus} CPUs)")
params = {
"Hostname": self._name,
@ -381,7 +383,7 @@ class DockerVM(BaseNode):
try:
params["Cmd"] = shlex.split(self._start_command)
except ValueError as e:
raise DockerError("Invalid start command '{}': {}".format(self._start_command, e))
raise DockerError(f"Invalid start command '{self._start_command}': {e}")
if len(params["Cmd"]) == 0:
params["Cmd"] = image_infos.get("Config", {"Cmd": []}).get("Cmd")
if params["Cmd"] is None:
@ -391,7 +393,7 @@ class DockerVM(BaseNode):
params["Entrypoint"].insert(0, "/gns3/init.sh") # FIXME /gns3/init.sh is not found?
# Give the information to the container on how many interface should be inside
params["Env"].append("GNS3_MAX_ETHERNET=eth{}".format(self.adapters - 1))
params["Env"].append(f"GNS3_MAX_ETHERNET=eth{self.adapters - 1}")
# Give the information to the container the list of volume path mounted
params["Env"].append("GNS3_VOLUMES={}".format(":".join(self._volumes)))
@ -412,7 +414,7 @@ class DockerVM(BaseNode):
for e in self._environment.strip().split("\n"):
e = e.strip()
if e.split("=")[0] == "":
self.project.emit("log.warning", {"message": "{} has invalid environment variable: {}".format(self.name, e)})
self.project.emit("log.warning", {"message": f"{self.name} has invalid environment variable: {e}"})
continue
if not e.startswith("GNS3_"):
formatted = self._format_env(variables, e)
@ -421,17 +423,17 @@ class DockerVM(BaseNode):
if self._console_type == "vnc":
await self._start_vnc()
params["Env"].append("QT_GRAPHICSSYSTEM=native") # To fix a Qt issue: https://github.com/GNS3/gns3-server/issues/556
params["Env"].append("DISPLAY=:{}".format(self._display))
params["Env"].append(f"DISPLAY=:{self._display}")
params["HostConfig"]["Binds"].append("/tmp/.X11-unix/:/tmp/.X11-unix/")
if self._extra_hosts:
extra_hosts = self._format_extra_hosts(self._extra_hosts)
if extra_hosts:
params["Env"].append("GNS3_EXTRA_HOSTS={}".format(extra_hosts))
params["Env"].append(f"GNS3_EXTRA_HOSTS={extra_hosts}")
result = await self.manager.query("POST", "containers/create", data=params)
self._cid = result['Id']
log.info("Docker container '{name}' [{id}] created".format(name=self._name, id=self._id))
log.info(f"Docker container '{self._name}' [{self._id}] created")
return True
def _format_env(self, variables, env):
@ -450,8 +452,8 @@ class DockerVM(BaseNode):
if hostname and ip:
hosts.append((hostname, ip))
except ValueError:
raise DockerError("Can't apply `ExtraHosts`, wrong format: {}".format(extra_hosts))
return "\n".join(["{}\t{}".format(h[1], h[0]) for h in hosts])
raise DockerError(f"Can't apply `ExtraHosts`, wrong format: {extra_hosts}")
return "\n".join([f"{h[1]}\t{h[0]}" for h in hosts])
async def update(self):
"""
@ -479,8 +481,10 @@ class DockerVM(BaseNode):
try:
state = await self._get_container_state()
except DockerHttp404Error:
raise DockerError("Docker container '{name}' with ID {cid} does not exist or is not ready yet. Please try again in a few seconds.".format(name=self.name,
cid=self._cid))
raise DockerError("Docker container '{name}' with ID {cid} does not exist or is not ready yet. Please try again in a few seconds.".format(
name=self.name,
cid=self._cid)
)
if state == "paused":
await self.unpause()
elif state == "running":
@ -494,7 +498,7 @@ class DockerVM(BaseNode):
await self._clean_servers()
await self.manager.query("POST", "containers/{}/start".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/start")
self._namespace = await self._get_namespace()
await self._start_ubridge(require_privileged_access=True)
@ -524,10 +528,12 @@ class DockerVM(BaseNode):
self._permissions_fixed = False
self.status = "started"
log.info("Docker container '{name}' [{image}] started listen for {console_type} on {console}".format(name=self._name,
image=self._image,
console=self.console,
console_type=self.console_type))
log.info("Docker container '{name}' [{image}] started listen for {console_type} on {console}".format(
name=self._name,
image=self._image,
console=self.console,
console_type=self.console_type)
)
async def _start_aux(self):
"""
@ -543,12 +549,12 @@ class DockerVM(BaseNode):
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE)
except OSError as e:
raise DockerError("Could not start auxiliary console process: {}".format(e))
raise DockerError(f"Could not start auxiliary console process: {e}")
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=True, echo=True)
try:
self._telnet_servers.append((await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux)))
self._telnet_servers.append(await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux))
except OSError as e:
raise DockerError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.aux, e))
raise DockerError(f"Could not start Telnet server on socket {self._manager.port_manager.console_host}:{self.aux}: {e}")
log.debug(f"Docker container '{self.name}' started listen for auxiliary telnet on {self.aux}")
async def _fix_permissions(self):
@ -558,10 +564,10 @@ class DockerVM(BaseNode):
"""
state = await self._get_container_state()
log.info("Docker container '{name}' fix ownership, state = {state}".format(name=self._name, state=state))
log.info(f"Docker container '{self._name}' fix ownership, state = {state}")
if state == "stopped" or state == "exited":
# We need to restart it to fix permissions
await self.manager.query("POST", "containers/{}/start".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/start")
for volume in self._volumes:
log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(
@ -584,7 +590,7 @@ class DockerVM(BaseNode):
.format(uid=os.getuid(), gid=os.getgid(), path=volume),
)
except OSError as e:
raise DockerError("Could not fix permissions for {}: {}".format(volume, e))
raise DockerError(f"Could not fix permissions for {volume}: {e}")
await process.wait()
self._permissions_fixed = True
@ -608,13 +614,13 @@ class DockerVM(BaseNode):
"-rfbport", str(self.console),
"-AlwaysShared",
"-SecurityTypes", "None",
":{}".format(self._display),
f":{self._display}",
stdout=fd, stderr=subprocess.STDOUT)
else:
if restart is False:
self._xvfb_process = await asyncio.create_subprocess_exec("Xvfb",
"-nolisten",
"tcp", ":{}".format(self._display),
"tcp", f":{self._display}",
"-screen", "0",
self._console_resolution + "x16")
@ -625,7 +631,7 @@ class DockerVM(BaseNode):
"-nopw",
"-shared",
"-geometry", self._console_resolution,
"-display", "WAIT:{}".format(self._display),
"-display", f"WAIT:{self._display}",
"-rfbport", str(self.console),
"-rfbportv6", str(self.console),
"-noncache",
@ -642,17 +648,17 @@ class DockerVM(BaseNode):
if not (tigervnc_path or shutil.which("Xvfb") and shutil.which("x11vnc")):
raise DockerError("Please install TigerVNC server (recommended) or Xvfb + x11vnc before using VNC support")
await self._start_vnc_process()
x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display))
x11_socket = os.path.join("/tmp/.X11-unix/", f"X{self._display}")
try:
await wait_for_file_creation(x11_socket)
except asyncio.TimeoutError:
raise DockerError('x11 socket file "{}" does not exist'.format(x11_socket))
raise DockerError(f'x11 socket file "{x11_socket}" does not exist')
if not hasattr(sys, "_called_from_test") or not sys._called_from_test:
# Start vncconfig for tigervnc clipboard support, connection available only after socket creation.
tigervncconfig_path = shutil.which("vncconfig")
if tigervnc_path and tigervncconfig_path:
self._vncconfig_process = await asyncio.create_subprocess_exec(tigervncconfig_path, "-display", ":{}".format(self._display), "-nowin")
self._vncconfig_process = await asyncio.create_subprocess_exec(tigervncconfig_path, "-display", f":{self._display}", "-nowin")
# sometimes the VNC process can crash
monitor_process(self._vnc_process, self._vnc_callback)
@ -665,7 +671,7 @@ class DockerVM(BaseNode):
"""
if returncode != 0 and self._closing is False:
self.project.emit("log.error", {"message": "The vnc process has stopped with return code {} for node '{}'. Please restart this node.".format(returncode, self.name)})
self.project.emit("log.error", {"message": f"The vnc process has stopped with return code {returncode} for node '{self.name}'. Please restart this node."})
self._vnc_process = None
async def _start_http(self):
@ -679,15 +685,15 @@ class DockerVM(BaseNode):
# We replace host and port in the server answer otherwise some link could be broken
server = AsyncioRawCommandServer(command, replaces=[
(
'://127.0.0.1'.encode(), # {{HOST}} mean client host
'://{{HOST}}'.encode(),
b'://127.0.0.1', # {{HOST}} mean client host
b'://{{HOST}}',
),
(
':{}'.format(self._console_http_port).encode(),
':{}'.format(self.console).encode(),
f':{self._console_http_port}'.encode(),
f':{self.console}'.encode(),
)
])
self._telnet_servers.append((await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console)))
self._telnet_servers.append(await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console))
async def _window_size_changed_callback(self, columns, rows):
"""
@ -699,7 +705,7 @@ class DockerVM(BaseNode):
"""
# resize the container TTY.
await self._manager.query("POST", "containers/{}/resize?h={}&w={}".format(self._cid, rows, columns))
await self._manager.query("POST", f"containers/{self._cid}/resize?h={rows}&w={columns}")
async def _start_console(self):
@ -724,11 +730,11 @@ class DockerVM(BaseNode):
input_stream = InputStream()
telnet = AsyncioTelnetServer(reader=output_stream, writer=input_stream, echo=True, naws=True, window_size_changed_callback=self._window_size_changed_callback)
try:
self._telnet_servers.append((await asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console)))
self._telnet_servers.append(await asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console))
except OSError as e:
raise DockerError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e))
raise DockerError(f"Could not start Telnet server on socket {self._manager.port_manager.console_host}:{self.console}: {e}")
self._console_websocket = await self.manager.websocket_query("containers/{}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1".format(self._cid))
self._console_websocket = await self.manager.websocket_query(f"containers/{self._cid}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1")
input_stream.ws = self._console_websocket
output_stream.feed_data(self.name.encode() + b" console is now available... Press RETURN to get started.\r\n")
@ -750,7 +756,7 @@ class DockerVM(BaseNode):
elif msg.type == aiohttp.WSMsgType.BINARY:
out.feed_data(msg.data)
elif msg.type == aiohttp.WSMsgType.ERROR:
log.critical("Docker WebSocket Error: {}".format(ws.exception()))
log.critical(f"Docker WebSocket Error: {ws.exception()}")
else:
out.feed_eof()
await ws.close()
@ -785,7 +791,7 @@ class DockerVM(BaseNode):
Restart this Docker container.
"""
await self.manager.query("POST", "containers/{}/restart".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/restart")
log.info("Docker container '{name}' [{image}] restarted".format(
name=self._name, image=self._image))
@ -825,14 +831,14 @@ class DockerVM(BaseNode):
if state != "stopped" or state != "exited":
# t=5 number of seconds to wait before killing the container
try:
await self.manager.query("POST", "containers/{}/stop".format(self._cid), params={"t": 5})
log.info("Docker container '{name}' [{image}] stopped".format(name=self._name, image=self._image))
await self.manager.query("POST", f"containers/{self._cid}/stop", params={"t": 5})
log.info(f"Docker container '{self._name}' [{self._image}] stopped")
except DockerHttp304Error:
# Container is already stopped
pass
# Ignore runtime error because when closing the server
except RuntimeError as e:
log.debug("Docker runtime error when closing: {}".format(str(e)))
log.debug(f"Docker runtime error when closing: {str(e)}")
return
self.status = "stopped"
@ -841,18 +847,18 @@ class DockerVM(BaseNode):
Pauses this Docker container.
"""
await self.manager.query("POST", "containers/{}/pause".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/pause")
self.status = "suspended"
log.info("Docker container '{name}' [{image}] paused".format(name=self._name, image=self._image))
log.info(f"Docker container '{self._name}' [{self._image}] paused")
async def unpause(self):
"""
Unpauses this Docker container.
"""
await self.manager.query("POST", "containers/{}/unpause".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/unpause")
self.status = "started"
log.info("Docker container '{name}' [{image}] unpaused".format(name=self._name, image=self._image))
log.info(f"Docker container '{self._name}' [{self._image}] unpaused")
async def close(self):
"""
@ -892,17 +898,17 @@ class DockerVM(BaseNode):
pass
if self._display:
display = "/tmp/.X11-unix/X{}".format(self._display)
display = f"/tmp/.X11-unix/X{self._display}"
try:
if os.path.exists(display):
os.remove(display)
except OSError as e:
log.warning("Could not remove display {}: {}".format(display, e))
log.warning(f"Could not remove display {display}: {e}")
# v 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false.
# force - 1/True/true or 0/False/false, Kill then remove the container. Default false.
try:
await self.manager.query("DELETE", "containers/{}".format(self._cid), params={"force": 1, "v": 1})
await self.manager.query("DELETE", f"containers/{self._cid}", params={"force": 1, "v": 1})
except DockerError:
pass
log.info("Docker container '{name}' [{image}] removed".format(
@ -916,7 +922,7 @@ class DockerVM(BaseNode):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
# Ignore runtime error because when closing the server
except (DockerHttp404Error, RuntimeError) as e:
log.debug("Docker error when closing: {}".format(str(e)))
log.debug(f"Docker error when closing: {str(e)}")
return
async def _add_ubridge_connection(self, nio, adapter_number):
@ -934,14 +940,14 @@ class DockerVM(BaseNode):
adapter_number=adapter_number))
for index in range(4096):
if "tap-gns3-e{}".format(index) not in psutil.net_if_addrs():
adapter.host_ifc = "tap-gns3-e{}".format(str(index))
if f"tap-gns3-e{index}" not in psutil.net_if_addrs():
adapter.host_ifc = f"tap-gns3-e{str(index)}"
break
if adapter.host_ifc is None:
raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(name=self.name,
adapter_number=adapter_number))
bridge_name = 'bridge{}'.format(adapter_number)
await self._ubridge_send('bridge create {}'.format(bridge_name))
bridge_name = f'bridge{adapter_number}'
await self._ubridge_send(f'bridge create {bridge_name}')
self._bridges.add(bridge_name)
await self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number,
hostif=adapter.host_ifc))
@ -958,12 +964,12 @@ class DockerVM(BaseNode):
async def _get_namespace(self):
result = await self.manager.query("GET", "containers/{}/json".format(self._cid))
result = await self.manager.query("GET", f"containers/{self._cid}/json")
return int(result['State']['Pid'])
async def _connect_nio(self, adapter_number, nio):
bridge_name = 'bridge{}'.format(adapter_number)
bridge_name = f'bridge{adapter_number}'
await self._ubridge_send('bridge add_nio_udp {bridge_name} {lport} {rhost} {rport}'.format(bridge_name=bridge_name,
lport=nio.lport,
rhost=nio.rhost,
@ -972,7 +978,7 @@ class DockerVM(BaseNode):
if nio.capturing:
await self._ubridge_send('bridge start_capture {bridge_name} "{pcap_file}"'.format(bridge_name=bridge_name,
pcap_file=nio.pcap_output_file))
await self._ubridge_send('bridge start {bridge_name}'.format(bridge_name=bridge_name))
await self._ubridge_send(f'bridge start {bridge_name}')
await self._ubridge_apply_filters(bridge_name, nio.filters)
async def adapter_add_nio_binding(self, adapter_number, nio):
@ -1007,7 +1013,7 @@ class DockerVM(BaseNode):
"""
if self.ubridge:
bridge_name = 'bridge{}'.format(adapter_number)
bridge_name = f'bridge{adapter_number}'
if bridge_name in self._bridges:
await self._ubridge_apply_filters(bridge_name, nio.filters)
@ -1029,8 +1035,8 @@ class DockerVM(BaseNode):
await self.stop_capture(adapter_number)
if self.ubridge:
nio = adapter.get_nio(0)
bridge_name = 'bridge{}'.format(adapter_number)
await self._ubridge_send("bridge stop {}".format(bridge_name))
bridge_name = f'bridge{adapter_number}'
await self._ubridge_send(f"bridge stop {bridge_name}")
await self._ubridge_send('bridge remove_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number,
lport=nio.lport,
rhost=nio.rhost,
@ -1061,7 +1067,7 @@ class DockerVM(BaseNode):
nio = adapter.get_nio(0)
if not nio:
raise DockerError("Adapter {} is not connected".format(adapter_number))
raise DockerError(f"Adapter {adapter_number} is not connected")
return nio
@ -1112,10 +1118,10 @@ class DockerVM(BaseNode):
:param output_file: PCAP destination file for the capture
"""
adapter = "bridge{}".format(adapter_number)
adapter = f"bridge{adapter_number}"
if not self.ubridge:
raise DockerError("Cannot start the packet capture: uBridge is not running")
await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=adapter, output_file=output_file))
await self._ubridge_send(f'bridge start_capture {adapter} "{output_file}"')
async def _stop_ubridge_capture(self, adapter_number):
"""
@ -1124,10 +1130,10 @@ class DockerVM(BaseNode):
:param adapter_number: adapter number
"""
adapter = "bridge{}".format(adapter_number)
adapter = f"bridge{adapter_number}"
if not self.ubridge:
raise DockerError("Cannot stop the packet capture: uBridge is not running")
await self._ubridge_send("bridge stop_capture {name}".format(name=adapter))
await self._ubridge_send(f"bridge stop_capture {adapter}")
async def start_capture(self, adapter_number, output_file):
"""
@ -1139,7 +1145,7 @@ class DockerVM(BaseNode):
nio = self.get_nio(adapter_number)
if nio.capturing:
raise DockerError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))
raise DockerError(f"Packet capture is already activated on adapter {adapter_number}")
nio.start_packet_capture(output_file)
if self.status == "started" and self.ubridge:
@ -1174,7 +1180,7 @@ class DockerVM(BaseNode):
:returns: string
"""
result = await self.manager.query("GET", "containers/{}/logs".format(self._cid), params={"stderr": 1, "stdout": 1})
result = await self.manager.query("GET", f"containers/{self._cid}/logs", params={"stderr": 1, "stdout": 1})
return result
async def delete(self):