Mirror of https://github.com/GNS3/gns3-server.git (synced 2024-12-19 04:47:54 +00:00)
Fixes major bug with ghost instance ID.
parent 64e09a19d9
commit 0412e051b2
@@ -133,7 +133,12 @@ class Dynamips(IModule):
         if not sys.platform.startswith("win32"):
             self._callback.stop()
+
+        self.reset()
         # stop all Dynamips hypervisors
         if self._hypervisor_manager:
             self._hypervisor_manager.stop_all_hypervisors()
+
+        self.delete_dynamips_files()
+
         IModule.stop(self, signum)  # this will stop the I/O loop

     def _check_hypervisors(self):
@@ -173,6 +178,24 @@ class Dynamips(IModule):
             return None
         return instance_dict[device_id]

+    def delete_dynamips_files(self):
+        """
+        Deletes useless Dynamips files from the working directory
+        """
+
+        files = glob.glob(os.path.join(self._working_dir, "dynamips", "*.ghost"))
+        files += glob.glob(os.path.join(self._working_dir, "dynamips", "*_lock"))
+        files += glob.glob(os.path.join(self._working_dir, "dynamips", "ilt_*"))
+        files += glob.glob(os.path.join(self._working_dir, "dynamips", "c[0-9][0-9][0-9][0-9]_*_rommon_vars"))
+        files += glob.glob(os.path.join(self._working_dir, "dynamips", "c[0-9][0-9][0-9][0-9]_*_ssa"))
+        for file in files:
+            try:
+                log.debug("deleting file {}".format(file))
+                os.remove(file)
+            except OSError as e:
+                log.warn("could not delete file {}: {}".format(file, e))
+                continue
+
     @IModule.route("dynamips.reset")
     def reset(self, request=None):
         """
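The patterns above cover the transient files Dynamips leaves in its working directory: ghost IOS images (*.ghost), lock files (*_lock), ilt_* files, and the per-router c*_rommon_vars and c*_ssa files. A standalone sketch of the same glob-and-remove idiom; the helper name and directory are illustrative, not part of the codebase:

    import glob
    import logging
    import os

    log = logging.getLogger(__name__)

    def remove_matching(directory, patterns):
        # expand each pattern under the directory and remove matches,
        # skipping files that are locked or already gone
        for pattern in patterns:
            for path in glob.glob(os.path.join(directory, pattern)):
                try:
                    log.debug("deleting file {}".format(path))
                    os.remove(path)
                except OSError as e:
                    log.warning("could not delete file {}: {}".format(path, e))

    remove_matching("/tmp/gns3/dynamips",
                    ["*.ghost", "*_lock", "ilt_*",
                     "c[0-9][0-9][0-9][0-9]_*_rommon_vars",
                     "c[0-9][0-9][0-9][0-9]_*_ssa"])

Removal is attempted per file, so one locked file only skips that file instead of aborting the whole cleanup.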
@@ -207,19 +230,7 @@ class Dynamips(IModule):
         self._frame_relay_switches.clear()
         self._atm_switches.clear()

-        # delete useless Dynamips files from the working directory
-        files = glob.glob(os.path.join(self._working_dir, "dynamips", "*.ghost"))
-        files += glob.glob(os.path.join(self._working_dir, "dynamips", "*_lock"))
-        files += glob.glob(os.path.join(self._working_dir, "dynamips", "ilt_*"))
-        files += glob.glob(os.path.join(self._working_dir, "dynamips", "c[0-9][0-9][0-9][0-9]_*_rommon_vars"))
-        files += glob.glob(os.path.join(self._working_dir, "dynamips", "c[0-9][0-9][0-9][0-9]_*_ssa"))
-        for file in files:
-            try:
-                log.debug("deleting file {}".format(file))
-                os.remove(file)
-            except OSError as e:
-                log.warn("could not delete file {}: {}".format(file, e))
-                continue
+        self.delete_dynamips_files()

         self._hypervisor_manager = None
         log.info("dynamips module has been reset")
@@ -292,6 +303,7 @@ class Dynamips(IModule):
         else:
             if "project_name" in request:
                 new_working_dir = os.path.join(self._projects_dir, request["project_name"])
+
                 if self._projects_dir != self._working_dir != new_working_dir:

                     # trick to avoid file locks by Dynamips on Windows
@@ -300,6 +312,7 @@ class Dynamips(IModule):

                     if not os.path.isdir(new_working_dir):
                         try:
+                            self.delete_dynamips_files()
                             shutil.move(self._working_dir, new_working_dir)
                         except OSError as e:
                             log.error("could not move working directory from {} to {}: {}".format(self._working_dir,
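The new delete_dynamips_files() call runs immediately before shutil.move(). This matches the "trick to avoid file locks by Dynamips on Windows" comment above: transient files still held open in the old working directory can make the move fail, so they are removed first. A sketch with hypothetical paths:

    import glob
    import os
    import shutil

    src = "/tmp/gns3/old_project"
    dst = "/tmp/gns3/new_project"

    # drop transient files first; an open or locked file under src
    # can make shutil.move() raise OSError, notably on Windows
    for path in glob.glob(os.path.join(src, "dynamips", "*.ghost")):
        os.remove(path)

    try:
        shutil.move(src, dst)
    except OSError as e:
        print("could not move working directory from {} to {}: {}".format(src, dst, e))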
@@ -307,8 +320,11 @@ class Dynamips(IModule):
                                                                                                   e))
                             return
+
+                self._hypervisor_manager.working_dir = new_working_dir
+                self._working_dir = new_working_dir
             elif "working_dir" in request:
                 new_working_dir = request.pop("working_dir")

-            self._hypervisor_manager.working_dir = new_working_dir
-            self._working_dir = new_working_dir
+                self._hypervisor_manager.working_dir = new_working_dir
+                self._working_dir = new_working_dir

         # apply settings to the hypervisor manager
         for name, value in request.items():
@@ -276,8 +276,9 @@ class Hypervisor(DynamipsHypervisor):

         command = [self._path]
+        command.extend(["-N1"])  # use instance IDs for filenames
         command.extend(["-l", "dynamips_log_{}.txt".format(self._port)])  # log file
         if self._host != "0.0.0.0" and self._host != "::":
-            command.extend(["-H", self._host + ":" + str(self._port)])
+            command.extend(["-H", "{}:{}".format(self._host, self._port)])
         else:
             command.extend(["-H", str(self._port)])
         return command
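The rewritten -H argument swaps string concatenation for str.format and only prepends the host when it is not a wildcard address; for 0.0.0.0 or :: Dynamips receives the port alone. A sketch of the same logic as a free function; build_command() is hypothetical, the flags mirror the diff:

    def build_command(path, host, port):
        command = [path]
        command.append("-N1")  # use instance IDs for filenames
        command.extend(["-l", "dynamips_log_{}.txt".format(port)])  # per-port log file
        if host != "0.0.0.0" and host != "::":
            command.extend(["-H", "{}:{}".format(host, port)])  # explicit bind address
        else:
            command.extend(["-H", str(port)])  # wildcard bind: port only
        return command

    print(build_command("dynamips", "127.0.0.1", 7200))
    # ['dynamips', '-N1', '-l', 'dynamips_log_7200.txt', '-H', '127.0.0.1:7200']
    print(build_command("dynamips", "::", 7201))
    # ['dynamips', '-N1', '-l', 'dynamips_log_7201.txt', '-H', '7201']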
@@ -52,19 +52,24 @@ class Router(object):

     def __init__(self, hypervisor, name=None, platform="c7200", ghost_flag=False):

-        # create an unique ID
-        self._id = Router._instance_count
-        Router._instance_count += 1
+        if not ghost_flag:
+            # create an unique ID
+            self._id = Router._instance_count
+            Router._instance_count += 1

-        # let's create a unique name if none has been chosen
-        if not name:
-            name_id = self._id
-            while True:
-                name = "R" + str(name_id)
-                # check if the name has already been allocated to another router
-                if name not in self._allocated_names:
-                    break
-                name_id += 1
+            # let's create a unique name if none has been chosen
+            if not name:
+                name_id = self._id
+                while True:
+                    name = "R" + str(name_id)
+                    # check if the name has already been allocated to another router
+                    if name not in self._allocated_names:
+                        break
+                    name_id += 1
+        else:
+            log.info("creating a new ghost IOS file")
+            self._id = 0
+            name = "Ghost"

         self._allocated_names.append(name)
         self._hypervisor = hypervisor
@@ -133,9 +138,6 @@ class Router(object):
             # get the default base MAC address
             self._mac_addr = self._hypervisor.send("{platform} get_mac_addr {name}".format(platform=self._platform,
                                                                                            name=self._name))[0]
-        else:
-            log.info("creating a new ghost IOS file")
-            Router._instance_count -= 1

         self._hypervisor.devices.append(self)
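Taken together, the two Router hunks are the ghost instance ID fix itself. Previously every router, ghost or not, took an ID from the shared Router._instance_count, and the ghost path compensated by decrementing the counter afterwards (the else: branch removed above), so a ghost could briefly hold an ID that a real router would later receive. Ghost instances now get the reserved ID 0 and the name "Ghost" and never touch the counter. A minimal sketch; the class name and counter start are illustrative:

    class Node(object):
        _instance_count = 1

        def __init__(self, ghost_flag=False):
            if not ghost_flag:
                self._id = Node._instance_count
                Node._instance_count += 1
            else:
                self._id = 0  # reserved ghost ID, counter untouched

    r1 = Node()
    ghost = Node(ghost_flag=True)
    r2 = Node()
    print(r1._id, ghost._id, r2._id)  # 1 0 2 -- real IDs stay contiguous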
@@ -112,7 +112,14 @@ class IOU(IModule):
         """

         self._iou_callback.stop()
         self.reset()

+        # delete all IOU instances
+        for iou_id in self._iou_instances:
+            iou_instance = self._iou_instances[iou_id]
+            iou_instance.delete()
+
+        self.delete_iourc_file()
+
         IModule.stop(self, signum)  # this will stop the I/O loop

     def _check_iou_is_alive(self):
@@ -156,6 +163,18 @@ class IOU(IModule):
             return None
         return self._iou_instances[iou_id]

+    def delete_iourc_file(self):
+        """
+        Deletes the IOURC file.
+        """
+
+        if self._iourc and os.path.isfile(self._iourc):
+            try:
+                log.info("deleting iourc file {}".format(self._iourc))
+                os.remove(self._iourc)
+            except OSError as e:
+                log.warn("could not delete iourc file {}: {}".format(self._iourc, e))
+
     @IModule.route("iou.reset")
     def reset(self, request=None):
         """
@@ -174,13 +193,7 @@ class IOU(IModule):

         self._iou_instances.clear()
         self._allocated_udp_ports.clear()

-        if self._iourc and os.path.isfile(self._iourc):
-            try:
-                log.info("deleting iourc file {}".format(self._iourc))
-                os.remove(self._iourc)
-            except OSError as e:
-                log.warn("could not delete iourc file {}: {}".format(self._iourc, e))
+        self.delete_iourc_file()

         log.info("IOU module has been reset")
@@ -110,7 +110,7 @@ class VPCS(IModule):
         :param signum: signal number (if called by the signal handler)
         """

-        self._vpcs_callback.stop()
+        # self._vpcs_callback.stop()
         # delete all VPCS instances
         for vpcs_id in self._vpcs_instances:
             vpcs_instance = self._vpcs_instances[vpcs_id]
@@ -28,7 +28,6 @@ import os
 import tempfile
 import signal
 import errno
-import functools
 import socket
 import tornado.ioloop
 import tornado.web
@@ -166,7 +165,7 @@ class Server(object):
         ioloop = tornado.ioloop.IOLoop.instance()
         self._stream = zmqstream.ZMQStream(router, ioloop)
         self._stream.on_recv_stream(JSONRPCWebSocket.dispatch_message)
-        tornado.autoreload.add_reload_hook(functools.partial(self._cleanup, stop=False))
+        tornado.autoreload.add_reload_hook(self._reload_callback)

         def signal_handler(signum=None, frame=None):
             log.warning("Server got signal {}, exiting...".format(signum))
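The autoreload hook no longer reuses _cleanup through functools.partial(..., stop=False): the dedicated _reload_callback added in the next hunk only terminates the module processes, because Tornado's autoreloader re-executes the server process itself and the running I/O loop needs no explicit stop. A sketch of the hook pattern with an illustrative worker list:

    import tornado.autoreload
    import tornado.ioloop

    workers = []  # e.g. one multiprocessing.Process per module

    def reload_callback():
        # reap children just before the process re-executes itself,
        # so the restarted server can rebind its ports cleanly
        for worker in workers:
            if worker.is_alive():
                worker.terminate()
                worker.join(timeout=1)

    tornado.autoreload.add_reload_hook(reload_callback)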
@@ -226,6 +225,16 @@ class Server(object):
         self._router.send_string(module, zmq.SNDMORE)
         self._router.send_string("stop")

+    def _reload_callback(self):
+        """
+        Callback for the Tornado reload hook.
+        """
+
+        for module in self._modules:
+            if module.is_alive():
+                module.terminate()
+                module.join(timeout=1)
+
     def _shutdown(self):
         """
         Shutdowns the I/O loop and the ZeroMQ stream & socket.
@@ -242,13 +251,12 @@ class Server(object):
         ioloop = tornado.ioloop.IOLoop.instance()
         ioloop.stop()

-    def _cleanup(self, signum=None, stop=True):
+    def _cleanup(self, signum=None):
         """
         Shutdowns any running module processes
         and adds a callback to stop the event loop & ZeroMQ

-        :param signum: signal number (if called by the signal handler)
-        :param stop: stops the ioloop if True (default)
+        :param signum: signal number (if called by a signal handler)
         """

         # terminate all modules
@@ -263,9 +271,8 @@ class Server(object):
                 module.terminate()
                 module.join(timeout=1)

-        if stop:
-            ioloop = tornado.ioloop.IOLoop.instance()
-            if signum:
-                ioloop.add_callback_from_signal(self._shutdown)
-            else:
-                ioloop.add_callback(self._shutdown)
+        ioloop = tornado.ioloop.IOLoop.instance()
+        if signum:
+            ioloop.add_callback_from_signal(self._shutdown)
+        else:
+            ioloop.add_callback(self._shutdown)
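With the stop parameter gone, _cleanup always schedules _shutdown on the I/O loop, choosing add_callback_from_signal() whenever a signal number is present: that variant is safe to call from inside a signal handler, while plain add_callback() covers the normal path. A sketch of the pattern with hypothetical handler names:

    import signal
    import tornado.ioloop

    def shutdown():
        tornado.ioloop.IOLoop.instance().stop()

    def cleanup(signum=None, frame=None):
        ioloop = tornado.ioloop.IOLoop.instance()
        if signum:
            # called from a signal handler: use the signal-safe variant
            ioloop.add_callback_from_signal(shutdown)
        else:
            ioloop.add_callback(shutdown)

    signal.signal(signal.SIGTERM, cleanup)
    signal.signal(signal.SIGINT, cleanup)
    tornado.ioloop.IOLoop.instance().start()  # runs until a signal arrives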