Saifeddine ALOUI 2024-01-17 00:06:38 +01:00
parent 5f76e4a773
commit b4a98d9f3a
16 changed files with 215 additions and 403 deletions


@@ -120,7 +120,7 @@ def parse_requirements_file(requirements_path):
class LoLLMsAPI(LollmsApplication):
def __init__(self, config:LOLLMSConfig, socketio:SocketIO, config_file_path:str, lollms_paths: LollmsPaths) -> None:
self.socketio = socketio
self.sio = socketio
super().__init__("Lollms_webui",config, lollms_paths, callback=self.process_chunk, socketio=socketio)
@@ -216,13 +216,13 @@ class LoLLMsAPI(LollmsApplication):
"processing":False,
"schedule_for_deletion":False
}
self.socketio.emit('connected', room=request.sid)
self.sio.emit('connected', room=request.sid)
ASCIIColors.success(f'Client {request.sid} connected')
@socketio.on('disconnect')
def disconnect():
try:
self.socketio.emit('disconnected', room=request.sid)
self.sio.emit('disconnected', room=request.sid)
if self.connections[request.sid]["processing"]:
self.connections[request.sid]["schedule_for_deletion"]=True
else:
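The connect/disconnect handlers above maintain a per-client registry in which a client that disconnects mid-generation is only flagged for deletion; the worker removes the entry once processing ends. A distilled sketch of that lifecycle (function names are illustrative):

```python
connections: dict[str, dict] = {}

def on_connect(sid: str) -> None:
    connections[sid] = {"processing": False, "schedule_for_deletion": False}

def on_disconnect(sid: str) -> None:
    if connections[sid]["processing"]:
        # Generation still running: defer cleanup to the worker.
        connections[sid]["schedule_for_deletion"] = True
    else:
        del connections[sid]

def on_generation_finished(sid: str) -> None:
    connections[sid]["processing"] = False
    if connections[sid]["schedule_for_deletion"]:
        del connections[sid]
```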
@@ -313,7 +313,7 @@ class LoLLMsAPI(LollmsApplication):
self.error("No model selected. Please make sure you select a model before starting generation", client_id = client_id)
return
self.new_message(client_id, self.config.user_name, message, sender_type=SENDER_TYPES.SENDER_TYPES_USER, open=True)
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
else:
if self.personality is None:
self.warning("Select a personality")
@@ -326,7 +326,7 @@ class LoLLMsAPI(LollmsApplication):
self.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
return
self.new_message(client_id, self.personality.name, "[edit this to put your ai answer start]", open=True)
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
# -- interactive view --
@@ -358,8 +358,8 @@ class LoLLMsAPI(LollmsApplication):
def upgrade_vectorization():
if self.config.data_vectorization_activate and self.config.use_discussions_history:
try:
self.socketio.emit('show_progress')
self.socketio.sleep(0)
self.sio.emit('show_progress')
self.sio.sleep(0)
ASCIIColors.yellow("0- Detected discussion vectorization request")
folder = self.lollms_paths.personal_databases_path/"vectorized_dbs"
folder.mkdir(parents=True, exist_ok=True)
@@ -371,8 +371,8 @@ class LoLLMsAPI(LollmsApplication):
index = 0
nb_discussions = len(discussions)
for (title,discussion) in tqdm(discussions):
self.socketio.emit('update_progress',{'value':int(100*(index/nb_discussions))})
self.socketio.sleep(0)
self.sio.emit('update_progress',{'value':int(100*(index/nb_discussions))})
self.sio.sleep(0)
index += 1
if discussion!='':
skill = self.learn_from_discussion(title, discussion)
@@ -387,8 +387,8 @@ class LoLLMsAPI(LollmsApplication):
ASCIIColors.yellow("Ready")
except Exception as ex:
ASCIIColors.error(f"Couldn't vectorize database:{ex}")
self.socketio.emit('hide_progress')
self.socketio.sleep(0)
self.sio.emit('hide_progress')
self.sio.sleep(0)
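The vectorization block above follows a show/update/hide progress protocol, with `sio.sleep(0)` after each emit to yield the server's event loop so updates actually flush to the browser. A condensed sketch, assuming `sio` behaves like a Flask-SocketIO server and `learn_fn` stands in for `learn_from_discussion`:

```python
def vectorize_with_progress(sio, discussions, learn_fn):
    sio.emit('show_progress')
    sio.sleep(0)  # yield so the emit is delivered before the loop starts
    total = len(discussions)
    for index, (title, discussion) in enumerate(discussions):
        sio.emit('update_progress', {'value': int(100 * index / total)})
        sio.sleep(0)
        if discussion != '':
            learn_fn(title, discussion)
    sio.emit('hide_progress')
    sio.sleep(0)
```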
@@ -402,14 +402,14 @@ class LoLLMsAPI(LollmsApplication):
model_url = data["model_url"]
signature = f"{model_name}_{binding_folder}_{model_url}"
self.download_infos[signature]["cancel"]=True
self.socketio.emit('canceled', {
self.sio.emit('canceled', {
'status': True
},
room=request.sid
)
except Exception as ex:
trace_exception(ex)
self.socketio.emit('canceled', {
self.sio.emit('canceled', {
'status': False,
'error':str(ex)
},
@@ -418,196 +418,8 @@ class LoLLMsAPI(LollmsApplication):
@socketio.on('install_model')
def install_model(data):
room_id = request.sid
def install_model_():
print("Install model triggered")
model_path = data["path"].replace("\\","/")
if data["type"].lower() in model_path.lower():
model_type:str=data["type"]
else:
mtt = None
for mt in self.binding.models_dir_names:
if mt.lower() in model_path.lower():
mtt = mt
break
if mtt:
model_type = mtt
else:
model_type:str=self.binding.models_dir_names[0]
progress = 0
installation_dir = self.binding.searchModelParentFolder(model_path.split('/')[-1], model_type)
if model_type=="gptq" or model_type=="awq" or model_type=="transformers":
parts = model_path.split("/")
if len(parts)==2:
filename = parts[1]
else:
filename = parts[4]
installation_path = installation_dir / filename
elif model_type=="gpt4all":
filename = data["variant_name"]
model_path = "http://gpt4all.io/models/gguf/"+filename
installation_path = installation_dir / filename
else:
filename = Path(model_path).name
installation_path = installation_dir / filename
print("Model install requested")
print(f"Model path : {model_path}")
model_name = filename
binding_folder = self.config["binding_name"]
model_url = model_path
signature = f"{model_name}_{binding_folder}_{model_url}"
try:
self.download_infos[signature]={
"start_time":datetime.now(),
"total_size":self.binding.get_file_size(model_path),
"downloaded_size":0,
"progress":0,
"speed":0,
"cancel":False
}
if installation_path.exists():
print("Error: Model already exists. please remove it first")
socketio.emit('install_progress',{
'status': False,
'error': f'model already exists. Please remove it first.\nThe model can be found here:{installation_path}',
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': self.download_infos[signature]['total_size'],
'downloaded_size': self.download_infos[signature]['downloaded_size'],
'progress': self.download_infos[signature]['progress'],
'speed': self.download_infos[signature]['speed'],
}, room=room_id
)
return
socketio.emit('install_progress',{
'status': True,
'progress': progress,
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': self.download_infos[signature]['total_size'],
'downloaded_size': self.download_infos[signature]['downloaded_size'],
'progress': self.download_infos[signature]['progress'],
'speed': self.download_infos[signature]['speed'],
}, room=room_id)
def callback(downloaded_size, total_size):
progress = (downloaded_size / total_size) * 100
now = datetime.now()
dt = (now - self.download_infos[signature]['start_time']).total_seconds()
speed = downloaded_size/dt
self.download_infos[signature]['downloaded_size'] = downloaded_size
self.download_infos[signature]['speed'] = speed
if progress - self.download_infos[signature]['progress']>2:
self.download_infos[signature]['progress'] = progress
socketio.emit('install_progress',{
'status': True,
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': self.download_infos[signature]['total_size'],
'downloaded_size': self.download_infos[signature]['downloaded_size'],
'progress': self.download_infos[signature]['progress'],
'speed': self.download_infos[signature]['speed'],
}, room=room_id)
if self.download_infos[signature]["cancel"]:
raise Exception("canceled")
if hasattr(self.binding, "download_model"):
try:
self.binding.download_model(model_path, installation_path, callback)
except Exception as ex:
ASCIIColors.warning(str(ex))
trace_exception(ex)
socketio.emit('install_progress',{
'status': False,
'error': 'canceled',
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': self.download_infos[signature]['total_size'],
'downloaded_size': self.download_infos[signature]['downloaded_size'],
'progress': self.download_infos[signature]['progress'],
'speed': self.download_infos[signature]['speed'],
}, room=room_id
)
del self.download_infos[signature]
try:
if installation_path.is_dir():
shutil.rmtree(installation_path)
else:
installation_path.unlink()
except Exception as ex:
trace_exception(ex)
ASCIIColors.error(f"Couldn't delete file. Please try to remove it manually.\n{installation_path}")
return
else:
try:
self.download_file(model_path, installation_path, callback)
except Exception as ex:
ASCIIColors.warning(str(ex))
trace_exception(ex)
socketio.emit('install_progress',{
'status': False,
'error': 'canceled',
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': self.download_infos[signature]['total_size'],
'downloaded_size': self.download_infos[signature]['downloaded_size'],
'progress': self.download_infos[signature]['progress'],
'speed': self.download_infos[signature]['speed'],
}, room=room_id
)
del self.download_infos[signature]
installation_path.unlink()
return
socketio.emit('install_progress',{
'status': True,
'error': '',
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': self.download_infos[signature]['total_size'],
'downloaded_size': self.download_infos[signature]['downloaded_size'],
'progress': 100,
'speed': self.download_infos[signature]['speed'],
}, room=room_id)
del self.download_infos[signature]
except Exception as ex:
trace_exception(ex)
socketio.emit('install_progress',{
'status': False,
'error': str(ex),
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': '',
'total_size': 0,
'downloaded_size': 0,
'progress': 0,
'speed': 0,
}, room=room_id
)
tpe = threading.Thread(target=install_model_, args=())
client_id = request.sid
tpe = threading.Thread(target=self.binding.install_model, args=(data["type"], data["path"], data["variant_name"], client_id))
tpe.start()
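This hunk deletes roughly 190 lines of inline download, progress, and cancellation logic and replaces them with a single threaded call to `self.binding.install_model(...)`, moving model installation into the binding layer. The remaining web-layer responsibility reduces to a dispatch like this sketch (the `daemon` flag is an assumption, not in the diff):

```python
import threading

def handle_install_model(binding, data: dict, client_id: str) -> None:
    # The binding now owns download, progress reporting, and cancellation;
    # client_id lets it emit install_progress events back to the right room.
    worker = threading.Thread(
        target=binding.install_model,
        args=(data["type"], data["path"], data["variant_name"], client_id),
        daemon=True,  # assumption: don't block interpreter shutdown
    )
    worker.start()
```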
@socketio.on('uninstall_model')
@@ -705,12 +517,12 @@ class LoLLMsAPI(LollmsApplication):
finished_generating_at=None
)
self.socketio.emit('discussion_created',
self.sio.emit('discussion_created',
{'id':self.connections[client_id]["current_discussion"].discussion_id},
room=client_id
)
else:
self.socketio.emit('discussion_created',
self.sio.emit('discussion_created',
{'id':0},
room=client_id
)
@@ -730,7 +542,7 @@ class LoLLMsAPI(LollmsApplication):
self.connections[client_id]["current_discussion"] = self.db.create_discussion()
messages = self.connections[client_id]["current_discussion"].get_messages()
jsons = [m.to_json() for m in messages]
self.socketio.emit('discussion',
self.sio.emit('discussion',
jsons,
room=client_id
)
@@ -801,10 +613,10 @@ class LoLLMsAPI(LollmsApplication):
else:
result = self.personality.add_file(file_path, partial(self.process_chunk, client_id=client_id))
self.socketio.emit('file_received', {'status': True, 'filename': filename})
self.sio.emit('file_received', {'status': True, 'filename': filename})
else:
# Request the next chunk from the client
self.socketio.emit('request_next_chunk', {'offset': offset + len(chunk)})
self.sio.emit('request_next_chunk', {'offset': offset + len(chunk)})
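The upload handler above implements a pull-based chunk protocol: after consuming each chunk the server requests the next one, echoing the running byte offset. A self-contained sketch under stated assumptions — the `is_last_chunk` flag and in-memory buffering are illustrative; only the event names and the offset arithmetic come from the diff:

```python
open_files: dict[str, bytearray] = {}  # sid -> buffered upload

def on_file_chunk(sio, sid: str, data: dict) -> None:
    filename, chunk, offset = data["filename"], data["chunk"], data["offset"]
    open_files.setdefault(sid, bytearray()).extend(chunk)
    if data.get("is_last_chunk", False):  # assumption: client marks the end
        with open(filename, "wb") as f:
            f.write(open_files.pop(sid))
        sio.emit('file_received', {'status': True, 'filename': filename})
    else:
        # Ask the client for the next slice, advancing the offset.
        sio.emit('request_next_chunk', {'offset': offset + len(chunk)})
```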
@socketio.on('execute_command')
def execute_command(data):
@@ -820,7 +632,7 @@ class LoLLMsAPI(LollmsApplication):
self.close_message(client_id)
# -- misc --
@self.socketio.on('execute_python_code')
@self.sio.on('execute_python_code')
def execute_python_code(data):
"""Executes Python code and returns the output."""
client_id = request.sid
@@ -841,7 +653,7 @@ class LoLLMsAPI(LollmsApplication):
# Get the output.
output = interpreter.getvalue()
self.socketio.emit("execution_output", {"output":output,"execution_time":end_time - start_time}, room=client_id)
self.sio.emit("execution_output", {"output":output,"execution_time":end_time - start_time}, room=client_id)
# -- generation --
@@ -856,24 +668,24 @@ class LoLLMsAPI(LollmsApplication):
ASCIIColors.error(f'Client {request.sid} canceled generation')
self.busy=False
@self.socketio.on('cancel_text_generation')
@self.sio.on('cancel_text_generation')
def cancel_text_generation(data):
client_id = request.sid
self.connections[client_id]["requested_stop"]=True
print(f"Client {client_id} requested canceling generation")
self.socketio.emit("generation_canceled", {"message":"Generation is canceled."}, room=client_id)
self.socketio.sleep(0)
self.sio.emit("generation_canceled", {"message":"Generation is canceled."}, room=client_id)
self.sio.sleep(0)
self.busy = False
# A copy of the original lollms-server generation code needed for playground
@self.socketio.on('generate_text')
@self.sio.on('generate_text')
def handle_generate_text(data):
client_id = request.sid
self.cancel_gen = False
ASCIIColors.info(f"Text generation requested by client: {client_id}")
if self.busy:
self.socketio.emit("busy", {"message":"I am busy. Come back later."}, room=client_id)
self.socketio.sleep(0)
self.sio.emit("busy", {"message":"I am busy. Come back later."}, room=client_id)
self.sio.sleep(0)
ASCIIColors.warning(f"OOps request {client_id} refused!! Server busy")
return
def generate_text():
@@ -908,8 +720,8 @@ class LoLLMsAPI(LollmsApplication):
ASCIIColors.success(f"generated:{len(self.answer['full_text'].split())} words", end='\r')
if text is not None:
self.answer["full_text"] = self.answer["full_text"] + text
self.socketio.emit('text_chunk', {'chunk': text, 'type':MSG_TYPE.MSG_TYPE_CHUNK.value}, room=client_id)
self.socketio.sleep(0)
self.sio.emit('text_chunk', {'chunk': text, 'type':MSG_TYPE.MSG_TYPE_CHUNK.value}, room=client_id)
self.sio.sleep(0)
if client_id in self.connections:# Client disconnected
if self.connections[client_id]["requested_stop"]:
return False
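The streaming path relies on a per-chunk callback contract: each generated piece is appended to the running answer and emitted as a `text_chunk` event, and returning `False` tells the generator to stop, which is how client cancellation propagates. A distilled sketch of that callback as a factory:

```python
def make_stream_callback(sio, client_id: str, answer: dict, connections: dict):
    def on_chunk(text) -> bool:
        if text is not None:
            answer["full_text"] += text
            sio.emit('text_chunk', {'chunk': text}, room=client_id)
            sio.sleep(0)  # let the emit flush mid-generation
        if client_id in connections and connections[client_id]["requested_stop"]:
            return False  # signal the generator to stop
        return True
    return on_chunk
```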
@@ -940,10 +752,10 @@ class LoLLMsAPI(LollmsApplication):
if client_id in self.connections:
if not self.connections[client_id]["requested_stop"]:
# Emit the generated text to the client
self.socketio.emit('text_generated', {'text': generated_text}, room=client_id)
self.socketio.sleep(0)
self.sio.emit('text_generated', {'text': generated_text}, room=client_id)
self.sio.sleep(0)
except Exception as ex:
self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
self.sio.emit('generation_error', {'error': str(ex)}, room=client_id)
ASCIIColors.error(f"\ndone")
self.busy = False
else:
@@ -982,8 +794,8 @@ class LoLLMsAPI(LollmsApplication):
def callback(text, message_type: MSG_TYPE, metadata:dict={}):
if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
self.answer["full_text"] = self.answer["full_text"] + text
self.socketio.emit('text_chunk', {'chunk': text}, room=client_id)
self.socketio.sleep(0)
self.sio.emit('text_chunk', {'chunk': text}, room=client_id)
self.sio.sleep(0)
try:
if self.connections[client_id]["requested_stop"]:
return False
@@ -1013,19 +825,19 @@ class LoLLMsAPI(LollmsApplication):
ASCIIColors.success("\ndone")
# Emit the generated text to the client
self.socketio.emit('text_generated', {'text': generated_text}, room=client_id)
self.socketio.sleep(0)
self.sio.emit('text_generated', {'text': generated_text}, room=client_id)
self.sio.sleep(0)
except Exception as ex:
self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
self.sio.emit('generation_error', {'error': str(ex)}, room=client_id)
ASCIIColors.error(f"\ndone")
self.busy = False
except Exception as ex:
trace_exception(ex)
self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
self.sio.emit('generation_error', {'error': str(ex)}, room=client_id)
self.busy = False
# Start the text generation task in a separate thread
task = self.socketio.start_background_task(target=generate_text)
task = self.sio.start_background_task(target=generate_text)
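Generation itself is launched with `start_background_task`, which schedules the worker on whatever async model the server uses (threads, eventlet, or gevent) so the event handler returns immediately. A minimal sketch of the handler shape, including the busy gate from the hunk above (`state` is an illustrative holder for the flag):

```python
from flask import request
from flask_socketio import SocketIO

def register(sio: SocketIO, state):
    @sio.on('generate_text')
    def handle_generate_text(data):
        client_id = request.sid
        if state.busy:
            sio.emit("busy", {"message": "I am busy. Come back later."},
                     room=client_id)
            return
        state.busy = True

        def generate_text():
            try:
                ...  # long-running model call; streams text_chunk events
            finally:
                state.busy = False  # always release the gate

        sio.start_background_task(target=generate_text)
```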
@socketio.on('generate_msg')
def generate_msg(data):
@@ -1065,7 +877,7 @@ class LoLLMsAPI(LollmsApplication):
self.connections[client_id]['generation_thread'] = threading.Thread(target=self.start_message_generation, args=(message, message.id, client_id))
self.connections[client_id]['generation_thread'].start()
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
ASCIIColors.info("Started generation task")
self.busy=True
#tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id, client_id))
@@ -1157,7 +969,7 @@ class LoLLMsAPI(LollmsApplication):
self.connections[client_id]['generation_thread'] = threading.Thread(target=self.start_message_generation, args=(message, message.id, client_id))
self.connections[client_id]['generation_thread'].start()
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
ASCIIColors.info("Started generation task")
self.busy=True
#tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id, client_id))
@@ -1747,14 +1559,14 @@ class LoLLMsAPI(LollmsApplication):
display_type:NotificationDisplayType=NotificationDisplayType.TOAST,
verbose=True
):
self.socketio.emit('notification', {
self.sio.emit('notification', {
'content': content,# self.connections[client_id]["generated_text"],
'notification_type': notification_type.value,
"duration": duration,
'display_type':display_type.value
}, room=client_id
)
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
if verbose:
if notification_type==NotificationType.NOTIF_SUCCESS:
ASCIIColors.success(content)
@@ -1795,7 +1607,7 @@ class LoLLMsAPI(LollmsApplication):
personality = self.config["personalities"][self.config["active_personality_id"]],
) # first the content is empty, but we'll fill it at the end
self.socketio.emit('new_message',
self.sio.emit('new_message',
{
"sender": sender,
"message_type": message_type.value,
@@ -1827,7 +1639,7 @@ class LoLLMsAPI(LollmsApplication):
self.connections[client_id]["current_discussion"].current_message.finished_generating_at=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
mtdt = json.dumps(metadata, indent=4) if metadata is not None and type(metadata)== list else metadata
if self.nb_received_tokens==1:
self.socketio.emit('update_message', {
self.sio.emit('update_message', {
"sender": self.personality.name,
'id':self.connections[client_id]["current_discussion"].current_message.id,
'content': "✍ warming up ...",# self.connections[client_id]["generated_text"],
@@ -1841,7 +1653,7 @@ class LoLLMsAPI(LollmsApplication):
)
self.socketio.emit('update_message', {
self.sio.emit('update_message', {
"sender": self.personality.name,
'id':self.connections[client_id]["current_discussion"].current_message.id,
'content': chunk,# self.connections[client_id]["generated_text"],
@@ -1853,7 +1665,7 @@ class LoLLMsAPI(LollmsApplication):
'metadata':metadata
}, room=client_id
)
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
if msg_type != MSG_TYPE.MSG_TYPE_INFO:
self.connections[client_id]["current_discussion"].update_message(self.connections[client_id]["generated_text"], new_metadata=mtdt, new_ui=ui)
@@ -1866,7 +1678,7 @@ class LoLLMsAPI(LollmsApplication):
self.connections[client_id]["generated_text"]=self.connections[client_id]["generated_text"].split("!@>")[0]
# Send final message
self.connections[client_id]["current_discussion"].current_message.finished_generating_at=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.socketio.emit('close_message', {
self.sio.emit('close_message', {
"sender": self.personality.name,
"id": self.connections[client_id]["current_discussion"].current_message.id,
"content":self.connections[client_id]["generated_text"],
@@ -2104,7 +1916,7 @@ class LoLLMsAPI(LollmsApplication):
else:
self.new_message(client_id, self.personality.name, "")
self.update_message(client_id, "✍ warming up ...", msg_type=MSG_TYPE.MSG_TYPE_STEP_START)
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
# prepare query and reception
self.discussion_messages, self.current_message, tokens = self.prepare_query(client_id, message_id, is_continue, n_tokens=self.config.min_n_predict, generation_type=generation_type)
@@ -2184,7 +1996,7 @@ class LoLLMsAPI(LollmsApplication):
# Send final message
self.close_message(client_id)
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
self.connections[client_id]["processing"]=False
if self.connections[client_id]["schedule_for_deletion"]:
del self.connections[client_id]
@@ -2198,7 +2010,7 @@ class LoLLMsAPI(LollmsApplication):
if ttl is None or ttl=="" or ttl=="untitled":
title = self.make_discussion_title(d, client_id=client_id)
d.rename(title)
self.socketio.emit('disucssion_renamed',{
self.sio.emit('disucssion_renamed',{
'status': True,
'discussion_id':d.discussion_id,
'title':title

app.py

@@ -58,7 +58,7 @@ if __name__ == "__main__":
if args.port:
config.port=args.port
LOLLMSWebUI.build_instance(config=config, lollms_paths=lollms_paths, args=args, socketio=sio)
LOLLMSWebUI.build_instance(config=config, lollms_paths=lollms_paths, args=args, sio=sio)
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()
# Import all endpoints
from lollms.server.endpoints.lollms_binding_files_server import router as lollms_binding_files_server_router


@@ -850,7 +850,7 @@ try:
if self.config["debug"]:
print("Configuration saved")
# Tell that the setting was changed
self.socketio.emit('save_settings', {"status":True})
self.sio.emit('save_settings', {"status":True})
return jsonify({"status":True})
@@ -1383,7 +1383,7 @@ try:
nb_discussions = len(discussions)
for (title,discussion) in tqdm(discussions):
self.socketio.emit('update_progress',{'value':int(100*(index/nb_discussions))})
self.sio.emit('update_progress',{'value':int(100*(index/nb_discussions))})
index += 1
if discussion!='':
skill = self.learn_from_discussion(title, discussion)
@@ -1738,8 +1738,8 @@ try:
def restart_program(self):
socketio.reboot=True
self.socketio.stop()
self.socketio.sleep(1)
self.sio.stop()
self.sio.sleep(1)
def update_software(self):
@@ -1752,7 +1752,7 @@ try:
ASCIIColors.info("")
ASCIIColors.info("")
ASCIIColors.info("")
self.socketio.stop()
self.sio.stop()
run_update_script(self.args)
sys.exit()


@@ -93,7 +93,7 @@ def select_database(data:DatabaseSelectionParameters):
nb_discussions = len(discussions)
for (title,discussion) in tqdm(discussions):
lollmsElfServer.socketio.emit('update_progress',{'value':int(100*(index/nb_discussions))})
lollmsElfServer.sio.emit('update_progress',{'value':int(100*(index/nb_discussions))})
index += 1
if discussion!='':
skill = lollmsElfServer.learn_from_discussion(title, discussion)


@@ -35,11 +35,11 @@ async def get_lollms_webui_version():
async def restart_program():
"""Restart the program."""
# Stop the socketIO server
lollmsElfServer.socketio.shutdown()
lollmsElfServer.sio.shutdown()
# Sleep for 1 second before rebooting
lollmsElfServer.socketio.sleep(1)
lollmsElfServer.sio.sleep(1)
# Reboot the program
lollmsElfServer.socketio.reboot = True
lollmsElfServer.sio.reboot = True
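The restart endpoint stops the Socket.IO server, waits a beat, then sets a `reboot` flag rather than re-executing in place; whoever launched the process is expected to act on that flag. A hypothetical outer-runner sketch of that convention (the flag-checking side is not shown in the diff):

```python
import os
import sys
import time

def relaunch_if_requested(reboot_requested: bool) -> None:
    if reboot_requested:
        time.sleep(1)  # give the server a moment to finish shutting down
        # Replace the current process with a fresh copy of itself.
        os.execv(sys.executable, [sys.executable] + sys.argv)
```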
@router.get("/update_software")
async def update_software():
@@ -55,7 +55,7 @@ async def update_software():
ASCIIColors.info("")
ASCIIColors.info("")
# Stop the socketIO server
lollmsElfServer.socketio.shutdown()
lollmsElfServer.sio.shutdown()
# Run the update script using the provided arguments
lollmsElfServer.run_update_script(lollmsElfServer.args)


@@ -79,12 +79,12 @@ def add_events(sio:socketio):
finished_generating_at=None
)
await lollmsElfServer.socketio.emit('discussion_created',
await lollmsElfServer.sio.emit('discussion_created',
{'id':lollmsElfServer.connections[client_id]["current_discussion"].discussion_id},
to=client_id
)
else:
await lollmsElfServer.socketio.emit('discussion_created',
await lollmsElfServer.sio.emit('discussion_created',
{'id':0},
to=client_id
)
@@ -104,7 +104,7 @@ def add_events(sio:socketio):
lollmsElfServer.connections[client_id]["current_discussion"] = lollmsElfServer.db.create_discussion()
messages = lollmsElfServer.connections[client_id]["current_discussion"].get_messages()
jsons = [m.to_json() for m in messages]
await lollmsElfServer.socketio.emit('discussion',
await lollmsElfServer.sio.emit('discussion',
jsons,
to=client_id
)
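Note the API shift in these new event modules: emits are awaited and target clients with `to=client_id`, which is the python-socketio `AsyncServer` calling convention, where the older Flask-SocketIO code used a blocking `emit(..., room=...)`. A minimal async sketch:

```python
import socketio

sio = socketio.AsyncServer(async_mode="asgi")

@sio.event
async def new_discussion(sid, data):
    # ... create the discussion here (omitted) ...
    await sio.emit('discussion_created', {'id': 0}, to=sid)  # this client only
```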


@@ -70,7 +70,7 @@ def add_events(sio:socketio):
lollmsElfServer.connections[client_id]['generation_thread'] = threading.Thread(target=lollmsElfServer.start_message_generation, args=(message, message.id, client_id))
lollmsElfServer.connections[client_id]['generation_thread'].start()
lollmsElfServer.socketio.sleep(0.01)
lollmsElfServer.sio.sleep(0.01)
ASCIIColors.info("Started generation task")
lollmsElfServer.busy=True
#tpe = threading.Thread(target=lollmsElfServer.start_message_generation, args=(message, message_id, client_id))

@@ -1 +1 @@
Subproject commit eaedc6b3cbe8d5f303f7993878c11b7b57c8eb06
Subproject commit 1cb5e5b3372b0cd1a13dac35283df19177a8413c


@@ -55,7 +55,6 @@ if not PackageManager.check_package_installed("requests"):
if not PackageManager.check_package_installed("bs4"):
PackageManager.install_package("beautifulsoup4")
import requests
from flask_socketio import SocketIO
from bs4 import BeautifulSoup
@@ -95,7 +94,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
try_select_model=False,
callback=None,
args=None,
socketio = None
sio = None
):
if LOLLMSWebUI.__instance is None:
LOLLMSWebUI(
@@ -109,7 +108,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
try_select_model=try_select_model,
callback=callback,
args=args,
socketio=socketio
sio=sio
)
return LOLLMSWebUI.__instance
def __init__(
@@ -124,7 +123,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
try_select_model=False,
callback=None,
args=None,
socketio=None
sio=None
) -> None:
super().__init__(
config,
@@ -136,7 +135,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
try_select_binding=try_select_binding,
try_select_model=try_select_model,
callback=callback,
socketio=socketio
sio=sio
)
self.app_name:str = "LOLLMSWebUI"
self.version:str = lollms_webui_version
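`LOLLMSWebUI` is built as a singleton: `build_instance` constructs the one instance (now taking `sio` instead of `socketio`) and `get_instance` hands it out everywhere else. A bare-bones sketch of the pattern:

```python
class Server:
    __instance = None

    def __init__(self, sio=None):
        self.sio = sio

    @classmethod
    def build_instance(cls, sio=None):
        if cls.__instance is None:
            cls.__instance = cls(sio=sio)  # construct exactly once
        return cls.__instance

    @classmethod
    def get_instance(cls):
        return cls.__instance
```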
@@ -205,14 +204,14 @@ class LOLLMSWebUI(LOLLMSElfServer):
}
if Media_on:
try:
self.webcam = WebcamImageSender(socketio,lollmsCom=self)
self.webcam = WebcamImageSender(sio,lollmsCom=self)
except:
self.webcam = None
try:
self.rec_output_folder = lollms_paths.personal_outputs_path/"audio_rec"
self.rec_output_folder.mkdir(exist_ok=True, parents=True)
self.summoned = False
self.audio_cap = AudioRecorder(socketio,self.rec_output_folder/"rt.wav", callback=self.audio_callback,lollmsCom=self)
self.audio_cap = AudioRecorder(sio,self.rec_output_folder/"rt.wav", callback=self.audio_callback,lollmsCom=self)
except:
self.audio_cap = None
self.rec_output_folder = None
@@ -223,7 +222,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
# Define a WebSocket event handler
@socketio.event
@sio.event
async def connect(sid, environ):
#Create a new connection information
self.connections[sid] = {
@@ -236,10 +235,10 @@ class LOLLMSWebUI(LOLLMSElfServer):
"processing":False,
"schedule_for_deletion":False
}
await self.socketio.emit('connected', to=sid)
await self.sio.emit('connected', to=sid)
ASCIIColors.success(f'Client {sid} connected')
@socketio.event
@sio.event
def disconnect(sid):
try:
if self.connections[sid]["processing"]:
@@ -378,7 +377,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.connections[client_id]['generation_thread'] = threading.Thread(target=self.start_message_generation, args=(message, message.id, client_id))
self.connections[client_id]['generation_thread'].start()
self.socketio.sleep(0.01)
self.sio.sleep(0.01)
ASCIIColors.info("Started generation task")
self.busy=True
#tpe = threading.Thread(target=self.start_message_generation, args=(message, message_id, client_id))
@@ -994,7 +993,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
verbose=True,
):
run_async(partial(self.socketio.emit,'notification', {
run_async(partial(self.sio.emit,'notification', {
'content': content,# self.connections[client_id]["generated_text"],
'notification_type': notification_type.value,
"duration": duration,
@@ -1042,7 +1041,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
personality = self.config["personalities"][self.config["active_personality_id"]],
) # first the content is empty, but we'll fill it at the end
run_async(partial(
self.socketio.emit,'new_message',
self.sio.emit,'new_message',
{
"sender": sender,
"message_type": message_type.value,
@@ -1076,7 +1075,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
mtdt = json.dumps(metadata, indent=4) if metadata is not None and type(metadata)== list else metadata
if self.nb_received_tokens==1:
run_async(
partial(self.socketio.emit,'update_message', {
partial(self.sio.emit,'update_message', {
"sender": self.personality.name,
'id':self.connections[client_id]["current_discussion"].current_message.id,
'content': "✍ warming up ...",# self.connections[client_id]["generated_text"],
@@ -1091,7 +1090,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
)
run_async(
partial(self.socketio.emit,'update_message', {
partial(self.sio.emit,'update_message', {
"sender": self.personality.name,
'id':self.connections[client_id]["current_discussion"].current_message.id,
'content': chunk,# self.connections[client_id]["generated_text"],
@@ -1117,7 +1116,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
# Send final message
self.connections[client_id]["current_discussion"].current_message.finished_generating_at=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
run_async(
partial(self.socketio.emit,'close_message', {
partial(self.sio.emit,'close_message', {
"sender": self.personality.name,
"id": self.connections[client_id]["current_discussion"].current_message.id,
"content":self.connections[client_id]["generated_text"],
@@ -1358,7 +1357,6 @@ class LOLLMSWebUI(LOLLMSElfServer):
else:
self.new_message(client_id, self.personality.name, "")
self.update_message(client_id, "✍ warming up ...", msg_type=MSG_TYPE.MSG_TYPE_STEP_START)
self.socketio.sleep(0.01)
# prepare query and reception
self.discussion_messages, self.current_message, tokens, context_details = self.prepare_query(client_id, message_id, is_continue, n_tokens=self.config.min_n_predict, generation_type=generation_type)
@@ -1453,7 +1451,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
title = self.make_discussion_title(d, client_id=client_id)
d.rename(title)
asyncio.run(
self.socketio.emit('disucssion_renamed',{
self.sio.emit('disucssion_renamed',{
'status': True,
'discussion_id':d.discussion_id,
'title':title

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/dist/index.html (vendored)

@@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-1800178d.js"></script>
<link rel="stylesheet" href="/assets/index-79a3681d.css">
<script type="module" crossorigin src="/assets/index-b802cd6b.js"></script>
<link rel="stylesheet" href="/assets/index-cbbeaa9e.css">
</head>
<body>
<div id="app"></div>


@@ -8,12 +8,14 @@
<RouterLink :to="{ name: 'playground' }" class="link-item dark:link-item-dark">
Playground
</RouterLink>
<RouterLink v-if="this.$store.state.config.enable_voice_service" :to="{ name: 'interactive' }" class="link-item dark:link-item-dark">
<RouterLink v-if="$store.state.config.enable_voice_service" :to="{ name: 'interactive' }" class="link-item dark:link-item-dark">
interactive
</RouterLink>
<!--
<RouterLink :to="{ name: 'nodes' }" class="link-item dark:link-item-dark">
Nodes (under construction)
</RouterLink>
-->
<!--
<RouterLink :to="{ name: 'training' }" class="link-item dark:link-item-dark">


@@ -950,7 +950,7 @@
</tr>
<tr>
<td style="min-width: 200px;">
<label for="install_sd_service" class="text-sm font-bold" style="margin-right: 1rem;">Reinstall SD service:</label>
<label for="install_sd_service" class="text-sm font-bold" style="margin-right: 1rem;">Install SD service:</label>
</td>
<td>
<div class="flex flex-row">
@@ -1003,7 +1003,7 @@
</tr>
<tr>
<td style="min-width: 200px;">
<label for="ollama_base_url" class="text-sm font-bold" style="margin-right: 1rem;">Reinstall Ollama service:</label>
<label for="ollama_base_url" class="text-sm font-bold" style="margin-right: 1rem;">Install Ollama service:</label>
</td>
<td>
<div class="flex flex-row">
@@ -1057,7 +1057,7 @@
</tr>
<tr>
<td style="min-width: 200px;">
<label for="petals_base_url" class="text-sm font-bold" style="margin-right: 1rem;">Reinstall Petals service:</label>
<label for="petals_base_url" class="text-sm font-bold" style="margin-right: 1rem;">Install Petals service:</label>
</td>
<td>
<div class="flex flex-row">
@@ -2871,7 +2871,7 @@ export default {
// }
this.modelDownlaodInProgress = false
this.addModel = {}
socket.emit('cancel_install', { model_name: modelEntry.model_name, binding_folder: modelEntry.binding_folder, model_url: modelEntry.model_url, patreon: model.patreon?model.patreon:"None"});
socket.emit('cancel_install', { model_name: modelEntry.model_name, binding_folder: modelEntry.binding_folder, model_url: modelEntry.model_url, patreon: modelEntry.patreon?modelEntry.patreon:"None"});
this.$store.state.toast.showToast("Model installation aborted", 4, false)
},

@@ -1 +1 @@
Subproject commit d7e62010c0432d2a2d284422bc59b47253ab4dc2
Subproject commit 46718174c90765bd3520e6462d77a7e2b535ad55

@@ -1 +1 @@
Subproject commit 05c39df0d4a3414609c27086200d57efb31d729d
Subproject commit 95cffe727c79ad9fa972d4913b9e9afdea5828ab