Mirror of https://github.com/ParisNeo/lollms.git
Commit 1cb5e5b337 (parent eaedc6b3cb): upgraded
@@ -16,25 +16,25 @@ class ServerConnector(QObject):

def __init__(self, parent=None):
super(ServerConnector, self).__init__(parent)
self.socketio = Client()
self.sio = Client()
self.connected = False
self.personalities = []
self.selected_personality_id = 0

self.socketio.on('connect', self.handle_connect)
self.socketio.on('text_chunk', self.handle_text_chunk)
self.socketio.on('text_generated', self.handle_text_generated)
self.socketio.on('active_personalities_list', self.handle_personalities_received)
self.sio.on('connect', self.handle_connect)
self.sio.on('text_chunk', self.handle_text_chunk)
self.sio.on('text_generated', self.handle_text_generated)
self.sio.on('active_personalities_list', self.handle_personalities_received)

def handle_connect(self):
self.socketio.emit('connect')
self.sio.emit('connect')
self.list_personalities()


def connect_to_server(self):
if not self.connected:
try:
self.socketio.connect('http://localhost:9600')
self.sio.connect('http://localhost:9600')
self.connected = True
self.connection_status_changed.emit(True)
except ConnectionError:
@@ -43,12 +43,12 @@ class ServerConnector(QObject):

def disconnect_from_server(self):
if self.connected:
self.socketio.disconnect()
self.sio.disconnect()
self.connected = False
self.connection_status_changed.emit(False)

def list_personalities(self):
self.socketio.emit('list_active_personalities')
self.sio.emit('list_active_personalities')

@pyqtSlot(str)
def generate_text(self, prompt):
@@ -57,11 +57,11 @@ class ServerConnector(QObject):
return

data = {
'client_id': self.socketio.sid,
'client_id': self.sio.sid,
'prompt': prompt,
'personality': self.selected_personality_id
}
self.socketio.emit('generate_text', data)
self.sio.emit('generate_text', data)

def handle_personalities_list(self, data):
personalities = data['personalities']
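The hunks above rename the client-side attribute from self.socketio to self.sio while keeping the python-socketio Client API and the event names unchanged. As a quick orientation, here is a minimal sketch (not part of the commit) of a bare client exercising the same events against a local server on port 9600, the address used in the hunk.

```python
# Minimal sketch: a bare python-socketio client using the same events as the
# renamed ServerConnector above. Assumes a LoLLMs server listens on localhost:9600.
import socketio

sio = socketio.Client()

@sio.on('text_chunk')
def on_text_chunk(data):
    # Streamed partial text; 'chunk' holds the newly generated piece.
    print(data['chunk'], end='', flush=True)

@sio.on('text_generated')
def on_text_generated(data):
    # Full generated text once generation finishes.
    print('\ndone:', data['text'])

@sio.on('active_personalities_list')
def on_personalities(data):
    print('Active personalities:', data['personalities'])

sio.connect('http://localhost:9600')
sio.emit('list_active_personalities')
sio.emit('generate_text', {
    'client_id': sio.sid,          # same payload shape as in the hunk above
    'prompt': 'Hello there',
    'personality': 0,
})
sio.wait()
```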
@@ -13,7 +13,7 @@ from typing import Callable
from pathlib import Path
from datetime import datetime
from functools import partial
from flask_socketio import SocketIO
from socketio import AsyncServer
import subprocess
import importlib
import sys, os
@@ -33,13 +33,13 @@ class LollmsApplication(LoLLMsCom):
try_select_binding=False,
try_select_model=False,
callback=None,
socketio:SocketIO=None,
sio:AsyncServer=None,
free_mode=False
) -> None:
"""
Creates a LOLLMS Application
"""
super().__init__(socketio)
super().__init__(sio)
self.app_name = app_name
self.config = config
self.lollms_paths = lollms_paths
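This hunk swaps the flask_socketio.SocketIO import for python-socketio's AsyncServer and renames the constructor keyword to match. The hunk does not show how the AsyncServer is created or served, so the sketch below is only an illustration of the usual AsyncServer plus ASGIApp wiring, with event names borrowed from the server hunks further down.

```python
# Illustrative sketch (not the project's actual wiring): python-socketio's
# AsyncServer is typically mounted on an ASGI app instead of a Flask app.
import socketio

sio = socketio.AsyncServer(async_mode='asgi', cors_allowed_origins='*')
asgi_app = socketio.ASGIApp(sio)   # can be served with e.g. uvicorn module:asgi_app

@sio.event
async def connect(sid, environ):
    print(f"Client connected: {sid}")

@sio.on('list_active_personalities')
async def list_active_personalities(sid):
    # Stub handler: the real server returns the loaded personality names.
    await sio.emit('active_personalities_list',
                   {'success': True, 'personalities': []}, to=sid)
```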
@@ -255,21 +255,21 @@ class LoLLMsServer(LollmsApplication):
)


self.socketio = SocketIO(self.app, cors_allowed_origins='*', ping_timeout=1200, ping_interval=4000)
self.sio = SocketIO(self.app, cors_allowed_origins='*', ping_timeout=1200, ping_interval=4000)

# Set log level to warning
self.app.logger.setLevel(logging.WARNING)
# Configure a custom logger for Flask-SocketIO
self.socketio_log = logging.getLogger('socketio')
self.socketio_log.setLevel(logging.WARNING)
self.socketio_log.addHandler(logging.StreamHandler())
self.sio_log = logging.getLogger('socketio')
self.sio_log.setLevel(logging.WARNING)
self.sio_log.addHandler(logging.StreamHandler())

self.initialize_routes()
self.run(self.config.host, self.config.port)


def initialize_routes(self):
@self.socketio.on('connect')
@self.sio.on('connect')
def handle_connect():
client_id = request.sid
self.clients[client_id] = {
@@ -280,7 +280,7 @@ class LoLLMsServer(LollmsApplication):
}
ASCIIColors.success(f'Client connected with session ID: {client_id}')

@self.socketio.on('disconnect')
@self.sio.on('disconnect')
def handle_disconnect():
client_id = request.sid
if client_id in self.clients:
@@ -289,7 +289,7 @@ class LoLLMsServer(LollmsApplication):



@self.socketio.on('list_available_bindings')
@self.sio.on('list_available_bindings')
def handle_list_bindings():
binding_infs = []
for p in self.bindings_path.iterdir():
@@ -307,7 +307,7 @@ class LoLLMsServer(LollmsApplication):

emit('bindings_list', {'success':True, 'bindings': binding_infs}, room=request.sid)

@self.socketio.on('list_available_personalities')
@self.sio.on('list_available_personalities')
def handle_list_available_personalities():
personalities_folder = self.personalities_path
personalities = {}
@@ -359,7 +359,7 @@ class LoLLMsServer(LollmsApplication):
print(f"Couldn't load personality from {personality_folder} [{ex}]")
emit('personalities_list', {'personalities': personalities}, room=request.sid)

@self.socketio.on('list_available_models')
@self.sio.on('list_available_models')
def handle_list_available_models():
"""List the available models

@@ -408,7 +408,7 @@ class LoLLMsServer(LollmsApplication):
emit('available_models_list', {'success':True, 'available_models': models}, room=request.sid)


@self.socketio.on('list_available_personalities_categories')
@self.sio.on('list_available_personalities_categories')
def handle_list_available_personalities_categories(data):
try:
categories = [l for l in (self.personalities_path).iterdir()]
@@ -416,7 +416,7 @@ class LoLLMsServer(LollmsApplication):
except Exception as ex:
emit('available_personalities_categories_list', {'success': False, 'error':str(ex)})

@self.socketio.on('list_available_personalities_names')
@self.sio.on('list_available_personalities_names')
def handle_list_available_personalities_names(data):
try:
category = data["category"]
@@ -425,7 +425,7 @@ class LoLLMsServer(LollmsApplication):
except Exception as ex:
emit('list_available_personalities_names_list', {'success': False, 'error':str(ex)})

@self.socketio.on('select_binding')
@self.sio.on('select_binding')
def handle_select_binding(data):
self.cp_config = copy.deepcopy(self.config)
self.cp_config["binding_name"] = data['binding_name']
@@ -446,7 +446,7 @@ class LoLLMsServer(LollmsApplication):
print(ex)
emit('select_binding', {'success':False, 'binding_name': self.cp_config["binding_name"], 'error':f"Couldn't load binding:\n{ex}"}, room=request.sid)

@self.socketio.on('select_model')
@self.sio.on('select_model')
def handle_select_model(data):
model_name = data['model_name']
if self.binding is None:
@@ -468,7 +468,7 @@ class LoLLMsServer(LollmsApplication):
print(ex)
emit('select_model', {'success':False, 'model_name': model_name, 'error':f"Please select a binding first"}, room=request.sid)

@self.socketio.on('add_personality')
@self.sio.on('add_personality')
def handle_add_personality(data):
personality_path = data['path']
try:
@@ -489,7 +489,7 @@ class LoLLMsServer(LollmsApplication):
@self.socketio.on('vectorize_text')
@self.sio.on('vectorize_text')
def vectorize_text(parameters:dict):
"""Vectorizes text

@@ -535,7 +535,7 @@ class LoLLMsServer(LollmsApplication):
json.dump(json_db, file, indent=4)


@self.socketio.on('query_database')
@self.sio.on('query_database')
def query_database(parameters:dict):
"""queries a database

@@ -578,12 +578,12 @@ class LoLLMsServer(LollmsApplication):
})


@self.socketio.on('list_active_personalities')
@self.sio.on('list_active_personalities')
def handle_list_active_personalities():
personality_names = [p.name for p in self.personalities]
emit('active_personalities_list', {'success':True, 'personalities': personality_names}, room=request.sid)

@self.socketio.on('activate_personality')
@self.sio.on('activate_personality')
def handle_activate_personality(data):
personality_id = data['id']
if personality_id<len(self.personalities):
@@ -594,45 +594,45 @@ class LoLLMsServer(LollmsApplication):
else:
emit('personality_add_failed', {'success':False, 'error': "Personality ID not valid"}, room=request.sid)

@self.socketio.on('tokenize')
@self.sio.on('tokenize')
def tokenize(data):
client_id = request.sid
prompt = data['prompt']
tk = self.model.tokenize(prompt)
emit("tokenized", {"tokens":tk}, room=client_id)

@self.socketio.on('detokenize')
@self.sio.on('detokenize')
def detokenize(data):
client_id = request.sid
prompt = data['prompt']
txt = self.model.detokenize(prompt)
emit("detokenized", {"text":txt}, room=client_id)

@self.socketio.on('embed')
@self.sio.on('embed')
def detokenize(data):
client_id = request.sid
prompt = data['prompt']
txt = self.model.embed(prompt)
self.socketio.emit("embeded", {"text":txt}, room=client_id)
self.sio.emit("embeded", {"text":txt}, room=client_id)

@self.socketio.on('cancel_text_generation')
@self.sio.on('cancel_text_generation')
def cancel_text_generation(data):
client_id = request.sid
self.clients[client_id]["requested_stop"]=True
print(f"Client {client_id} requested canceling generation")
self.socketio.emit("generation_canceled", {"message":"Generation is canceled."}, room=client_id)
self.socketio.sleep(0)
self.sio.emit("generation_canceled", {"message":"Generation is canceled."}, room=client_id)
self.sio.sleep(0)
self.busy = False


# A copy of the original lollms-server generation code needed for playground
@self.socketio.on('generate_text')
@self.sio.on('generate_text')
def handle_generate_text(data):
client_id = request.sid
ASCIIColors.info(f"Text generation requested by client: {client_id}")
if self.busy:
self.socketio.emit("busy", {"message":"I am busy. Come back later."}, room=client_id)
self.socketio.sleep(0)
self.sio.emit("busy", {"message":"I am busy. Come back later."}, room=client_id)
self.sio.sleep(0)
ASCIIColors.warning(f"OOps request {client_id} refused!! Server busy")
return
def generate_text():
@@ -666,8 +666,8 @@ class LoLLMsServer(LollmsApplication):
if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
ASCIIColors.success(f"generated:{len(self.answer['full_text'].split())} words", end='\r')
self.answer["full_text"] = self.answer["full_text"] + text
self.socketio.emit('text_chunk', {'chunk': text, 'type':MSG_TYPE.MSG_TYPE_CHUNK.value}, room=client_id)
self.socketio.sleep(0)
self.sio.emit('text_chunk', {'chunk': text, 'type':MSG_TYPE.MSG_TYPE_CHUNK.value}, room=client_id)
self.sio.sleep(0)
if client_id in self.clients:# Client disconnected
if self.clients[client_id]["requested_stop"]:
return False
@@ -696,10 +696,10 @@ class LoLLMsServer(LollmsApplication):
if client_id in self.clients:
if not self.clients[client_id]["requested_stop"]:
# Emit the generated text to the client
self.socketio.emit('text_generated', {'text': generated_text}, room=client_id)
self.socketio.sleep(0)
self.sio.emit('text_generated', {'text': generated_text}, room=client_id)
self.sio.sleep(0)
except Exception as ex:
self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
self.sio.emit('generation_error', {'error': str(ex)}, room=client_id)
ASCIIColors.error(f"\ndone")
self.busy = False
else:
@@ -738,8 +738,8 @@ class LoLLMsServer(LollmsApplication):
def callback(text, message_type: MSG_TYPE, metadata:dict={}):
if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
self.answer["full_text"] = self.answer["full_text"] + text
self.socketio.emit('text_chunk', {'chunk': text}, room=client_id)
self.socketio.sleep(0)
self.sio.emit('text_chunk', {'chunk': text}, room=client_id)
self.sio.sleep(0)
try:
if self.clients[client_id]["requested_stop"]:
return False
@@ -769,19 +769,19 @@ class LoLLMsServer(LollmsApplication):
ASCIIColors.success("\ndone")

# Emit the generated text to the client
self.socketio.emit('text_generated', {'text': generated_text}, room=client_id)
self.socketio.sleep(0)
self.sio.emit('text_generated', {'text': generated_text}, room=client_id)
self.sio.sleep(0)
except Exception as ex:
self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
self.sio.emit('generation_error', {'error': str(ex)}, room=client_id)
ASCIIColors.error(f"\ndone")
self.busy = False
except Exception as ex:
trace_exception(ex)
self.socketio.emit('generation_error', {'error': str(ex)}, room=client_id)
self.sio.emit('generation_error', {'error': str(ex)}, room=client_id)
self.busy = False

# Start the text generation task in a separate thread
task = self.socketio.start_background_task(target=generate_text)
task = self.sio.start_background_task(target=generate_text)

def build_binding(self, bindings_path: Path, cfg: LOLLMSConfig)->LLMBinding:
binding_path = Path(bindings_path) / cfg["binding_name"]
@@ -813,7 +813,7 @@ class LoLLMsServer(LollmsApplication):
print(f"{ASCIIColors.color_red}Current personality : {ASCIIColors.color_reset}{self.config.personalities[self.config.active_personality_id]}")
ASCIIColors.info(f"Serving on address: http://{host}:{port}")

self.socketio.run(self.app, host=host, port=port)
self.sio.run(self.app, host=host, port=port)

def main():
LoLLMsServer()
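In this legacy server file only the attribute name changes; the object is still a flask_socketio SocketIO bound to the Flask app. The sketch below (illustrative, not the project's actual server) condenses the handler pattern used throughout these hunks: per-event handlers, emits addressed to request.sid, and generation pushed onto a background task.

```python
# Minimal Flask-SocketIO sketch of the handler pattern above, with the
# attribute renamed to sio. The handlers are stubs, not the real server logic.
import logging
from flask import Flask, request
from flask_socketio import SocketIO, emit

app = Flask(__name__)
sio = SocketIO(app, cors_allowed_origins='*', ping_timeout=1200, ping_interval=4000)
logging.getLogger('socketio').setLevel(logging.WARNING)

clients = {}

@sio.on('connect')
def handle_connect():
    clients[request.sid] = {"requested_stop": False}

@sio.on('list_active_personalities')
def handle_list_active_personalities():
    emit('active_personalities_list', {'success': True, 'personalities': []}, room=request.sid)

@sio.on('generate_text')
def handle_generate_text(data):
    client_id = request.sid

    def generate_text():
        # Streaming stand-in for the real model call.
        sio.emit('text_chunk', {'chunk': 'Hello '}, room=client_id)
        sio.sleep(0)
        sio.emit('text_generated', {'text': 'Hello world'}, room=client_id)

    sio.start_background_task(target=generate_text)

if __name__ == '__main__':
    sio.run(app, host='localhost', port=9600)
```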
@@ -24,8 +24,9 @@ from lollms.main_config import LOLLMSConfig
from lollms.com import NotificationType, NotificationDisplayType, LoLLMsCom
import urllib
import inspect
from datetime import datetime
from enum import Enum

from lollms.utilities import trace_exception
__author__ = "parisneo"
__github__ = "https://github.com/ParisNeo/lollms_bindings_zoo"
__copyright__ = "Copyright 2023, "
@@ -70,6 +71,8 @@ class LLMBinding:

self.lollmsCom = lollmsCom

self.download_infos={}

self.add_default_configurations(binding_config)

self.interrogatorStorer = None
@@ -100,7 +103,192 @@ class LLMBinding:
self.configuration_file_path.parent.mkdir(parents=True, exist_ok=True)
binding_config.config.file_path = self.configuration_file_path

def install_model(self, model_type:str, model_path:str, variant_name:str, client_id:int=None):
print("Install model triggered")
model_path = model_path.replace("\\","/")

if model_type.lower() in model_path.lower():
model_type:str=model_type
else:
mtt = None
for mt in self.models_dir_names:
if mt.lower() in model_path.lower():
mtt = mt
break
if mtt:
model_type = mtt
else:
model_type:str=self.models_dir_names[0]

progress = 0
installation_dir = self.searchModelParentFolder(model_path.split('/')[-1], model_type)
if model_type=="gptq" or model_type=="awq" or model_type=="transformers":
parts = model_path.split("/")
if len(parts)==2:
filename = parts[1]
else:
filename = parts[4]
installation_path = installation_dir / filename
elif model_type=="gpt4all":
filename = variant_name
model_path = "http://gpt4all.io/models/gguf/"+filename
installation_path = installation_dir / filename
else:
filename = Path(model_path).name
installation_path = installation_dir / filename
print("Model install requested")
print(f"Model path : {model_path}")

model_name = filename
binding_folder = self.config["binding_name"]
model_url = model_path
signature = f"{model_name}_{binding_folder}_{model_url}"
try:
self.download_infos[signature]={
"start_time":datetime.now(),
"total_size":self.get_file_size(model_path),
"downloaded_size":0,
"progress":0,
"speed":0,
"cancel":False
}

if installation_path.exists():
print("Error: Model already exists. please remove it first")

self.lollmsCom.notify_model_install(
installation_path,
model_name,
binding_folder,
model_url,
self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
self.download_infos[signature]['total_size'],
self.download_infos[signature]['downloaded_size'],
self.download_infos[signature]['progress'],
self.download_infos[signature]['speed'],
client_id,
status=True,
error="",
)

return


def callback(downloaded_size, total_size):
progress = (downloaded_size / total_size) * 100
now = datetime.now()
dt = (now - self.download_infos[signature]['start_time']).total_seconds()
speed = downloaded_size/dt
self.download_infos[signature]['downloaded_size'] = downloaded_size
self.download_infos[signature]['speed'] = speed

if progress - self.download_infos[signature]['progress']>2:
self.download_infos[signature]['progress'] = progress
self.lollmsCom.notify_model_install(
installation_path,
model_name,
binding_folder,
model_url,
self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
self.download_infos[signature]['total_size'],
self.download_infos[signature]['downloaded_size'],
self.download_infos[signature]['progress'],
self.download_infos[signature]['speed'],
client_id,
status=True,
error="",
)

if self.download_infos[signature]["cancel"]:
raise Exception("canceled")


if hasattr(self, "download_model"):
try:
self.download_model(model_path, installation_path, callback)
except Exception as ex:
ASCIIColors.warning(str(ex))
trace_exception(ex)
self.lollmsCom.notify_model_install(
installation_path,
model_name,
binding_folder,
model_url,
self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
self.download_infos[signature]['total_size'],
self.download_infos[signature]['downloaded_size'],
self.download_infos[signature]['progress'],
self.download_infos[signature]['speed'],
client_id,
status=False,
error="Canceled",
)

del self.download_infos[signature]
try:
if installation_path.is_dir():
shutil.rmtree(installation_path)
else:
installation_path.unlink()
except Exception as ex:
trace_exception(ex)
ASCIIColors.error(f"Couldn't delete file. Please try to remove it manually.\n{installation_path}")
return

else:
try:
self.download_file(model_path, installation_path, callback)
except Exception as ex:
ASCIIColors.warning(str(ex))
trace_exception(ex)
self.lollmsCom.notify_model_install(
installation_path,
model_name,
binding_folder,
model_url,
self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
self.download_infos[signature]['total_size'],
self.download_infos[signature]['downloaded_size'],
self.download_infos[signature]['progress'],
self.download_infos[signature]['speed'],
client_id,
status=False,
error="Canceled",
)
del self.download_infos[signature]
installation_path.unlink()
return
self.lollmsCom.notify_model_install(
installation_path,
model_name,
binding_folder,
model_url,
self.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
self.download_infos[signature]['total_size'],
self.download_infos[signature]['downloaded_size'],
self.download_infos[signature]['progress'],
self.download_infos[signature]['speed'],
client_id,
status=True,
error="",
)
del self.download_infos[signature]
except Exception as ex:
trace_exception(ex)
self.lollmsCom.notify_model_install(
installation_path,
model_name,
binding_folder,
model_url,
'',
0,
0,
0,
0,
client_id,
status=False,
error=str(ex),
)

def add_default_configurations(self, binding_config:TypedConfig):
binding_config.addConfigs([
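The new LLMBinding.install_model above reports download progress through a callback that recomputes the transfer speed from elapsed time and only notifies when progress has advanced by more than two percent. A standalone sketch of that throttling idea, with hypothetical report_progress and download_file stand-ins:

```python
# Illustrative sketch of the throttled-progress callback used above:
# report at most once per >2% of additional progress.
from datetime import datetime

def make_progress_callback(report_progress):
    state = {"start_time": datetime.now(), "progress": 0.0}

    def callback(downloaded_size, total_size):
        progress = (downloaded_size / total_size) * 100
        elapsed = (datetime.now() - state["start_time"]).total_seconds() or 1e-6
        speed = downloaded_size / elapsed          # bytes per second
        if progress - state["progress"] > 2:       # throttle the notifications
            state["progress"] = progress
            report_progress(progress, speed)

    return callback

# Usage sketch (download_file is a hypothetical downloader that accepts a callback):
# download_file(url, destination, make_progress_callback(print))
```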
@@ -36,8 +36,8 @@ class NotificationDisplayType(Enum):


class LoLLMsCom:
def __init__(self, socketio:socketio.AsyncServer=None) -> None:
self.socketio= socketio
def __init__(self, sio:socketio.AsyncServer=None) -> None:
self.sio= sio


def InfoMessage(self, content, client_id=None, verbose:bool=True):
@@ -76,7 +76,7 @@ class LoLLMsCom:
"wait":True,
"result":False
}
@self.socketio.on('yesNoRes')
@self.sio.on('yesNoRes')
def yesnores(result):
infos["result"] = result["yesRes"]
infos["wait"]=False
@@ -92,7 +92,7 @@ class LoLLMsCom:
# wait
ASCIIColors.yellow("Waiting for yes no question to be answered")
while infos["wait"]:
self.socketio.sleep(1)
self.sio.sleep(1)
return infos["result"]

def info(self, content, duration:int=4, client_id=None, verbose:bool=True):
@@ -154,3 +154,20 @@ class LoLLMsCom:
ASCIIColors.warning(content)
else:
ASCIIColors.red(content)


def notify_model_install(self,
installation_path,
model_name,
binding_folder,
model_url,
start_time,
total_size,
downloaded_size,
progress,
speed,
client_id,
status=True,
error="",
):
pass
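notify_model_install is added here as a no-op hook on the LoLLMsCom base class; concrete servers override it (see the LOLLMSElfServer hunk below) to forward progress over their own transport. A hypothetical override for a console-only front end might look like this sketch:

```python
# Hypothetical subclass, not part of the commit: overrides the new hook to
# print progress instead of emitting a socket event. Signature matches the hunk above.
from lollms.com import LoLLMsCom

class ConsoleCom(LoLLMsCom):
    def notify_model_install(self, installation_path, model_name, binding_folder,
                             model_url, start_time, total_size, downloaded_size,
                             progress, speed, client_id, status=True, error=""):
        # One-line progress report on the console.
        print(f"[{model_name}] {progress:.1f}% at {speed/1e6:.2f} MB/s "
              f"(status={status}, error={error or 'none'})")
```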
@@ -152,7 +152,7 @@ class WebcamImageSender:
Args:
socketio (socketio.Client): The SocketIO client object.
"""
self.socketio = sio
self.sio = sio
self.last_image = None
self.last_change_time = None
self.capture_thread = None
@@ -191,7 +191,7 @@ class WebcamImageSender:

_, buffer = cv2.imencode('.jpg', frame)
image_base64 = base64.b64encode(buffer)
run_async(partial(self.socketio.emit,"video_stream_image", image_base64.decode('utf-8')))
run_async(partial(self.sio.emit,"video_stream_image", image_base64.decode('utf-8')))

cap.release()
except Exception as ex:
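The webcam sender keeps the same pipeline, JPEG-encode the frame, base64-encode the buffer, then emit it on 'video_stream_image'; only the attribute name changes. A minimal standalone sketch of that emission step, using the synchronous python-socketio client for brevity (the project itself wraps an async emit with run_async(partial(...))); the server address is an assumption:

```python
# Sketch: capture one frame, JPEG-compress it, base64-encode it, and emit it
# on the same 'video_stream_image' event. Requires OpenCV and a reachable server.
import base64
import cv2
import socketio

sio = socketio.Client()
sio.connect('http://localhost:9600')          # assumed server address

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    _, buffer = cv2.imencode('.jpg', frame)   # JPEG-compress the raw frame
    image_base64 = base64.b64encode(buffer)   # base64-encode for transport
    sio.emit("video_stream_image", image_base64.decode('utf-8'))
cap.release()
sio.disconnect()
```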
@@ -12,6 +12,8 @@ from lollms.paths import LollmsPaths
from lollms.personality import AIPersonality
from pathlib import Path
from socketio import AsyncServer
from functools import partial
from lollms.utilities import trace_exception, run_async

class LOLLMSElfServer(LollmsApplication):
__instance = None
@@ -27,7 +29,7 @@ class LOLLMSElfServer(LollmsApplication):
try_select_binding=False,
try_select_model=False,
callback=None,
socketio:AsyncServer = None
sio:AsyncServer = None
):
if LOLLMSElfServer.__instance is None:
LOLLMSElfServer(
@@ -40,7 +42,7 @@ class LOLLMSElfServer(LollmsApplication):
try_select_binding=try_select_binding,
try_select_model=try_select_model,
callback=callback,
socketio=socketio
sio=sio
)
return LOLLMSElfServer.__instance
@staticmethod
@@ -58,7 +60,7 @@ class LOLLMSElfServer(LollmsApplication):
try_select_binding=False,
try_select_model=False,
callback=None,
socketio:AsyncServer=None
sio:AsyncServer=None
) -> None:
super().__init__(
"LOLLMSElfServer",
@@ -71,7 +73,7 @@ class LOLLMSElfServer(LollmsApplication):
try_select_binding=try_select_binding,
try_select_model=try_select_model,
callback=callback,
socketio=socketio
sio=sio
)
if LOLLMSElfServer.__instance is not None:
raise Exception("This class is a singleton!")
@@ -85,3 +87,33 @@ class LOLLMSElfServer(LollmsApplication):
if full_path.exists():
return full_path
return None


def notify_model_install(self,
installation_path,
model_name,
binding_folder,
model_url,
start_time,
total_size,
downloaded_size,
progress,
speed,
client_id,
status=True,
error="",
):
run_async( partial(self.sio.emit,'install_progress',{
'status': status,
'error': error,
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': start_time,
'total_size': total_size,
'downloaded_size': downloaded_size,
'progress': progress,
'speed': speed,
}, room=client_id
)
)
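The LOLLMSElfServer override above pushes progress to the requesting client as an 'install_progress' event whose payload mirrors the dictionary in the hunk. A sketch of a client listening for it (server URL assumed):

```python
# Sketch: watch the 'install_progress' event emitted by the override above.
import socketio

sio = socketio.Client()

@sio.on('install_progress')
def on_install_progress(data):
    if not data['status']:
        print(f"Install failed for {data['model_name']}: {data['error']}")
    else:
        print(f"{data['model_name']}: {data['progress']:.1f}% "
              f"({data['downloaded_size']}/{data['total_size']} bytes)")

sio.connect('http://localhost:9600')   # assumed server address
sio.wait()
```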
@@ -17,7 +17,7 @@ from lollms.utilities import load_config, trace_exception, gc
from pathlib import Path
from typing import List
import json

import os
# ----------------------------------- Personal files -----------------------------------------
class ReloadBindingParams(BaseModel):
binding_name: str
@@ -162,5 +162,5 @@ def save_settings():
if lollmsElfServer.config["debug"]:
print("Configuration saved")
# Tell that the setting was changed
lollmsElfServer.socketio.emit('save_settings', {"status":True})
lollmsElfServer.sio.emit('save_settings', {"status":True})
return {"status":True}
@@ -17,7 +17,7 @@ from pathlib import Path
from typing import List
import psutil


from lollms.utilities import trace_exception
class ModelReferenceParams(BaseModel):
path: str

@@ -60,6 +60,7 @@ async def get_available_models():
try:
model_list = lollmsElfServer.binding.get_available_models(lollmsElfServer)
except Exception as ex:
trace_exception(ex)
lollmsElfServer.error("Coudln't list models. Please reinstall the binding or notify ParisNeo on the discord server")
return []
@@ -44,7 +44,7 @@ def add_events(sio:socketio):
client_id = sid
lollmsElfServer.connections[client_id]["requested_stop"]=True
print(f"Client {client_id} requested canceling generation")
run_async(partial(lollmsElfServer.socketio.emit,"generation_canceled", {"message":"Generation is canceled."}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,"generation_canceled", {"message":"Generation is canceled."}, to=client_id))
lollmsElfServer.busy = False


@@ -55,7 +55,7 @@ def add_events(sio:socketio):
lollmsElfServer.cancel_gen = False
ASCIIColors.info(f"Text generation requested by client: {client_id}")
if lollmsElfServer.busy:
run_async(partial(lollmsElfServer.socketio.emit,"busy", {"message":"I am busy. Come back later."}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,"busy", {"message":"I am busy. Come back later."}, to=client_id))
ASCIIColors.warning(f"OOps request {client_id} refused!! Server busy")
return
lollmsElfServer.busy = True
@@ -89,7 +89,7 @@ def add_events(sio:socketio):
ASCIIColors.success(f"generated:{len(lollmsElfServer.answer['full_text'].split())} words", end='\r')
if text is not None:
lollmsElfServer.answer["full_text"] = lollmsElfServer.answer["full_text"] + text
run_async(partial(lollmsElfServer.socketio.emit,'text_chunk', {'chunk': text, 'type':MSG_TYPE.MSG_TYPE_CHUNK.value}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,'text_chunk', {'chunk': text, 'type':MSG_TYPE.MSG_TYPE_CHUNK.value}, to=client_id))
if client_id in lollmsElfServer.connections:# Client disconnected
if lollmsElfServer.connections[client_id]["requested_stop"]:
return False
@@ -120,9 +120,9 @@ def add_events(sio:socketio):
if client_id in lollmsElfServer.connections:
if not lollmsElfServer.connections[client_id]["requested_stop"]:
# Emit the generated text to the client
run_async(partial(lollmsElfServer.socketio.emit,'text_generated', {'text': generated_text}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,'text_generated', {'text': generated_text}, to=client_id))
except Exception as ex:
run_async(partial(lollmsElfServer.socketio.emit,'generation_error', {'error': str(ex)}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,'generation_error', {'error': str(ex)}, to=client_id))
ASCIIColors.error(f"\ndone")
lollmsElfServer.busy = False
else:
@@ -161,7 +161,7 @@ def add_events(sio:socketio):
def callback(text, message_type: MSG_TYPE, metadata:dict={}):
if message_type == MSG_TYPE.MSG_TYPE_CHUNK:
lollmsElfServer.answer["full_text"] = lollmsElfServer.answer["full_text"] + text
run_async(partial(lollmsElfServer.socketio.emit,'text_chunk', {'chunk': text}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,'text_chunk', {'chunk': text}, to=client_id))
try:
if lollmsElfServer.connections[client_id]["requested_stop"]:
return False
@@ -191,14 +191,14 @@ def add_events(sio:socketio):
ASCIIColors.success("\ndone")

# Emit the generated text to the client
run_async(partial(lollmsElfServer.socketio.emit,'text_generated', {'text': generated_text}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,'text_generated', {'text': generated_text}, to=client_id))
except Exception as ex:
run_async(partial(lollmsElfServer.socketio.emit,'generation_error', {'error': str(ex)}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,'generation_error', {'error': str(ex)}, to=client_id))
ASCIIColors.error(f"\ndone")
lollmsElfServer.busy = False
except Exception as ex:
trace_exception(ex)
run_async(partial(lollmsElfServer.socketio.emit,'generation_error', {'error': str(ex)}, to=client_id))
run_async(partial(lollmsElfServer.sio.emit,'generation_error', {'error': str(ex)}, to=client_id))
lollmsElfServer.busy = False


@@ -242,7 +242,7 @@ def add_events(sio:socketio):
lollmsElfServer.connections[client_id]['generation_thread'] = threading.Thread(target=lollmsElfServer.start_message_generation, args=(message, message.id, client_id))
lollmsElfServer.connections[client_id]['generation_thread'].start()

lollmsElfServer.socketio.sleep(0.01)
lollmsElfServer.sio.sleep(0.01)
ASCIIColors.info("Started generation task")
lollmsElfServer.busy=True
#tpe = threading.Thread(target=lollmsElfServer.start_message_generation, args=(message, message_id, client_id))
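These handlers are synchronous callbacks, so they schedule the AsyncServer's coroutine-based emit through run_async(partial(...)). The run_async helper from lollms.utilities is not shown in this commit; the function below is only a plausible stand-in to illustrate what such a wrapper has to do.

```python
# Hedged sketch: a possible run_async equivalent, NOT the project's implementation.
import asyncio
from functools import partial

def run_async(coro_factory):
    """Run a coroutine factory (e.g. partial(sio.emit, ...)), whether or not an
    event loop is already running in the current thread."""
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No running loop in this thread: execute the coroutine to completion.
        return asyncio.run(coro_factory())
    # A loop is already running here: schedule the coroutine without blocking.
    return loop.create_task(coro_factory())

# Usage, mirroring the handlers above (sio and client_id assumed in scope):
# run_async(partial(sio.emit, 'text_chunk', {'chunk': 'hello'}, to=client_id))
```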
@@ -32,202 +32,8 @@ lollmsElfServer = LOLLMSElfServer.get_instance()
def add_events(sio:socketio):
@sio.on('install_model')
def install_model(sid, data):
room_id = sid
def install_model_():
print("Install model triggered")
model_path = data["path"].replace("\\","/")

if data["type"].lower() in model_path.lower():
model_type:str=data["type"]
else:
mtt = None
for mt in lollmsElfServer.binding.models_dir_names:
if mt.lower() in model_path.lower():
mtt = mt
break
if mtt:
model_type = mtt
else:
model_type:str=lollmsElfServer.binding.models_dir_names[0]

progress = 0
installation_dir = lollmsElfServer.binding.searchModelParentFolder(model_path.split('/')[-1], model_type)
if model_type=="gptq" or model_type=="awq" or model_type=="transformers":
parts = model_path.split("/")
if len(parts)==2:
filename = parts[1]
else:
filename = parts[4]
installation_path = installation_dir / filename
elif model_type=="gpt4all":
filename = data["variant_name"]
model_path = "http://gpt4all.io/models/gguf/"+filename
installation_path = installation_dir / filename
else:
filename = Path(model_path).name
installation_path = installation_dir / filename
print("Model install requested")
print(f"Model path : {model_path}")

model_name = filename
binding_folder = lollmsElfServer.config["binding_name"]
model_url = model_path
signature = f"{model_name}_{binding_folder}_{model_url}"
try:
lollmsElfServer.download_infos[signature]={
"start_time":datetime.now(),
"total_size":lollmsElfServer.binding.get_file_size(model_path),
"downloaded_size":0,
"progress":0,
"speed":0,
"cancel":False
}

if installation_path.exists():
print("Error: Model already exists. please remove it first")
run_async( partial(sio.emit,'install_progress',{
'status': False,
'error': f'model already exists. Please remove it first.\nThe model can be found here:{installation_path}',
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': lollmsElfServer.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': lollmsElfServer.download_infos[signature]['total_size'],
'downloaded_size': lollmsElfServer.download_infos[signature]['downloaded_size'],
'progress': lollmsElfServer.download_infos[signature]['progress'],
'speed': lollmsElfServer.download_infos[signature]['speed'],
}, room=room_id
)
)
return

run_async( partial(sio.emit,'install_progress',{
'status': True,
'progress': progress,
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': lollmsElfServer.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': lollmsElfServer.download_infos[signature]['total_size'],
'downloaded_size': lollmsElfServer.download_infos[signature]['downloaded_size'],
'progress': lollmsElfServer.download_infos[signature]['progress'],
'speed': lollmsElfServer.download_infos[signature]['speed'],

}, room=room_id)
)

def callback(downloaded_size, total_size):
progress = (downloaded_size / total_size) * 100
now = datetime.now()
dt = (now - lollmsElfServer.download_infos[signature]['start_time']).total_seconds()
speed = downloaded_size/dt
lollmsElfServer.download_infos[signature]['downloaded_size'] = downloaded_size
lollmsElfServer.download_infos[signature]['speed'] = speed

if progress - lollmsElfServer.download_infos[signature]['progress']>2:
lollmsElfServer.download_infos[signature]['progress'] = progress
run_async( partial(sio.emit,'install_progress',{
'status': True,
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': lollmsElfServer.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': lollmsElfServer.download_infos[signature]['total_size'],
'downloaded_size': lollmsElfServer.download_infos[signature]['downloaded_size'],
'progress': lollmsElfServer.download_infos[signature]['progress'],
'speed': lollmsElfServer.download_infos[signature]['speed'],
}, room=room_id)
)

if lollmsElfServer.download_infos[signature]["cancel"]:
raise Exception("canceled")
if hasattr(lollmsElfServer.binding, "download_model"):
try:
lollmsElfServer.binding.download_model(model_path, installation_path, callback)
except Exception as ex:
ASCIIColors.warning(str(ex))
trace_exception(ex)
run_async( partial(sio.emit,'install_progress',{
'status': False,
'error': 'canceled',
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': lollmsElfServer.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': lollmsElfServer.download_infos[signature]['total_size'],
'downloaded_size': lollmsElfServer.download_infos[signature]['downloaded_size'],
'progress': lollmsElfServer.download_infos[signature]['progress'],
'speed': lollmsElfServer.download_infos[signature]['speed'],
}, room=room_id
)
)
del lollmsElfServer.download_infos[signature]
try:
if installation_path.is_dir():
shutil.rmtree(installation_path)
else:
installation_path.unlink()
except Exception as ex:
trace_exception(ex)
ASCIIColors.error(f"Couldn't delete file. Please try to remove it manually.\n{installation_path}")
return

else:
try:
lollmsElfServer.download_file(model_path, installation_path, callback)
except Exception as ex:
ASCIIColors.warning(str(ex))
trace_exception(ex)
run_async( partial(sio.emit,'install_progress',{
'status': False,
'error': 'canceled',
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': lollmsElfServer.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': lollmsElfServer.download_infos[signature]['total_size'],
'downloaded_size': lollmsElfServer.download_infos[signature]['downloaded_size'],
'progress': lollmsElfServer.download_infos[signature]['progress'],
'speed': lollmsElfServer.download_infos[signature]['speed'],
}, room=room_id
)
)
del lollmsElfServer.download_infos[signature]
installation_path.unlink()
return
run_async( partial(sio.emit,'install_progress',{
'status': True,
'error': '',
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': lollmsElfServer.download_infos[signature]['start_time'].strftime("%Y-%m-%d %H:%M:%S"),
'total_size': lollmsElfServer.download_infos[signature]['total_size'],
'downloaded_size': lollmsElfServer.download_infos[signature]['downloaded_size'],
'progress': 100,
'speed': lollmsElfServer.download_infos[signature]['speed'],
}, room=room_id)
)
del lollmsElfServer.download_infos[signature]
except Exception as ex:
trace_exception(ex)
run_async( partial(sio.emit,'install_progress',{
'status': False,
'error': str(ex),
'model_name' : model_name,
'binding_folder' : binding_folder,
'model_url' : model_url,
'start_time': '',
'total_size': 0,
'downloaded_size': 0,
'progress': 0,
'speed': 0,
}, room=room_id
)
)
tpe = threading.Thread(target=install_model_, args=())
client_id = sid
tpe = threading.Thread(target=lollmsElfServer.binding.install_model, args=(data["type"], data["path"], data["variant_name"], client_id))
tpe.start()

@sio.on('uninstall_model')
@@ -302,9 +108,10 @@ def add_events(sio:socketio):
)
except Exception as ex:
trace_exception(ex)
sio.emit('canceled', {
run_async( partial(sio.emit,'canceled', {
'status': False,
'error':str(ex)
},
room=sid
)
)
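The net effect of this hunk is that the long inline install_model_ closure is deleted and the socket event simply delegates to the new LLMBinding.install_model on a worker thread, passing the caller's sid as client_id so progress notifications reach the right room. Condensed from the hunk above (illustrative, not the exact file):

```python
# Sketch of the slimmed-down handler after the refactor: the event just hands
# the work to LLMBinding.install_model on a worker thread.
import threading

def add_install_model_event(sio, lollmsElfServer):
    @sio.on('install_model')
    def install_model(sid, data):
        client_id = sid
        tpe = threading.Thread(
            target=lollmsElfServer.binding.install_model,
            args=(data["type"], data["path"], data["variant_name"], client_id),
        )
        tpe.start()
```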
@@ -36,7 +36,7 @@ def add_events(sio:socketio):
if lollmsElfServer.config.data_vectorization_activate and lollmsElfServer.config.use_discussions_history:
try:
run_async(partial(sio.emit,'show_progress'))
lollmsElfServer.socketio.sleep(0)
lollmsElfServer.sio.sleep(0)
ASCIIColors.yellow("0- Detected discussion vectorization request")
folder = lollmsElfServer.lollms_paths.personal_databases_path/"vectorized_dbs"
folder.mkdir(parents=True, exist_ok=True)
@@ -49,7 +49,7 @@ def add_events(sio:socketio):
nb_discussions = len(discussions)
for (title,discussion) in tqdm(discussions):
run_async(partial(sio.emit,'update_progress',{'value':int(100*(index/nb_discussions))}))
lollmsElfServer.socketio.sleep(0)
lollmsElfServer.sio.sleep(0)
index += 1
if discussion!='':
skill = lollmsElfServer.learn_from_discussion(title, discussion)
@@ -65,4 +65,4 @@ def add_events(sio:socketio):
except Exception as ex:
ASCIIColors.error(f"Couldn't vectorize database:{ex}")
run_async(partial(sio.emit,'hide_progress'))
lollmsElfServer.socketio.sleep(0)
lollmsElfServer.sio.sleep(0)
@@ -7,7 +7,6 @@
# All rights are reserved

from pathlib import Path
import os
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
@@ -8,18 +8,5 @@ OLLAMA_HOST="0.0.0.0:11434"
# Start the OLLAMA server
OLLAMA_MODELS=~/ollama/models ollama serve &

# Check if models.txt exists
if [ ! -f models.txt ]; then
# Create models.txt and add "mixtral" to it
echo "mistral" > ~/models.txt
fi

# Read the models from the file
while IFS= read -r model
do
# Run each model in the background
ollama run "$model" &
done < ~/models.txt

# Wait for all background processes to finish
wait
@@ -7,7 +7,6 @@
# All rights are reserved

from pathlib import Path
import os
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths
@@ -7,7 +7,6 @@
# All rights are reserved

from pathlib import Path
import os
import sys
from lollms.app import LollmsApplication
from lollms.paths import LollmsPaths