Mirror of https://github.com/ParisNeo/lollms.git (synced 2024-12-26 07:41:06 +00:00)
upgraded code

Commit: ddba6ed364
Parent: 5d4b5faa0f
@@ -1026,13 +1026,13 @@ class AIPersonality:
                                         save_db=self.config.data_vectorization_save_db,
                                         data_visualization_method=VisualizationMethod.PCA,
                                         database_dict=None)
-            data = GenericDataLoader.read_file(path)
-            self.vectorizer.add_document(path, data, self.config.data_vectorization_chunk_size, self.config.data_vectorization_overlap_size, add_first_line_to_all_chunks=True if path.suffix==".csv" else False)
-            self.vectorizer.index()
-            if callback is not None:
-                callback("File added successfully",MSG_TYPE.MSG_TYPE_INFO)
-            self.HideBlockingMessage("Adding file to vector store.\nPlease stand by")
-            return True
+            data = GenericDataLoader.read_file(path)
+            self.vectorizer.add_document(path, data, self.config.data_vectorization_chunk_size, self.config.data_vectorization_overlap_size, add_first_line_to_all_chunks=True if path.suffix==".csv" else False)
+            self.vectorizer.index()
+            if callback is not None:
+                callback("File added successfully",MSG_TYPE.MSG_TYPE_INFO)
+            self.HideBlockingMessage("Adding file to vector store.\nPlease stand by")
+            return True
         except Exception as e:
             trace_exception(e)
             self.HideBlockingMessage("Adding file to vector store.\nPlease stand by")
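Note on the hunk above: the ingestion path reads the file, chunks it into the vectorizer (repeating the first line on every chunk for .csv files so each chunk keeps its column headers), rebuilds the index, then reports back through the optional callback. A minimal standalone sketch of that flow follows; SimpleVectorizer and add_file are illustrative stand-ins, not the GenericDataLoader/TextVectorizer classes lollms actually uses:

    from pathlib import Path

    # Illustrative stand-in for the vectorizer used in the diff.
    class SimpleVectorizer:
        def __init__(self):
            self.chunks = []

        def add_document(self, name, text, chunk_size, overlap_size,
                         add_first_line_to_all_chunks=False):
            # Repeat the first line (e.g. a CSV header) at the top of every
            # chunk so each chunk stays self-describing, as the diff does.
            first_line = text.splitlines()[0] if add_first_line_to_all_chunks and text else ""
            step = max(chunk_size - overlap_size, 1)
            for start in range(0, max(len(text), 1), step):
                chunk = text[start:start + chunk_size]
                if first_line and not chunk.startswith(first_line):
                    chunk = first_line + "\n" + chunk
                self.chunks.append((name, chunk))

        def index(self):
            # A real implementation would embed the chunks here.
            print(f"indexed {len(self.chunks)} chunks")

    def add_file(path: Path, vectorizer: SimpleVectorizer, callback=None) -> bool:
        data = path.read_text(encoding="utf-8", errors="ignore")
        vectorizer.add_document(path.name, data,
                                chunk_size=512, overlap_size=64,
                                add_first_line_to_all_chunks=(path.suffix == ".csv"))
        vectorizer.index()
        if callback is not None:
            callback("File added successfully")
        return True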
@@ -22,6 +22,7 @@ from typing import List
 import socketio
 from functools import partial
 from datetime import datetime
+import threading
 import os
 
 router = APIRouter()
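Note: the one-line change above brings in threading for the per-client generation thread introduced in the next hunk. The functools.partial import already sitting alongside it is what binds client_id into the streaming callback; a minimal sketch, where process_chunk is an illustrative stand-in for lollmsElfServer.process_chunk:

    from functools import partial

    def process_chunk(chunk: str, client_id: str) -> None:
        # Stand-in for the server's chunk handler: route each streamed
        # piece of text to the client that requested the generation.
        print(f"[{client_id}] {chunk}")

    # Bind the client once; the personality processor can then invoke the
    # callback with just the chunk, a single positional argument.
    callback = partial(process_chunk, client_id="client-42")
    callback("Hello")  # prints: [client-42] Hello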
@@ -123,50 +124,48 @@ def add_events(sio:socketio):
         client.cancel_generation=False
         client.continuing=False
         client.first_chunk=True
+        def do_generation():
+            if not lollmsElfServer.model:
+                ASCIIColors.error("Model not selected. Please select a model")
+                lollmsElfServer.error("Model not selected. Please select a model", client_id=client_id)
+                return
 
-        if not lollmsElfServer.model:
-            ASCIIColors.error("Model not selected. Please select a model")
-            lollmsElfServer.error("Model not selected. Please select a model", client_id=client_id)
-            return
+            if not lollmsElfServer.busy:
+                if client.discussion is None:
+                    if lollmsElfServer.db.does_last_discussion_have_messages():
+                        client.discussion = lollmsElfServer.db.create_discussion()
+                    else:
+                        client.discussion = lollmsElfServer.db.load_last_discussion()
 
-        if not lollmsElfServer.busy:
-            if lollmsElfServer.session.get_client(client_id).discussion is None:
-                if lollmsElfServer.db.does_last_discussion_have_messages():
-                    lollmsElfServer.session.get_client(client_id).discussion = lollmsElfServer.db.create_discussion()
+                ump = lollmsElfServer.config.discussion_prompt_separator + lollmsElfServer.config.user_name.strip() if lollmsElfServer.config.use_user_name_in_discussions else lollmsElfServer.personality.user_message_prefix
+                message = client.discussion.add_message(
+                    message_type      = MSG_TYPE.MSG_TYPE_FULL.value,
+                    sender_type       = SENDER_TYPES.SENDER_TYPES_USER.value,
+                    sender            = ump.replace(lollmsElfServer.config.discussion_prompt_separator,"").replace(":",""),
+                    content="",
+                    metadata=None,
+                    parent_message_id=lollmsElfServer.message_id
+                )
+                lollmsElfServer.busy=True
+                command = data["command"]
+                parameters = data["parameters"]
+                lollmsElfServer.prepare_reception(client_id)
+                if lollmsElfServer.personality.processor is not None:
+                    lollmsElfServer.start_time = datetime.now()
+                    lollmsElfServer.personality.processor.callback = partial(lollmsElfServer.process_chunk, client_id=client_id)
+                    lollmsElfServer.personality.processor.execute_command(command, parameters)
                 else:
-                    lollmsElfServer.session.get_client(client_id).discussion = lollmsElfServer.db.load_last_discussion()
+                    lollmsElfServer.warning("Non scripted personalities do not support commands",client_id=client_id)
+                lollmsElfServer.close_message(client_id)
+                lollmsElfServer.busy=False
 
-            ump = lollmsElfServer.config.discussion_prompt_separator +lollmsElfServer.config.user_name.strip() if lollmsElfServer.config.use_user_name_in_discussions else lollmsElfServer.personality.user_message_prefix
-            message = lollmsElfServer.session.get_client(client_id).discussion.add_message(
-                message_type      = MSG_TYPE.MSG_TYPE_FULL.value,
-                sender_type       = SENDER_TYPES.SENDER_TYPES_USER.value,
-                sender            = ump.replace(lollmsElfServer.config.discussion_prompt_separator,"").replace(":",""),
-                content="",
-                metadata=None,
-                parent_message_id=lollmsElfServer.message_id
-            )
-            lollmsElfServer.busy=True
-
-        client_id = sid
-        client = lollmsElfServer.session.get_client(client_id)
 
-            command = data["command"]
-            parameters = data["parameters"]
-            lollmsElfServer.prepare_reception(client_id)
-            if lollmsElfServer.personality.processor is not None:
-                lollmsElfServer.start_time = datetime.now()
-                lollmsElfServer.personality.processor.callback = partial(lollmsElfServer.process_chunk, client_id=client_id)
-                lollmsElfServer.personality.processor.execute_command(command, parameters)
-                #tpe = threading.Thread(target=lollmsElfServer.start_message_generation, args=(message, message_id, client_id))
-                #tpe.start()
-            else:
-                lollmsElfServer.warning("Non scripted personalities do not support commands",client_id=client_id)
-            lollmsElfServer.close_message(client_id)
+            else:
+                lollmsElfServer.error("I am busy. Come back later.", client_id=client_id)
+                lollmsElfServer.busy=False
 
-            #tpe = threading.Thread(target=lollmsElfServer.start_message_generation, args=(message, message_id, client_id))
-            #tpe.start()
-        else:
-            lollmsElfServer.error("I am busy. Come back later.", client_id=client_id)
 
-            lollmsElfServer.busy=False
 
+        client.generation_thread = threading.Thread(target=do_generation)
+        client.generation_thread.start()
+        ASCIIColors.info("Started generation task")
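Note on the hunk above: the substantive change is that the whole command-execution path now lives in a nested do_generation() closure and runs on client.generation_thread, so the socket.io handler returns immediately instead of blocking the event loop while the personality processor works. A minimal sketch of the pattern, where Client, busy, and run_command are illustrative stand-ins for the lollms server objects:

    import threading

    busy = False  # stand-in for lollmsElfServer.busy

    class Client:
        # Stand-in for the per-connection client object held in the session.
        def __init__(self, client_id: str):
            self.client_id = client_id
            self.generation_thread = None

    def run_command(client: Client, command: str, parameters: dict) -> None:
        global busy
        if busy:
            print("I am busy. Come back later.")
            return
        busy = True
        try:
            # Slow work (model/processor execution) happens off the event loop.
            print(f"[{client.client_id}] executing {command} with {parameters}")
        finally:
            busy = False  # always release the flag, even if execution raises

    def on_execute_command(sid: str, data: dict) -> None:
        client = Client(sid)

        def do_generation():
            # sid and data are captured by the closure, mirroring the diff.
            run_command(client, data["command"], data["parameters"])

        client.generation_thread = threading.Thread(target=do_generation)
        client.generation_thread.start()
        print("Started generation task")  # handler returns right away

    on_execute_command("client-42", {"command": "summarize", "parameters": {}})

A threading.Lock (or a per-client flag) would be the more robust guard for busy once generation runs concurrently; the diff keeps the plain boolean the old inline code already used.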