Mirror of https://github.com/ParisNeo/lollms.git
Synced 2025-01-18 02:39:46 +00:00

Commit 2f13ea8fa9: enhanced commands handling
Parent: 62677f4ee5
@@ -85,7 +85,7 @@ lollms_base_url: http://0.0.0.0:1234
 
 # elastic search service
 elastic_search_service: false
-elastic_search_url: http://0.0.0.0:9606
+elastic_search_url: http://0.0.0.0:9200
 
 # vll service
 vllm_service: false
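
For reference, 9200 is the standard HTTP port of an Elasticsearch node, which is presumably what this one-line fix restores (9606 looks like a typo). A minimal, hypothetical Python sketch for checking that the configured endpoint answers before enabling the service; neither the helper nor the requests dependency is part of lollms:

    import requests

    def elastic_search_reachable(url="http://0.0.0.0:9200", timeout=2.0):
        # The Elasticsearch root endpoint returns basic cluster info as JSON;
        # any 200 response means a node is listening at the configured URL.
        try:
            return requests.get(url, timeout=timeout).status_code == 200
        except requests.RequestException:
            return False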
@@ -1839,9 +1839,6 @@ class APScript(StateMachine):
     ) -> None:
         super().__init__(states_dict)
         self.notify = personality.app.notify
-        self.text_files = []
-        self.image_files = []
-        self.images_descriptions = []
 
         self.personality = personality
         self.personality_config = personality_config
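
Judging from the hunk, APScript.__init__ no longer creates the three file-tracking lists (text_files, image_files, images_descriptions), while notify, personality and personality_config survive. A hedged sketch of a defensive accessor for code that may run against either side of this commit; the helper is hypothetical, not lollms API:

    def get_text_files(script):
        # After this commit, APScript.__init__ no longer initializes text_files,
        # so read it defensively; script is any APScript instance.
        return getattr(script, "text_files", [])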
@@ -15,6 +15,7 @@ from socketio import AsyncServer
 from functools import partial
 from lollms.utilities import trace_exception, run_async
 
+from datetime import datetime
 class LOLLMSElfServer(LollmsApplication):
     __instance = None
 
@@ -88,6 +89,14 @@ class LOLLMSElfServer(LollmsApplication):
                 return full_path
         return None
 
+    def prepare_reception(self, client_id):
+        if not self.connections[client_id]["continuing"]:
+            self.connections[client_id]["generated_text"] = ""
+
+        self.connections[client_id]["first_chunk"]=True
+
+        self.nb_received_tokens = 0
+        self.start_time = datetime.now()
 
     def notify_model_install(self,
                              installation_path,
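
The new prepare_reception method centralizes the per-client reset that generation paths need: it clears generated_text unless the client is continuing a previous answer, rearms first_chunk, and zeroes the token counter and the timer. A hedged usage sketch; the handler name is hypothetical and lollmsElfServer stands for the LOLLMSElfServer singleton:

    def on_generate(client_id):
        # Reset the streaming state before any token arrives.
        lollmsElfServer.prepare_reception(client_id)
        # Per the method above, at this point:
        #   connections[client_id]["first_chunk"] is True
        #   nb_received_tokens == 0, start_time == now
        # ...start the actual generation here...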
@@ -11,6 +11,7 @@ from fastapi import HTTPException
 from pydantic import BaseModel
 import pkg_resources
 from lollms.server.elf_server import LOLLMSElfServer
+from lollms.types import SENDER_TYPES
 from fastapi.responses import FileResponse
 from lollms.binding import BindingBuilder, InstallOption
 from ascii_colors import ASCIIColors
@@ -77,13 +78,53 @@ def add_events(sio:socketio):
     @sio.on('execute_command')
     def execute_command(sid, data):
         client_id = sid
-        command = data["command"]
-        parameters = data["parameters"]
-        if lollmsElfServer.personality.processor is not None:
-            lollmsElfServer.start_time = datetime.now()
-            lollmsElfServer.personality.processor.callback = partial(lollmsElfServer.process_chunk, client_id=client_id)
-            lollmsElfServer.personality.processor.execute_command(command, parameters)
+        lollmsElfServer.cancel_gen = False
+        lollmsElfServer.connections[client_id]["generated_text"]=""
+        lollmsElfServer.connections[client_id]["cancel_generation"]=False
+        lollmsElfServer.connections[client_id]["continuing"]=False
+        lollmsElfServer.connections[client_id]["first_chunk"]=True
+
+        if not lollmsElfServer.model:
+            ASCIIColors.error("Model not selected. Please select a model")
+            lollmsElfServer.error("Model not selected. Please select a model", client_id=client_id)
+            return
+
+        if not lollmsElfServer.busy:
+            if lollmsElfServer.connections[client_id]["current_discussion"] is None:
+                if lollmsElfServer.db.does_last_discussion_have_messages():
+                    lollmsElfServer.connections[client_id]["current_discussion"] = lollmsElfServer.db.create_discussion()
+                else:
+                    lollmsElfServer.connections[client_id]["current_discussion"] = lollmsElfServer.db.load_last_discussion()
+
+            ump = lollmsElfServer.config.discussion_prompt_separator +lollmsElfServer.config.user_name.strip() if lollmsElfServer.config.use_user_name_in_discussions else lollmsElfServer.personality.user_message_prefix
+            message = lollmsElfServer.connections[client_id]["current_discussion"].add_message(
+                message_type      = MSG_TYPE.MSG_TYPE_FULL.value,
+                sender_type       = SENDER_TYPES.SENDER_TYPES_USER.value,
+                sender            = ump.replace(lollmsElfServer.config.discussion_prompt_separator,"").replace(":",""),
+                content           = "",
+                metadata          = None,
+                parent_message_id = lollmsElfServer.message_id
+            )
+            lollmsElfServer.busy=True
+
+            client_id = sid
+            command = data["command"]
+            parameters = data["parameters"]
+            lollmsElfServer.prepare_reception(client_id)
+            if lollmsElfServer.personality.processor is not None:
+                lollmsElfServer.start_time = datetime.now()
+                lollmsElfServer.personality.processor.callback = partial(lollmsElfServer.process_chunk, client_id=client_id)
+                lollmsElfServer.personality.processor.execute_command(command, parameters)
+            else:
+                lollmsElfServer.warning("Non scripted personalities do not support commands",client_id=client_id)
+            lollmsElfServer.close_message(client_id)
+            lollmsElfServer.busy=False
+
+            #tpe = threading.Thread(target=lollmsElfServer.start_message_generation, args=(message, message_id, client_id))
+            #tpe.start()
         else:
-            lollmsElfServer.warning("Non scripted personalities do not support commands",client_id=client_id)
-            lollmsElfServer.close_message(client_id)
+            lollmsElfServer.error("I am busy. Come back later.", client_id=client_id)
 
         lollmsElfServer.busy=False
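
From the client side, the handler above is driven by a single socketio event carrying the command name and its parameters. A minimal sketch using python-socketio; the server URL and the command name are assumptions, while the event name and payload keys come straight from the handler:

    import socketio

    sio = socketio.Client()
    sio.connect("http://localhost:9600")  # assumed lollms server address

    sio.emit("execute_command", {
        "command": "help",       # hypothetical command exposed by a scripted personality
        "parameters": [],
    })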