Upgraded version

Saifeddine ALOUI 2024-01-09 23:26:41 +01:00
parent a0c04e1cc3
commit 10b65fe88b
15 changed files with 290 additions and 235 deletions

View File

@@ -232,6 +232,9 @@ class LoLLMsAPI(LollmsApplication):
            ASCIIColors.error(f'Client {request.sid} disconnected')

        # ---- chatbox -----
        @socketio.on('add_webpage')
        def add_webpage(data):
            ASCIIColors.yellow("Scraping web page")
@@ -295,8 +298,38 @@ class LoLLMsAPI(LollmsApplication):
            except Exception as ex:
                trace_exception(ex)
                self.error("Couldn't use the webcam")

        @self.socketio.on('create_empty_message')
        def create_empty_message(data):
            client_id = request.sid
            type = data.get("type",0)
            message = data.get("message","")
            if type==0:
                ASCIIColors.info(f"Building empty User message requested by : {client_id}")
                # send the message to the bot
                print(f"Creating an empty message for AI answer orientation")
                if self.connections[client_id]["current_discussion"]:
                    if not self.model:
                        self.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
                        return
                    self.new_message(client_id, self.config.user_name, message, sender_type=SENDER_TYPES.SENDER_TYPES_USER, open=True)
                    self.socketio.sleep(0.01)
            else:
                if self.personality is None:
                    self.warning("Select a personality")
                    return
                ASCIIColors.info(f"Building empty AI message requested by : {client_id}")
                # send the message to the bot
                print(f"Creating an empty message for AI answer orientation")
                if self.connections[client_id]["current_discussion"]:
                    if not self.model:
                        self.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
                        return
                    self.new_message(client_id, self.personality.name, "[edit this to put your ai answer start]", open=True)
                    self.socketio.sleep(0.01)

        # -- interactive view --
        @socketio.on('start_webcam_video_stream')
        def start_webcam_video_stream():
            self.info("Starting video capture")
@@ -318,6 +351,9 @@ class LoLLMsAPI(LollmsApplication):
            self.audio_cap.stop_recording()

        # -- vectorization --
        @socketio.on('upgrade_vectorization')
        def upgrade_vectorization():
            if self.config.data_vectorization_activate and self.config.use_discussions_history:
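The 'upgrade_vectorization' event takes no payload; reusing the connected client from the sketch above:

    sio.emit("upgrade_vectorization")  # server re-vectorizes the discussion history and reports progress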
@@ -354,6 +390,10 @@ class LoLLMsAPI(LollmsApplication):
                self.socketio.emit('hide_progress')
                self.socketio.sleep(0)

        # -- model --
        @socketio.on('cancel_install')
        def cancel_install(data):
            try:
@@ -621,6 +661,9 @@ class LoLLMsAPI(LollmsApplication):
                    'binding_folder' : binding_folder
                }, room=request.sid)

        # -- discussion --
        @socketio.on('new_discussion')
        def new_discussion(data):
            ASCIIColors.yellow("New discussion requested")
@@ -692,7 +735,9 @@ class LoLLMsAPI(LollmsApplication):
                    room=client_id
                )
                ASCIIColors.green(f"ok")

        # -- upload file --
        @socketio.on('upload_file')
        def upload_file(data):
            ASCIIColors.yellow("Uploading file")
@@ -715,16 +760,8 @@ class LoLLMsAPI(LollmsApplication):
            except Exception as e:
                # Error occurred while saving the file
                socketio.emit('progress', {'status':False, 'error': str(e)})

        @socketio.on('cancel_generation')
        def cancel_generation():
            client_id = request.sid
            self.cancel_gen = True
            # kill thread
            ASCIIColors.error(f'Client {request.sid} requested cancelling generation')
            terminate_thread(self.connections[client_id]['generation_thread'])
            ASCIIColors.error(f'Client {request.sid} canceled generation')
            self.busy = False

        # -- personality --
        @socketio.on('get_personality_files')
        def get_personality_files(data):
@@ -769,16 +806,20 @@ class LoLLMsAPI(LollmsApplication):
                    # Request the next chunk from the client
                    self.socketio.emit('request_next_chunk', {'offset': offset + len(chunk)})

        @self.socketio.on('cancel_text_generation')
        def cancel_text_generation(data):
        @socketio.on('execute_command')
        def execute_command(data):
            client_id = request.sid
            self.connections[client_id]["requested_stop"]=True
            print(f"Client {client_id} requested canceling generation")
            self.socketio.emit("generation_canceled", {"message":"Generation is canceled."}, room=client_id)
            self.socketio.sleep(0)
            self.busy = False
            command = data["command"]
            parameters = data["parameters"]
            if self.personality.processor is not None:
                self.start_time = datetime.now()
                self.personality.processor.callback = partial(self.process_chunk, client_id=client_id)
                self.personality.processor.execute_command(command, parameters)
            else:
                self.warning("Non scripted personalities do not support commands", client_id=client_id)
            self.close_message(client_id)

        # -- misc --
        @self.socketio.on('execute_python_code')
        def execute_python_code(data):
            """Executes Python code and returns the output."""
@@ -802,34 +843,27 @@ class LoLLMsAPI(LollmsApplication):
                output = interpreter.getvalue()
                self.socketio.emit("execution_output", {"output":output,"execution_time":end_time - start_time}, room=client_id)

        @self.socketio.on('create_empty_message')
        def create_empty_message(data):
        # -- generation --
        @socketio.on('cancel_generation')
        def cancel_generation():
            client_id = request.sid
            type = data.get("type",0)
            message = data.get("message","")
            if type==0:
                ASCIIColors.info(f"Building empty User message requested by : {client_id}")
                # send the message to the bot
                print(f"Creating an empty message for AI answer orientation")
                if self.connections[client_id]["current_discussion"]:
                    if not self.model:
                        self.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
                        return
                    self.new_message(client_id, self.config.user_name, message, sender_type=SENDER_TYPES.SENDER_TYPES_USER, open=True)
                    self.socketio.sleep(0.01)
            else:
                if self.personality is None:
                    self.warning("Select a personality")
                    return
                ASCIIColors.info(f"Building empty AI message requested by : {client_id}")
                # send the message to the bot
                print(f"Creating an empty message for AI answer orientation")
                if self.connections[client_id]["current_discussion"]:
                    if not self.model:
                        self.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
                        return
                    self.new_message(client_id, self.personality.name, "[edit this to put your ai answer start]", open=True)
                    self.socketio.sleep(0.01)
            self.cancel_gen = True
            # kill thread
            ASCIIColors.error(f'Client {request.sid} requested cancelling generation')
            terminate_thread(self.connections[client_id]['generation_thread'])
            ASCIIColors.error(f'Client {request.sid} canceled generation')
            self.busy = False

        @self.socketio.on('cancel_text_generation')
        def cancel_text_generation(data):
            client_id = request.sid
            self.connections[client_id]["requested_stop"]=True
            print(f"Client {client_id} requested canceling generation")
            self.socketio.emit("generation_canceled", {"message":"Generation is canceled."}, room=client_id)
            self.socketio.sleep(0)
            self.busy = False

        # A copy of the original lollms-server generation code needed for playground
        @self.socketio.on('generate_text')
@@ -993,18 +1027,6 @@ class LoLLMsAPI(LollmsApplication):
            # Start the text generation task in a separate thread
            task = self.socketio.start_background_task(target=generate_text)

        @socketio.on('execute_command')
        def execute_command(data):
            client_id = request.sid
            command = data["command"]
            parameters = data["parameters"]
            if self.personality.processor is not None:
                self.start_time = datetime.now()
                self.personality.processor.callback = partial(self.process_chunk, client_id=client_id)
                self.personality.processor.execute_command(command, parameters)
            else:
                self.warning("Non scripted personalities do not support commands",client_id=client_id)
            self.close_message(client_id)

        @socketio.on('generate_msg')
        def generate_msg(data):
            client_id = request.sid
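A generation kicked off with 'generate_msg' can be aborted through the relocated 'cancel_generation' event, which takes no payload:

    # sio: connected socketio.Client (see the first sketch)
    sio.emit("cancel_generation")  # the server terminates this client's generation thread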

app.py (22 changed lines)
View File

@@ -386,6 +386,13 @@ try:
            self.add_endpoint("/reset", "reset", self.reset, methods=["GET"])
            self.add_endpoint("/get_server_address", "get_server_address", self.get_server_address, methods=["GET"])

            self.add_endpoint("/list_voices", "list_voices", self.list_voices, methods=["GET"])
            self.add_endpoint("/set_voice", "set_voice", self.set_voice, methods=["POST"])
            self.add_endpoint("/text2Audio", "text2Audio", self.text2Audio, methods=["POST"])
            self.add_endpoint("/install_xtts", "install_xtts", self.install_xtts, methods=["GET"])
            # ----
@@ -393,16 +400,6 @@ try:
            self.add_endpoint(
                "/list_voices", "list_voices", self.list_voices, methods=["GET"]
            )
            self.add_endpoint(
                "/set_voice", "set_voice", self.set_voice, methods=["POST"]
            )
            self.add_endpoint(
                "/text2Audio", "text2Audio", self.text2Audio, methods=["POST"]
            )
            self.add_endpoint(
                "/get_presets", "get_presets", self.get_presets, methods=["GET"]
@@ -420,9 +417,6 @@ try:
                "/execute_code", "execute_code", self.execute_code, methods=["POST"]
            )
            self.add_endpoint(
                "/install_xtts", "install_xtts", self.install_xtts, methods=["GET"]
            )
            self.add_endpoint(
                "/install_sd", "install_sd", self.install_sd, methods=["GET"]
            )
@@ -755,8 +749,6 @@ try:
            return jsonify({"status":True})

        def text2Audio(self):
            # Get the JSON data from the POST request.
            try:
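The voice endpoints consolidated above are plain HTTP routes; a minimal sketch using the requests package (base URL is an assumption, not part of this commit):

    import requests

    base = "http://localhost:9600"  # assumed lollms-webui address
    print(requests.get(f"{base}/list_voices").json())                 # e.g. {"voices": ["main_voice", ...]}
    requests.post(f"{base}/set_voice", json={"voice": "main_voice"})  # persisted when auto_save is on
    r = requests.post(f"{base}/text2Audio", json={"text": "Hello from lollms"})
    print(r.json())  # e.g. {"url": "audio/voice_sample_0.wav"}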

View File

@@ -28,6 +28,7 @@ from utilities.execution_engines.python_execution_engine import execute_python
from utilities.execution_engines.latex_execution_engine import execute_latex
from utilities.execution_engines.shell_execution_engine import execute_bash
# ----------------------- Defining router and main class ------------------------------
router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()

View File

@@ -28,6 +28,7 @@ from utilities.execution_engines.python_execution_engine import execute_python
from utilities.execution_engines.latex_execution_engine import execute_latex
from utilities.execution_engines.shell_execution_engine import execute_bash
# ----------------------- Defining router and main class ------------------------------
router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()

View File

@@ -37,6 +37,7 @@ class DeleteDiscussionParameters(BaseModel):
    client_id: str
    id: int
# ----------------------- Defining router and main class ------------------------------
router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()

View File

@@ -19,6 +19,7 @@ from api.db import DiscussionsDB
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
# ----------------------- Defining router and main class ------------------------------
router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()

View File

@@ -23,6 +23,8 @@ from fastapi import FastAPI, UploadFile, File
import shutil

class PersonalPathParameters(BaseModel):
    path:str
# ----------------------- Defining router and main class ------------------------------
router = APIRouter()
lollmsElfServer = LOLLMSWebUI.get_instance()

View File

@@ -18,9 +18,10 @@ from pathlib import Path
from typing import List
import sys
# ----------------------- Defining router and main class ------------------------------
# Create an instance of the LoLLMSWebUI class
lollmsElfServer = LOLLMSWebUI.get_instance()
router = APIRouter()
@router.get("/get_lollms_webui_version")

View File

@@ -13,21 +13,30 @@ from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception
from lollms.utilities import detect_antiprompt, remove_text_from_string, trace_exception, find_first_available_file_index, add_period
from pathlib import Path
from ascii_colors import ASCIIColors
import os
import platform

from utilities.execution_engines.python_execution_engine import execute_python
from utilities.execution_engines.latex_execution_engine import execute_latex
from utilities.execution_engines.shell_execution_engine import execute_bash

# ----------------------- Defining router and main class ------------------------------
router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()

@router.post("/execute_code")
async def execute_code(request: Request):
# ----------------------- voice ------------------------------
@router.get("/list_voices")
def list_voices():
    ASCIIColors.yellow("Listing voices")
    voices = ["main_voice"]
    voices_dir:Path = lollmsElfServer.lollms_paths.custom_voices_path
    voices += [v.stem for v in voices_dir.iterdir() if v.suffix==".wav"]
    return {"voices":voices}

@router.post("/set_voice")
async def set_voice(request: Request):
    """
    Sets the voice used by the text-to-speech engine.
@@ -37,32 +46,19 @@ async def execute_code(request: Request):
    try:
        data = (await request.json())
        code = data["code"]
        discussion_id = data.get("discussion_id","unknown_discussion")
        message_id = data.get("message_id","unknown_message")
        language = data.get("language","python")
        ASCIIColors.info("Executing python code:")
        ASCIIColors.yellow(code)
        if language=="python":
            return execute_python(code, discussion_id, message_id)
        elif language=="latex":
            return execute_latex(code, discussion_id, message_id)
        elif language in ["bash","shell","cmd","powershell"]:
            return execute_bash(code, discussion_id, message_id)
        return {"output": "Unsupported language", "execution_time": 0}
        lollmsElfServer.config.current_voice = data["voice"]
        if lollmsElfServer.config.auto_save:
            lollmsElfServer.config.save_config()
        return {"status":True}
    except Exception as ex:
        trace_exception(ex)
        lollmsElfServer.error(ex)
        return {"status":False,"error":str(ex)}
@router.post("/open_code_folder")
async def open_code_folder(request: Request):
@router.post("/text2Audio")
async def text2Audio(request: Request):
"""
Opens code folder.
Executes Python code and returns the output.
:param request: The HTTP request object.
:return: A JSON response with the status of the operation.
@@ -70,134 +66,39 @@ async def open_code_folder(request: Request):
    try:
        data = (await request.json())
        discussion_id = data.get("discussion_id","unknown_discussion")
    # Get the JSON data from the POST request.
    try:
        from lollms.audio_gen_modules.lollms_xtts import LollmsXTTS
        if lollmsElfServer.tts is None:
            lollmsElfServer.tts = LollmsXTTS(lollmsElfServer, voice_samples_path=Path(__file__).parent/"voices")
    except:
        return {"url": None}
    data = (await request.json())
    voice = data.get("voice",lollmsElfServer.config.current_voice)
    index = find_first_available_file_index(lollmsElfServer.tts.output_folder, "voice_sample_",".wav")
    output_fn = data.get("fn",f"voice_sample_{index}.wav")
    if voice is None:
        voice = "main_voice"
    lollmsElfServer.info("Starting to build voice")
    try:
        from lollms.audio_gen_modules.lollms_xtts import LollmsXTTS
        if lollmsElfServer.tts is None:
            lollmsElfServer.tts = LollmsXTTS(lollmsElfServer, voice_samples_path=Path(__file__).parent/"voices")
        language = lollmsElfServer.config.current_language  # convert_language_name()
        if voice!="main_voice":
            voices_folder = lollmsElfServer.lollms_paths.custom_voices_path
        else:
            voices_folder = Path(__file__).parent/"voices"
        lollmsElfServer.tts.set_speaker_folder(voices_folder)
        url = f"audio/{output_fn}"
        preprocessed_text = add_period(data['text'])
        ASCIIColors.info("Opening folder:")
        # Create a temporary file.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True,exist_ok=True)
        if platform.system() == 'Windows':
            os.startfile(str(root_folder))
        elif platform.system() == 'Linux':
            os.system('xdg-open ' + str(root_folder))
        elif platform.system() == 'Darwin':
            os.system('open ' + str(root_folder))
        return {"output": "OK", "execution_time": 0}
    except Exception as ex:
        trace_exception(ex)
        lollmsElfServer.error(ex)
        return {"status":False,"error":str(ex)}

@router.post("/open_code_folder_in_vs_code")
async def open_code_folder_in_vs_code(request: Request):
    """
    Opens code folder.

    :param request: The HTTP request object.
    :return: A JSON response with the status of the operation.
    """
    try:
        data = (await request.json())
        code = data["code"]
        discussion_id = data.get("discussion_id","unknown_discussion")
        message_id = data.get("message_id","unknown_message")
        language = data.get("language","python")
        ASCIIColors.info("Opening folder:")
        # Create a temporary file.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True,exist_ok=True)
        tmp_file = root_folder/f"ai_code_{message_id}.py"
        with open(tmp_file,"w") as f:
            f.write(code)
        os.system('code ' + str(root_folder))
        return {"output": "OK", "execution_time": 0}
    except Exception as ex:
        trace_exception(ex)
        lollmsElfServer.error(ex)
        return {"status":False,"error":str(ex)}

@router.post("/open_file")
async def open_file(request: Request):
    """
    Opens a file with the system's default application.

    :param request: The HTTP request object.
    :return: A JSON response with the status of the operation.
    """
    try:
        data = (await request.json())
        path = data.get('path')
        os.system("start " + path)
        return {"output": "OK", "execution_time": 0}
    except Exception as ex:
        trace_exception(ex)
        lollmsElfServer.error(ex)
        return {"status":False,"error":str(ex)}

@router.post("/open_code_in_vs_code")
async def open_code_in_vs_code(request: Request):
    """
    Opens code in vs code.

    :param request: The HTTP request object.
    :return: A JSON response with the status of the operation.
    """
    try:
        data = (await request.json())
        discussion_id = data.get("discussion_id","unknown_discussion")
        message_id = data.get("message_id","")
        code = data["code"]
        discussion_id = data.get("discussion_id","unknown_discussion")
        message_id = data.get("message_id","unknown_message")
        language = data.get("language","python")
        ASCIIColors.info("Opening folder:")
        # Create a temporary file.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"/f"{message_id}.py"
        root_folder.mkdir(parents=True,exist_ok=True)
        tmp_file = root_folder/f"ai_code_{message_id}.py"
        with open(tmp_file,"w") as f:
            f.write(code)
        os.system('code ' + str(root_folder))
        return {"output": "OK", "execution_time": 0}
    except Exception as ex:
        trace_exception(ex)
        lollmsElfServer.error(ex)
        return {"status":False,"error":str(ex)}

@router.post("/open_code_folder")
async def open_code_folder(request: Request):
    """
    Opens code folder.

    :param request: The HTTP request object.
    :return: A JSON response with the status of the operation.
    """
    try:
        data = (await request.json())
        discussion_id = data.get("discussion_id","unknown_discussion")
        ASCIIColors.info("Opening folder:")
        # Create a temporary file.
        root_folder = lollmsElfServer.lollms_paths.personal_outputs_path/"discussions"/f"d_{discussion_id}"
        root_folder.mkdir(parents=True,exist_ok=True)
        if platform.system() == 'Windows':
            os.startfile(str(root_folder))
        elif platform.system() == 'Linux':
            os.system('xdg-open ' + str(root_folder))
        elif platform.system() == 'Darwin':
            os.system('open ' + str(root_folder))
        return {"output": "OK", "execution_time": 0}
        lollmsElfServer.tts.tts_to_file(preprocessed_text, f"{voice}.wav", f"{output_fn}", language=language)
        lollmsElfServer.info("Voice file ready")
        return {"url": url}
    except:
        return {"url": None}
    except Exception as ex:
        trace_exception(ex)
        lollmsElfServer.error(ex)
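The output filename above is picked by scanning for the first free numeric suffix. A minimal sketch of what such a helper can look like (illustrative only; the actual find_first_available_file_index in lollms.utilities may differ):

    from pathlib import Path

    def first_free_index(folder: Path, prefix: str, extension: str) -> int:
        # Return the first i for which f"{prefix}{i}{extension}" does not exist yet in folder.
        i = 0
        while (folder / f"{prefix}{i}{extension}").exists():
            i += 1
        return i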

View File

@@ -18,13 +18,15 @@ from ascii_colors import ASCIIColors
from lollms.personality import MSG_TYPE, AIPersonality
from lollms.types import MSG_TYPE, SENDER_TYPES
from lollms.utilities import load_config, trace_exception, gc
from lollms.utilities import find_first_available_file_index, convert_language_name
from lollms.utilities import find_first_available_file_index, convert_language_name, PackageManager, run_async
from lollms_webui import LOLLMSWebUI
from pathlib import Path
from typing import List
from functools import partial
import socketio
import threading
import os
import time
from api.db import Discussion
from datetime import datetime
@@ -60,4 +62,69 @@ def add_events(sio:socketio):
            if not lollmsElfServer.model:
                lollmsElfServer.error("No model selected. Please make sure you select a model before starting generation", client_id=client_id)
                return
            lollmsElfServer.new_message(client_id, lollmsElfServer.personality.name, "[edit this to put your ai answer start]", open=True)
            lollmsElfServer.new_message(client_id, lollmsElfServer.personality.name, "[edit this to put your ai answer start]", open=True)

    @sio.on('add_webpage')
    def add_webpage(sid, data):
        ASCIIColors.yellow("Scraping web page")
        url = data['url']
        index = find_first_available_file_index(lollmsElfServer.lollms_paths.personal_uploads_path,"web_",".txt")
        file_path = lollmsElfServer.lollms_paths.personal_uploads_path/f"web_{index}.txt"
        lollmsElfServer.scrape_and_save(url=url, file_path=file_path)
        try:
            if not lollmsElfServer.personality.processor is None:
                lollmsElfServer.personality.processor.add_file(file_path, partial(lollmsElfServer.process_chunk, client_id = sid))
                # File saved successfully
                run_async(partial(sio.emit,'web_page_added', {'status':True,}))
            else:
                lollmsElfServer.personality.add_file(file_path, partial(lollmsElfServer.process_chunk, client_id = sid))
                # File saved successfully
                run_async(partial(sio.emit,'web_page_added', {'status':True}))
        except Exception as e:
            # Error occurred while saving the file
            run_async(partial(sio.emit,'web_page_added', {'status':False}))

    @sio.on('take_picture')
    def take_picture(sid):
        try:
            lollmsElfServer.info("Loading camera")
            if not PackageManager.check_package_installed("cv2"):
                PackageManager.install_package("opencv-python")
            import cv2
            cap = cv2.VideoCapture(0)
            n = time.time()
            lollmsElfServer.info("Stand by for taking a shot in 2s")
            while (time.time()-n < 2):
                _, frame = cap.read()
            _, frame = cap.read()
            cap.release()
            lollmsElfServer.info("Shot taken")
            cam_shot_path = lollmsElfServer.lollms_paths.personal_uploads_path/"camera_shots"
            cam_shot_path.mkdir(parents=True, exist_ok=True)
            filename = find_first_available_file_index(cam_shot_path, "cam_shot_", extension=".png")
            save_path = cam_shot_path/f"cam_shot_{filename}.png"  # Specify the desired folder path
            try:
                cv2.imwrite(str(save_path), frame)
                if not lollmsElfServer.personality.processor is None:
                    lollmsElfServer.info("Sending file to scripted persona")
                    lollmsElfServer.personality.processor.add_file(save_path, partial(lollmsElfServer.process_chunk, client_id = sid))
                    # File saved successfully
                    run_async(partial(sio.emit,'picture_taken', {'status':True, 'progress': 100}))
                    lollmsElfServer.info("File sent to scripted persona")
                else:
                    lollmsElfServer.info("Sending file to persona")
                    lollmsElfServer.personality.add_file(save_path, partial(lollmsElfServer.process_chunk, client_id = sid))
                    # File saved successfully
                    run_async(partial(sio.emit,'picture_taken', {'status':True, 'progress': 100}))
                    lollmsElfServer.info("File sent to persona")
            except Exception as e:
                trace_exception(e)
                # Error occurred while saving the file
                run_async(partial(sio.emit,'picture_taken', {'status':False, 'error': str(e)}))
        except Exception as ex:
            trace_exception(ex)
            lollmsElfServer.error("Couldn't use the webcam")

View File

@@ -0,0 +1,59 @@
"""
project: lollms
file: lollms_discussion_events.py
author: ParisNeo
description:
    This module contains a set of Socketio routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
    application. These routes are specific to discussion operations.
"""
from fastapi import APIRouter, Request
from fastapi import HTTPException
from pydantic import BaseModel
import pkg_resources
from lollms.server.elf_server import LOLLMSElfServer
from fastapi.responses import FileResponse
from lollms.binding import BindingBuilder, InstallOption
from ascii_colors import ASCIIColors
from lollms.personality import MSG_TYPE, AIPersonality
from lollms.types import MSG_TYPE, SENDER_TYPES
from lollms.utilities import load_config, trace_exception, gc
from lollms.utilities import find_first_available_file_index, convert_language_name, PackageManager, run_async
from lollms_webui import LOLLMSWebUI
from pathlib import Path
from typing import List
from functools import partial
import socketio
import threading
import os
import time
from api.db import Discussion
from datetime import datetime

router = APIRouter()
lollmsElfServer:LOLLMSWebUI = LOLLMSWebUI.get_instance()

# ----------------------------------- events -----------------------------------------
def add_events(sio:socketio):
    @sio.on('start_webcam_video_stream')
    def start_webcam_video_stream(sid):
        lollmsElfServer.info("Starting video capture")
        lollmsElfServer.webcam.start_capture()

    @sio.on('stop_webcam_video_stream')
    def stop_webcam_video_stream(sid):
        lollmsElfServer.info("Stopping video capture")
        lollmsElfServer.webcam.stop_capture()

    @sio.on('start_audio_stream')
    def start_audio_stream(sid):
        lollmsElfServer.info("Starting audio capture")
        lollmsElfServer.audio_cap.start_recording()

    @sio.on('stop_audio_stream')
    def stop_audio_stream(sid):
        lollmsElfServer.info("Stopping audio capture")
        lollmsElfServer.audio_cap.stop_recording()
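All four stream events take no payload; a client-side sketch (connected python-socketio client assumed):

    sio.emit("start_webcam_video_stream")
    sio.emit("stop_webcam_video_stream")
    sio.emit("start_audio_stream")
    sio.emit("stop_audio_stream")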

@@ -1 +1 @@
Subproject commit 918fcf7c00d5f1240cb511d39771d590221531c5
Subproject commit 6708243a62713be44a60c17851037ff3a454c9b9

View File

@@ -81,9 +81,11 @@ if __name__ == "__main__":
    from lollms.server.events.lollms_generation_events import add_events as lollms_generation_events_add
    from lollms.server.events.lollms_personality_events import add_events as lollms_personality_events_add
    from lollms.server.events.lollms_files_events import add_events as lollms_files_events_add
    from events.lollms_generation_events import add_events as lollms_webui_generation_events_add
    from events.lollms_discussion_events import add_events as lollms_webui_discussion_events_add
    from events.lollms_chatbox_events import add_events as lollms_chatbox_events_add
    from events.lollms_interactive_events import add_events as lollms_interactive_events_add
@@ -106,14 +108,19 @@ if __name__ == "__main__":
    app.include_router(chat_bar_router)
    app.include_router(lollms_configuration_infos_router)

    lollms_generation_events_add(sio)
    lollms_personality_events_add(sio)
    lollms_files_events_add(sio)
    lollms_webui_generation_events_add(sio)
    lollms_webui_discussion_events_add(sio)
    lollms_chatbox_events_add(sio)
    lollms_interactive_events_add(sio)

    app.mount("/extensions", StaticFiles(directory=Path(__file__).parent/"web"/"dist", html=True), name="extensions")

@@ -1 +1 @@
Subproject commit 0f32c4da1b6c7dbdbed1f50bd8a09ad4b18adc1a
Subproject commit e46c0ea5187ae7bc0a79a16c542db0215b84d7b3

@@ -1 +1 @@
Subproject commit 08c176fc621478790a103a37cab11f4082a2fb69
Subproject commit cd7b93f7f0bb1a17928378ff7381729a846de371