fixed code

This commit is contained in:
Saifeddine ALOUI 2024-01-07 15:27:14 +01:00
parent 376f056ae8
commit 06514cbfaf
15 changed files with 207 additions and 114 deletions

60
app.py
View File

@ -281,6 +281,7 @@ try:
self.add_endpoint("/list_models", "list_models", self.list_models, methods=["GET"])
self.add_endpoint("/get_active_model", "get_active_model", self.get_active_model, methods=["GET"])
self.add_endpoint("/add_reference_to_local_model", "add_reference_to_local_model", self.add_reference_to_local_model, methods=["POST"])
self.add_endpoint("/get_model_status", "get_model_status", self.get_model_status, methods=["GET"])
self.add_endpoint("/post_to_personality", "post_to_personality", self.post_to_personality, methods=["POST"])
@ -299,6 +300,7 @@ try:
self.add_endpoint("/get_personality_settings", "get_personality_settings", self.get_personality_settings, methods=["POST"])
self.add_endpoint("/get_active_personality_settings", "get_active_personality_settings", self.get_active_personality_settings, methods=["GET"])
self.add_endpoint("/set_active_personality_settings", "set_active_personality_settings", self.set_active_personality_settings, methods=["POST"])
self.add_endpoint("/get_current_personality_path_infos", "get_current_personality_path_infos", self.get_current_personality_path_infos, methods=["GET"])
self.add_endpoint("/list_databases", "list_databases", self.list_databases, methods=["GET"])
@ -334,6 +336,13 @@ try:
self.add_endpoint("/list_discussions", "list_discussions", self.list_discussions, methods=["GET"])
self.add_endpoint("/get_generation_status", "get_generation_status", self.get_generation_status, methods=["GET"])
self.add_endpoint("/", "", self.index, methods=["GET"])
self.add_endpoint("/settings/", "", self.index, methods=["GET"])
self.add_endpoint("/playground/", "", self.index, methods=["GET"])
# ----
@ -351,9 +360,6 @@ try:
self.add_endpoint("/", "", self.index, methods=["GET"])
self.add_endpoint("/settings/", "", self.index, methods=["GET"])
self.add_endpoint("/playground/", "", self.index, methods=["GET"])
@ -369,7 +375,6 @@ try:
self.add_endpoint("/get_server_address", "get_server_address", self.get_server_address, methods=["GET"])
self.add_endpoint("/get_model_status", "get_model_status", self.get_model_status, methods=["GET"])
self.add_endpoint(
"/delete_discussion",
@ -378,27 +383,14 @@ try:
methods=["POST"],
)
self.add_endpoint(
"/edit_message", "edit_message", self.edit_message, methods=["GET"]
)
self.add_endpoint(
"/message_rank_up", "message_rank_up", self.message_rank_up, methods=["GET"]
)
self.add_endpoint(
"/message_rank_down", "message_rank_down", self.message_rank_down, methods=["GET"]
)
self.add_endpoint(
"/delete_message", "delete_message", self.delete_message, methods=["GET"]
)
self.add_endpoint("/edit_message", "edit_message", self.edit_message, methods=["GET"])
self.add_endpoint("/message_rank_up", "message_rank_up", self.message_rank_up, methods=["GET"])
self.add_endpoint("/message_rank_down", "message_rank_down", self.message_rank_down, methods=["GET"])
self.add_endpoint("/delete_message", "delete_message", self.delete_message, methods=["GET"])
self.add_endpoint(
"/get_config", "get_config", self.get_config, methods=["GET"]
)
self.add_endpoint("/get_config", "get_config", self.get_config, methods=["GET"])
self.add_endpoint(
"/get_current_personality_path_infos", "get_current_personality_path_infos", self.get_current_personality_path_infos, methods=["GET"]
)
self.add_endpoint(
"/get_available_models", "get_available_models", self.get_available_models, methods=["GET"]
@ -428,9 +420,6 @@ try:
"/help", "help", self.help, methods=["GET"]
)
self.add_endpoint(
"/get_generation_status", "get_generation_status", self.get_generation_status, methods=["GET"]
)
self.add_endpoint(
"/update_setting", "update_setting", self.update_setting, methods=["POST"]
@ -539,7 +528,8 @@ try:
self.binding = None
self.model = None
for per in self.mounted_personalities:
per.model = None
if per is not None:
per.model = None
gc.collect()
self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.INSTALL_IF_NECESSARY, lollmsCom=self)
self.model = None
@ -1153,7 +1143,8 @@ try:
self.binding = None
self.model = None
for per in self.mounted_personalities:
per.model = None
if per is not None:
per.model = None
gc.collect()
self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, InstallOption.INSTALL_IF_NECESSARY, lollmsCom=self)
self.model = None
@ -1182,7 +1173,8 @@ try:
if self.model is not None:
ASCIIColors.yellow("New model OK")
for per in self.mounted_personalities:
per.model = self.model
if per is not None:
per.model = self.model
except Exception as ex:
trace_exception(ex)
self.InfoMessage(f"It looks like you we couldn't load the model.\nHere is the error message:\n{ex}")
@ -1748,7 +1740,8 @@ try:
self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, lollmsCom=self)
self.model = self.binding.build_model()
for per in self.mounted_personalities:
per.model = self.model
if per is not None:
per.model = self.model
return jsonify({"status": True})
except Exception as ex:
self.error(f"Couldn't build binding: [{ex}]")
@ -1777,7 +1770,8 @@ try:
self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, lollmsCom=self)
self.model = self.binding.build_model()
for per in self.mounted_personalities:
per.model = self.model
if per is not None:
per.model = self.model
return jsonify({"status": True})
except Exception as ex:
@ -1810,7 +1804,8 @@ try:
self.binding = BindingBuilder().build_binding(self.config, self.lollms_paths, lollmsCom=self)
self.model = self.binding.build_model()
for per in self.mounted_personalities:
per.model = self.model
if per is not None:
per.model = self.model
else:
self.config.binding_name = None
if self.config.auto_save:
@ -1903,7 +1898,8 @@ try:
self.binding = None
self.model = None
for per in self.mounted_personalities:
per.model = None
if per is not None:
per.model = None
gc.collect()
ASCIIColors.info(f"issuing command : python gptqlora.py --model_path {self.lollms_paths.personal_models_path/fn/data['model_name']}")
subprocess.run(["python", "gptqlora.py", "--model_path", self.lollms_paths.personal_models_path/fn/data["model_name"]],cwd=self.lollms_paths.gptqlora_path)

View File

@ -1,3 +1,12 @@
"""
project: lollms_webui
file: lollms_discussion.py
author: ParisNeo
description:
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
application. These routes allow users to manipulate the discussion elements.
"""
from fastapi import APIRouter
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel

View File

@ -0,0 +1,25 @@
"""
project: lollms_message
file: lollms_message.py
author: ParisNeo
description:
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
application. These routes allow users to manipulate the message elements.
"""
from fastapi import APIRouter
from lollms_webui import LOLLMSWebUI
from pydantic import BaseModel
from starlette.responses import StreamingResponse
from lollms.types import MSG_TYPE
from lollms.utilities import detect_antiprompt, remove_text_from_string
from ascii_colors import ASCIIColors
from api.db import DiscussionsDB
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod
import tqdm
router = APIRouter()
lollmsElfServer = LOLLMSWebUI.get_instance()

48
endpoints/lollms_user.py Normal file
View File

@ -0,0 +1,48 @@
"""
project: lollms_user
file: lollms_user.py
author: ParisNeo
description:
This module contains a set of FastAPI routes that provide information about the Lord of Large Language and Multimodal Systems (LoLLMs) Web UI
application. These routes allow users to manipulate user information.
"""
from pathlib import Path

from fastapi import APIRouter, File, UploadFile
from pydantic import BaseModel
from starlette.responses import StreamingResponse
import tqdm
from ascii_colors import ASCIIColors
from safe_store.text_vectorizer import TextVectorizer, VectorizationMethod, VisualizationMethod

from lollms.types import MSG_TYPE
from lollms.main_config import BaseConfig
from lollms.utilities import detect_antiprompt, remove_text_from_string
from lollms_webui import LOLLMSWebUI
from api.db import DiscussionsDB
class PersonalPathParameters(BaseModel):
    # Request payload for /switch_personal_path: the new personal data folder.
    path: str
router = APIRouter()
lollmsElfServer = LOLLMSWebUI.get_instance()
@router.get("/switch_personal_path")
def switch_personal_path(data: PersonalPathParameters):
    """Switch the LoLLMs personal data folder to a new path.

    Loads ./global_paths_cfg.yaml, updates ``lollms_personal_path`` and
    persists it back to disk.

    Returns:
        dict: ``{"status": True}`` on success, or ``{"status": False,
        "error": ...}`` describing the failure.

    NOTE(review): a GET endpoint carrying a request body model is
    unconventional in HTTP/FastAPI — consider POST; kept as GET here to
    preserve the existing public API.
    """
    path = data.path
    global_paths_cfg = Path("./global_paths_cfg.yaml")
    if not global_paths_cfg.exists():
        # Previously this case fell through and implicitly returned None
        # (serialized as HTTP `null`); report an explicit failure instead.
        return {"status": False, "error": "No global_paths_cfg.yaml found"}
    try:
        cfg = BaseConfig()
        cfg.load_config(global_paths_cfg)
        cfg.lollms_personal_path = path
        cfg.save_config(global_paths_cfg)
        return {"status": True}
    except Exception as ex:
        print(ex)
        return {"status": False, "error": f"Couldn't switch path: {ex}"}
@router.post("/upload_avatar")
def upload_avatar(avatar: UploadFile = File(...)):
    """Receive an avatar image and store it in the personal user infos folder.

    Fixes the original Flask-style implementation (``data.files['avatar']`` /
    ``file.save(...)``), which does not exist in FastAPI: the unannotated
    ``data`` parameter was interpreted as a query string and the handler
    raised at runtime. Uses the standard FastAPI ``UploadFile`` instead.

    Returns:
        dict: ``{"status": True, "fileName": <stored file name>}``.
    """
    # Keep only the base name to prevent path traversal through the
    # client-supplied filename (e.g. "../../evil").
    file_name = Path(avatar.filename).name
    target = lollmsElfServer.lollms_paths.personal_user_infos_path / file_name
    with open(target, "wb") as f:
        f.write(avatar.file.read())
    return {"status": True, "fileName": file_name}

View File

@ -33,7 +33,7 @@ lollmsElfServer = LOLLMSWebUI.get_instance()
# ----------------------------------- events -----------------------------------------
def add_events(sio:socketio):
@sio.on('generate_msg')
def handle_generate_msg(sid, environ, data):
def handle_generate_msg(sid, data):
client_id = sid
lollmsElfServer.cancel_gen = False
lollmsElfServer.connections[client_id]["generated_text"]=""
@ -79,7 +79,7 @@ def add_events(sio:socketio):
lollmsElfServer.error("I am busy. Come back later.", client_id=client_id)
@sio.on('generate_msg_from')
def handle_generate_msg_from(sid, environ, data):
def handle_generate_msg_from(sid, data):
client_id = sid
lollmsElfServer.cancel_gen = False
lollmsElfServer.connections[client_id]["continuing"]=False
@ -101,7 +101,7 @@ def add_events(sio:socketio):
lollmsElfServer.connections[client_id]['generation_thread'].start()
@sio.on('continue_generate_msg_from')
def handle_continue_generate_msg_from(sid, environ, data):
def handle_continue_generate_msg_from(sid, data):
client_id = sid
lollmsElfServer.cancel_gen = False
lollmsElfServer.connections[client_id]["continuing"]=True

@ -1 +1 @@
Subproject commit adf1e23559cbea865972fd80ed2304eb7ec23901
Subproject commit 4ef9f5d72c3b9a8af1ec2caea52e203191feb9c4

View File

@ -20,8 +20,9 @@ from lollms.paths import LollmsPaths
from lollms.helpers import ASCIIColors, trace_exception
from lollms.com import NotificationType, NotificationDisplayType, LoLLMsCom
from lollms.app import LollmsApplication
from lollms.utilities import File64BitsManager, PromptReshaper, PackageManager, find_first_available_file_index, run_async
from lollms.utilities import File64BitsManager, PromptReshaper, PackageManager, find_first_available_file_index
import git
import asyncio
try:
from lollms.media import WebcamImageSender, AudioRecorder
@ -943,7 +944,7 @@ class LOLLMSWebUI(LOLLMSElfServer):
display_type:NotificationDisplayType=NotificationDisplayType.TOAST,
verbose=True
):
run_async(
asyncio.run(
self.socketio.emit('notification', {
'content': content,# self.connections[client_id]["generated_text"],
'notification_type': notification_type.value,
@ -991,28 +992,29 @@ class LOLLMSWebUI(LOLLMSElfServer):
model = self.config["model_name"],
personality = self.config["personalities"][self.config["active_personality_id"]],
) # first the content is empty, but we'll fill it at the end
asyncio.run(
self.socketio.emit('new_message',
{
"sender": sender,
"message_type": message_type.value,
"sender_type": SENDER_TYPES.SENDER_TYPES_AI.value,
"content": content,
"parameters": parameters,
"metadata": metadata,
"ui": ui,
"id": msg.id,
"parent_message_id": msg.parent_message_id,
self.socketio.emit('new_message',
{
"sender": sender,
"message_type": message_type.value,
"sender_type": SENDER_TYPES.SENDER_TYPES_AI.value,
"content": content,
"parameters": parameters,
"metadata": metadata,
"ui": ui,
"id": msg.id,
"parent_message_id": msg.parent_message_id,
'binding': self.config["binding_name"],
'model' : self.config["model_name"],
'personality': self.config["personalities"][self.config["active_personality_id"]],
'binding': self.config["binding_name"],
'model' : self.config["model_name"],
'personality': self.config["personalities"][self.config["active_personality_id"]],
'created_at': self.connections[client_id]["current_discussion"].current_message.created_at,
'finished_generating_at': self.connections[client_id]["current_discussion"].current_message.finished_generating_at,
'created_at': self.connections[client_id]["current_discussion"].current_message.created_at,
'finished_generating_at': self.connections[client_id]["current_discussion"].current_message.finished_generating_at,
'open': open
}, to=client_id
'open': open
}, to=client_id
)
)
def update_message(self, client_id, chunk,
@ -1024,33 +1026,35 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.connections[client_id]["current_discussion"].current_message.finished_generating_at=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
mtdt = json.dumps(metadata, indent=4) if metadata is not None and type(metadata)== list else metadata
if self.nb_received_tokens==1:
asyncio.run(
self.socketio.emit('update_message', {
"sender": self.personality.name,
'id':self.connections[client_id]["current_discussion"].current_message.id,
'content': "✍ warming up ...",# self.connections[client_id]["generated_text"],
'ui': ui,
'discussion_id':self.connections[client_id]["current_discussion"].discussion_id,
'message_type': MSG_TYPE.MSG_TYPE_STEP_END.value,
'finished_generating_at': self.connections[client_id]["current_discussion"].current_message.finished_generating_at,
'parameters':parameters,
'metadata':metadata
}, to=client_id
)
)
asyncio.run(
self.socketio.emit('update_message', {
"sender": self.personality.name,
'id':self.connections[client_id]["current_discussion"].current_message.id,
'content': "✍ warming up ...",# self.connections[client_id]["generated_text"],
'content': chunk,# self.connections[client_id]["generated_text"],
'ui': ui,
'discussion_id':self.connections[client_id]["current_discussion"].discussion_id,
'message_type': MSG_TYPE.MSG_TYPE_STEP_END.value,
'message_type': msg_type.value if msg_type is not None else MSG_TYPE.MSG_TYPE_CHUNK.value if self.nb_received_tokens>1 else MSG_TYPE.MSG_TYPE_FULL.value,
'finished_generating_at': self.connections[client_id]["current_discussion"].current_message.finished_generating_at,
'parameters':parameters,
'metadata':metadata
}, to=client_id
)
self.socketio.emit('update_message', {
"sender": self.personality.name,
'id':self.connections[client_id]["current_discussion"].current_message.id,
'content': chunk,# self.connections[client_id]["generated_text"],
'ui': ui,
'discussion_id':self.connections[client_id]["current_discussion"].discussion_id,
'message_type': msg_type.value if msg_type is not None else MSG_TYPE.MSG_TYPE_CHUNK.value if self.nb_received_tokens>1 else MSG_TYPE.MSG_TYPE_FULL.value,
'finished_generating_at': self.connections[client_id]["current_discussion"].current_message.finished_generating_at,
'parameters':parameters,
'metadata':metadata
}, to=client_id
)
self.socketio.sleep(0.01)
)
if msg_type != MSG_TYPE.MSG_TYPE_INFO:
self.connections[client_id]["current_discussion"].update_message(self.connections[client_id]["generated_text"], new_metadata=mtdt, new_ui=ui)
@ -1063,20 +1067,23 @@ class LOLLMSWebUI(LOLLMSElfServer):
self.connections[client_id]["generated_text"]=self.connections[client_id]["generated_text"].split("!@>")[0]
# Send final message
self.connections[client_id]["current_discussion"].current_message.finished_generating_at=datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.socketio.emit('close_message', {
"sender": self.personality.name,
"id": self.connections[client_id]["current_discussion"].current_message.id,
"content":self.connections[client_id]["generated_text"],
asyncio.run(
self.socketio.emit('close_message', {
"sender": self.personality.name,
"id": self.connections[client_id]["current_discussion"].current_message.id,
"content":self.connections[client_id]["generated_text"],
'binding': self.config["binding_name"],
'model' : self.config["model_name"],
'personality':self.config["personalities"][self.config["active_personality_id"]],
'binding': self.config["binding_name"],
'model' : self.config["model_name"],
'personality':self.config["personalities"][self.config["active_personality_id"]],
'created_at': self.connections[client_id]["current_discussion"].current_message.created_at,
'finished_generating_at': self.connections[client_id]["current_discussion"].current_message.finished_generating_at,
'created_at': self.connections[client_id]["current_discussion"].current_message.created_at,
'finished_generating_at': self.connections[client_id]["current_discussion"].current_message.finished_generating_at,
}, to=client_id
)
)
}, to=client_id
)
def process_chunk(
self,
chunk:str,
@ -1118,17 +1125,19 @@ class LOLLMSWebUI(LOLLMSElfServer):
if message_type == MSG_TYPE.MSG_TYPE_NEW_MESSAGE:
self.nb_received_tokens = 0
self.start_time = datetime.now()
self.new_message(
client_id,
self.personality.name if personality is None else personality.name,
chunk if parameters["type"]!=MSG_TYPE.MSG_TYPE_UI.value else '',
metadata = [{
"title":chunk,
"content":parameters["metadata"]
}
] if parameters["type"]==MSG_TYPE.MSG_TYPE_JSON_INFOS.value else None,
ui= chunk if parameters["type"]==MSG_TYPE.MSG_TYPE_UI.value else None,
message_type= MSG_TYPE(parameters["type"]))
asyncio.run(
self.new_message(
client_id,
self.personality.name if personality is None else personality.name,
chunk if parameters["type"]!=MSG_TYPE.MSG_TYPE_UI.value else '',
metadata = [{
"title":chunk,
"content":parameters["metadata"]
}
] if parameters["type"]==MSG_TYPE.MSG_TYPE_JSON_INFOS.value else None,
ui= chunk if parameters["type"]==MSG_TYPE.MSG_TYPE_UI.value else None,
message_type= MSG_TYPE(parameters["type"]))
)
elif message_type == MSG_TYPE.MSG_TYPE_FINISHED_MESSAGE:
self.close_message(client_id)
@ -1395,12 +1404,13 @@ class LOLLMSWebUI(LOLLMSElfServer):
if ttl is None or ttl=="" or ttl=="untitled":
title = self.make_discussion_title(d, client_id=client_id)
d.rename(title)
self.socketio.emit('disucssion_renamed',{
'status': True,
'discussion_id':d.discussion_id,
'title':title
}, to=client_id)
asyncio.run(
self.socketio.emit('disucssion_renamed',{
'status': True,
'discussion_id':d.discussion_id,
'title':title
}, to=client_id)
)
self.busy=False
else:

View File

@ -68,8 +68,10 @@ if __name__ == "__main__":
from lollms.server.endpoints.lollms_extensions_infos import router as lollms_extensions_infos_router
from lollms.server.endpoints.lollms_generator import router as lollms_generator_router
from lollms.server.endpoints.lollms_configuration_infos import router as lollms_configuration_infos_router
from endpoints.lollms_discussion import router as lollms_discussion_router
from endpoints.lollms_webui_infos import router as lollms_webui_infos_router
from endpoints.lollms_discussion import router as lollms_discussion_router
from endpoints.lollms_message import router as lollms_message_router
from endpoints.lollms_user import router as lollms_user_router
@ -89,6 +91,9 @@ if __name__ == "__main__":
app.include_router(lollms_webui_infos_router)
app.include_router(lollms_generator_router)
app.include_router(lollms_discussion_router)
app.include_router(lollms_message_router)
app.include_router(lollms_user_router)
app.include_router(lollms_configuration_infos_router)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

4
web/dist/index.html vendored
View File

@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LoLLMS WebUI - Welcome</title>
<script type="module" crossorigin src="/assets/index-c332aeeb.js"></script>
<link rel="stylesheet" href="/assets/index-a5ef9055.css">
<script type="module" crossorigin src="/assets/index-46f88ef1.js"></script>
<link rel="stylesheet" href="/assets/index-27a6ba78.css">
</head>
<body>
<div id="app"></div>

View File

@ -3681,9 +3681,9 @@ export default {
try {
const obj = {
language: pers.language,
category: pers.category,
folder: pers.folder,
language: pers.language?pers.language:"",
category: pers.category?pers.category:"",
folder: pers.folder?pers.folder:"",
}
const res = await axios.post('/mount_personality', obj);

@ -1 +1 @@
Subproject commit 882003f258657cc2f3f4492e3e4d62d5f0c53fd8
Subproject commit 4dabcb35d6eecd7935478b587d2e8308241d8fd0

@ -1 +1 @@
Subproject commit 203d9f7262d8b076b59b8c3131d8403688d09495
Subproject commit 0f32c4da1b6c7dbdbed1f50bd8a09ad4b18adc1a

@ -1 +1 @@
Subproject commit 13392fa155326de95e42f2106c4b371f81057d1d
Subproject commit 12006786e070ddc17822db3be2217afd9441ff88